diff --git a/Makefile b/Makefile index 09a1d82d2d..e533b77511 100644 --- a/Makefile +++ b/Makefile @@ -49,6 +49,7 @@ push: requirements: @if [ "$(VIRTUAL_ENV)" ]; then \ (cd requirements && pip install --no-index -r dev_local.txt); \ + $(PYTHON) fix_virtualenv_setuptools.py; \ else \ (cd requirements && sudo pip install --no-index -r dev_local.txt); \ fi @@ -58,6 +59,7 @@ requirements: requirements_pypi: @if [ "$(VIRTUAL_ENV)" ]; then \ pip install -r requirements/dev.txt; \ + $(PYTHON) fix_virtualenv_setuptools.py; \ else \ sudo pip install -r requirements/dev.txt; \ fi diff --git a/awx/lib/site-packages/README b/awx/lib/site-packages/README index 137586591a..51f0e15747 100644 --- a/awx/lib/site-packages/README +++ b/awx/lib/site-packages/README @@ -1,23 +1,49 @@ Local versions of third-party packages required by AWX. Package names and versions are listed below, along with notes on which files are included. -amqp-1.0.13 (amqp/*) -anyjson-0.3.3 (anyjson/*) -billiard-2.7.3.32 (billiard/*, funtests/*, excluded _billiard.so) -celery-3.0.22 (celery/*, excluded bin/celery* and bin/camqadm) -django-auth-ldap-1.1.4 (django_auth_ldap/*) -django-celery-3.0.21 (djcelery/*, excluded bin/djcelerymon) -django-extensions-1.2.0 (django_extensions/*) -django-jsonfield-0.9.10 (jsonfield/*) -django-taggit-0.10 (taggit/*) -djangorestframework-2.3.7 (rest_framework/*) -importlib-1.0.2 (importlib/*, needed for Python 2.6 support) -kombu-2.5.14 (kombu/*) -Markdown-2.3.1 (markdown/*, excluded bin/markdown_py) -ordereddict-1.1 (ordereddict.py, needed for Python 2.6 support) -pexpect-2.4 (pexpect.py, pxssh.py, fdpexpect.py, FSM.py, screen.py, ANSI.py) -python-dateutil-2.1 (dateutil/*) -pytz-2013b (pytz/*) -requests-1.2.3 (requests/*) -six-1.3.0 (six.py) -South-0.8.2 (south/*) +amqp==1.2.1 (amqp/*) +anyjson==0.3.3 (anyjson/*) +Babel==1.3 (babel/*, excluded bin/pybabel) +billiard==2.7.3.32 (billiard/*, funtests/*, excluded _billiard.so) +boto==2.13.3 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin, + bin/cq, bin/cwutil, bin/dynamodb_dump, bin/dynamodb_load, bin/elbadmin, + bin/fetch_file, bin/glacier, bin/instance_events, bin/kill_instance, + bin/launch_instance, bin/list_instances, bin/lss3, bin/mturk, + bin/pyami_sendmail, bin/route53, bin/s3put, bin/sdbadmin, bin/taskadmin) +celery==3.0.23 (celery/*, excluded bin/celery* and bin/camqadm) +d2to1==0.2.11 (d2to1/*) +distribute==0.7.3 (no files) +django-auth-ldap==1.1.4 (django_auth_ldap/*) +django-celery==3.0.23 (djcelery/*, excluded bin/djcelerymon) +django-extensions==1.2.2 (django_extensions/*) +django-jsonfield==0.9.10 (jsonfield/*) +django-taggit==0.10 (taggit/*) +djangorestframework==2.3.8 (rest_framework/*) +httplib2==0.8 (httplib2/*) +importlib==1.0.2 (importlib/*, needed for Python 2.6 support) +iso8601==0.1.4 (iso8601/*) +keyring==3.0.5 (keyring/*, excluded bin/keyring) +kombu==2.5.14 (kombu/*) +Markdown==2.3.1 (markdown/*, excluded bin/markdown_py) +mock==1.0.1 (mock.py) +ordereddict==1.1 (ordereddict.py, needed for Python 2.6 support) +os-diskconfig-python-novaclient-ext==0.1.1 (os_diskconfig_python_novaclient_ext/*) +os-networksv2-python-novaclient-ext==0.21 (os_networksv2_python_novaclient_ext.py) +pbr==0.5.21 (pbr/*) +pexpect==2.4 (pexpect.py, pxssh.py, fdpexpect.py, FSM.py, screen.py, ANSI.py) +pip==1.4.1 (pip/*, excluded bin/pip*) +prettytable==0.7.2 (prettytable.py) +pyrax==1.5.0 (pyrax/*) +python-dateutil==2.1 (dateutil/*) +python-novaclient==2.15.0 (novaclient/*, excluded bin/nova) +python-swiftclient==1.6.0 (swiftclient/*, excluded 
bin/swift) +pytz==2013d (pytz/*) +rackspace-auth-openstack==1.0 (rackspace_auth_openstack/*) +rackspace-novaclient==1.3 (no files) +rax-default-network-flags-python-novaclient-ext==0.1.3 (rax_default_network_flags_python_novaclient_ext/*) +rax-scheduled-images-python-novaclient-ext==0.2.1 (rax_scheduled_images_python_novaclient_ext/*) +requests==2.0.0 (requests/*) +setuptools==1.1.6 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*) +simplejson==3.3.0 (simplejson/*, excluded simplejson/_speedups.so) +six==1.4.1 (six.py) +South==0.8.2 (south/*) diff --git a/awx/lib/site-packages/_markerlib/__init__.py b/awx/lib/site-packages/_markerlib/__init__.py new file mode 100644 index 0000000000..e2b237b1f6 --- /dev/null +++ b/awx/lib/site-packages/_markerlib/__init__.py @@ -0,0 +1,16 @@ +try: + import ast + from _markerlib.markers import default_environment, compile, interpret +except ImportError: + if 'ast' in globals(): + raise + def default_environment(): + return {} + def compile(marker): + def marker_fn(environment=None, override=None): + # 'empty markers are True' heuristic won't install extra deps. + return not marker.strip() + marker_fn.__doc__ = marker + return marker_fn + def interpret(marker, environment=None, override=None): + return compile(marker)() diff --git a/awx/lib/site-packages/_markerlib/markers.py b/awx/lib/site-packages/_markerlib/markers.py new file mode 100644 index 0000000000..fa837061e0 --- /dev/null +++ b/awx/lib/site-packages/_markerlib/markers.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +"""Interpret PEP 345 environment markers. + +EXPR [in|==|!=|not in] EXPR [or|and] ... + +where EXPR belongs to any of those: + + python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1]) + python_full_version = sys.version.split()[0] + os.name = os.name + sys.platform = sys.platform + platform.version = platform.version() + platform.machine = platform.machine() + platform.python_implementation = platform.python_implementation() + a free string, like '2.6', or 'win32' +""" + +__all__ = ['default_environment', 'compile', 'interpret'] + +import ast +import os +import platform +import sys +import weakref + +_builtin_compile = compile + +try: + from platform import python_implementation +except ImportError: + if os.name == "java": + # Jython 2.5 has ast module, but not platform.python_implementation() function. + def python_implementation(): + return "Jython" + else: + raise + + +# restricted set of variables +_VARS = {'sys.platform': sys.platform, + 'python_version': '%s.%s' % sys.version_info[:2], + # FIXME parsing sys.platform is not reliable, but there is no other + # way to get e.g. 2.7.2+, and the PEP is defined with sys.version + 'python_full_version': sys.version.split(' ', 1)[0], + 'os.name': os.name, + 'platform.version': platform.version(), + 'platform.machine': platform.machine(), + 'platform.python_implementation': python_implementation(), + 'extra': None # wheel extension + } + +for var in list(_VARS.keys()): + if '.' 
in var:
+        _VARS[var.replace('.', '_')] = _VARS[var]
+
+def default_environment():
+    """Return copy of default PEP 345 globals dictionary."""
+    return dict(_VARS)
+
+class ASTWhitelist(ast.NodeTransformer):
+    def __init__(self, statement):
+        self.statement = statement # for error messages
+
+    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
+    # Bool operations
+    ALLOWED += (ast.And, ast.Or)
+    # Comparison operations
+    ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
+
+    def visit(self, node):
+        """Ensure statement only contains allowed nodes."""
+        if not isinstance(node, self.ALLOWED):
+            raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
+                              (self.statement,
+                               (' ' * node.col_offset) + '^'))
+        return ast.NodeTransformer.visit(self, node)
+
+    def visit_Attribute(self, node):
+        """Flatten one level of attribute access."""
+        new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
+        return ast.copy_location(new_node, node)
+
+def parse_marker(marker):
+    tree = ast.parse(marker, mode='eval')
+    new_tree = ASTWhitelist(marker).generic_visit(tree)
+    return new_tree
+
+def compile_marker(parsed_marker):
+    return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
+                            dont_inherit=True)
+
+_cache = weakref.WeakValueDictionary()
+
+def compile(marker):
+    """Return compiled marker as a function accepting an environment dict."""
+    try:
+        return _cache[marker]
+    except KeyError:
+        pass
+    if not marker.strip():
+        def marker_fn(environment=None, override=None):
+            """"""
+            return True
+    else:
+        compiled_marker = compile_marker(parse_marker(marker))
+        def marker_fn(environment=None, override=None):
+            """override updates environment"""
+            if override is None:
+                override = {}
+            if environment is None:
+                environment = default_environment()
+            environment.update(override)
+            return eval(compiled_marker, environment)
+    marker_fn.__doc__ = marker
+    _cache[marker] = marker_fn
+    return _cache[marker]
+
+def interpret(marker, environment=None):
+    return compile(marker)(environment)
diff --git a/awx/lib/site-packages/amqp/__init__.py b/awx/lib/site-packages/amqp/__init__.py
index fca3bb317d..00bd9e2ce4 100644
--- a/awx/lib/site-packages/amqp/__init__.py
+++ b/awx/lib/site-packages/amqp/__init__.py
@@ -16,7 +16,7 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 from __future__ import absolute_import
 
-VERSION = (1, 0, 13)
+VERSION = (1, 2, 1)
 __version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
 __author__ = 'Barry Pederson'
 __maintainer__ = 'Ask Solem'
@@ -29,22 +29,41 @@ __docformat__ = 'restructuredtext'
 
 #
 # Pull in the public items from the various sub-modules
 #
-from .basic_message import Message
-from .channel import Channel
-from .connection import Connection
-from .exceptions import (
+from .basic_message import Message  # noqa
+from .channel import Channel  # noqa
+from .connection import Connection  # noqa
+from .exceptions import (  # noqa
     AMQPError,
     ConnectionError,
+    RecoverableConnectionError,
+    IrrecoverableConnectionError,
     ChannelError,
-    ConsumerCancel,
+    RecoverableChannelError,
+    IrrecoverableChannelError,
+    ConsumerCancelled,
+    ContentTooLarge,
+    NoConsumers,
+    ConnectionForced,
+    InvalidPath,
+    AccessRefused,
+    NotFound,
+    ResourceLocked,
+    PreconditionFailed,
+    FrameError,
+    FrameSyntaxError,
+    InvalidCommand,
+    ChannelNotOpen,
+    UnexpectedFrame,
+    ResourceError,
+    NotAllowed,
+    AMQPNotImplementedError,
+    InternalError,
+    error_for_code,
+    __all__ as _all_exceptions,
 )
 
 __all__ = [
     'Connection',
     'Channel',
     'Message',
-    'AMQPError',
-    'ConnectionError',
-    'ChannelError',
-    'ConsumerCancel',
-]
+] + _all_exceptions
diff --git a/awx/lib/site-packages/amqp/abstract_channel.py b/awx/lib/site-packages/amqp/abstract_channel.py
index ea53fde7e2..5e37bf971c 100644
--- a/awx/lib/site-packages/amqp/abstract_channel.py
+++ b/awx/lib/site-packages/amqp/abstract_channel.py
@@ -1,5 +1,5 @@
 """Code common to Connection and Channel objects."""
-# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
+# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>)
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
@@ -16,7 +16,7 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 from __future__ import absolute_import
 
-from .exceptions import AMQPError
+from .exceptions import AMQPNotImplementedError, RecoverableConnectionError
 from .serialization import AMQPWriter
 
 try:
@@ -51,10 +51,14 @@ class AbstractChannel(object):
 
     def _send_method(self, method_sig, args=bytes(), content=None):
         """Send a method for our channel."""
+        conn = self.connection
+        if conn is None:
+            raise RecoverableConnectionError('connection already closed')
+
         if isinstance(args, AMQPWriter):
             args = args.getvalue()
 
-        self.connection.method_writer.write_method(
+        conn.method_writer.write_method(
             self.channel_id, method_sig, args, content,
         )
@@ -82,7 +86,8 @@ class AbstractChannel(object):
         try:
             amqp_method = self._METHOD_MAP[method_sig]
         except KeyError:
-            raise AMQPError('Unknown AMQP method %r' % (method_sig, ))
+            raise AMQPNotImplementedError(
+                'Unknown AMQP method {0!r}'.format(method_sig))
 
         if content is None:
             return amqp_method(self, args)
diff --git a/awx/lib/site-packages/amqp/channel.py b/awx/lib/site-packages/amqp/channel.py
index fa6cd2dcf7..48188a5106 100644
--- a/awx/lib/site-packages/amqp/channel.py
+++ b/awx/lib/site-packages/amqp/channel.py
@@ -19,11 +19,11 @@ from __future__ import absolute_import
 
 import logging
 from collections import defaultdict
-from Queue import Queue
 from warnings import warn
 
 from .abstract_channel import AbstractChannel
-from .exceptions import ChannelError, ConsumerCancel
+from .exceptions import ChannelError, ConsumerCancelled, error_for_code
+from .five import Queue
 from .serialization import AMQPWriter
 
 __all__ = ['Channel']
@@ -221,7 +221,9 @@ class Channel(AbstractChannel):
         self._send_method((20, 41))
         self._do_revive()
 
-        raise ChannelError(reply_code, reply_text, (class_id, method_id))
+        raise error_for_code(
+            reply_code, reply_text, (class_id, method_id), ChannelError,
+        )
 
     def _close_ok(self, args):
         """Confirm a channel close
@@ -1625,7 +1627,7 @@ class Channel(AbstractChannel):
             if callback:
                 callback(consumer_tag)
             else:
-                raise ConsumerCancel('tag %r' % (consumer_tag, ))
+                raise ConsumerCancelled(consumer_tag, (60, 30))
 
     def _basic_cancel_ok(self, args):
         """Confirm a cancelled consumer
diff --git a/awx/lib/site-packages/amqp/connection.py b/awx/lib/site-packages/amqp/connection.py
index 6768513d68..12b72128a7 100644
--- a/awx/lib/site-packages/amqp/connection.py
+++ b/awx/lib/site-packages/amqp/connection.py
@@ -29,7 +29,12 @@ except ImportError:
 from . 
import __version__ from .abstract_channel import AbstractChannel from .channel import Channel -from .exceptions import ChannelError, ConnectionError +from .exceptions import ( + AMQPNotImplementedError, ChannelError, ResourceError, + ConnectionForced, ConnectionError, error_for_code, + RecoverableConnectionError, RecoverableChannelError, +) +from .five import items, range, values from .method_framing import MethodReader, MethodWriter from .serialization import AMQPWriter from .transport import create_transport @@ -83,7 +88,8 @@ class Connection(AbstractChannel): login_method='AMQPLAIN', login_response=None, virtual_host='/', locale='en_US', client_properties=None, ssl=False, connect_timeout=None, channel_max=None, - frame_max=None, heartbeat=0, **kwargs): + frame_max=None, heartbeat=0, on_blocked=None, + on_unblocked=None, **kwargs): """Create a connection to the specified host, which should be a 'host[:port]', such as 'localhost', or '1.2.3.4:5672' (defaults to 'localhost', if a port is not specified then @@ -121,6 +127,10 @@ class Connection(AbstractChannel): self.frame_max = frame_max self.heartbeat = heartbeat + # Callbacks + self.on_blocked = on_blocked + self.on_unblocked = on_unblocked + self._avail_channel_ids = array('H', range(self.channel_max, 0, -1)) # Properties set in the Start method @@ -157,7 +167,7 @@ class Connection(AbstractChannel): try: self.transport.close() - temp_list = [x for x in self.channels.values() if x is not self] + temp_list = [x for x in values(self.channels) if x is not self] for ch in temp_list: ch._do_close() except socket.error: @@ -169,8 +179,8 @@ class Connection(AbstractChannel): try: return self._avail_channel_ids.pop() except IndexError: - raise ConnectionError( - 'No free channel ids, current=%d, channel_max=%d' % ( + raise ResourceError( + 'No free channel ids, current={0}, channel_max={1}'.format( len(self.channels), self.channel_max), (20, 10)) def _claim_channel_id(self, channel_id): @@ -225,7 +235,7 @@ class Connection(AbstractChannel): # this method for later # self.channels[channel].method_queue.append( - (method_sig, args, content) + (method_sig, args, content), ) # @@ -280,7 +290,8 @@ class Connection(AbstractChannel): channel._METHOD_MAP.get(method_sig, None)) if amqp_method is None: - raise Exception('Unknown AMQP method %r' % (method_sig, )) + raise AMQPNotImplementedError( + 'Unknown AMQP method {0!r}'.format(method_sig)) if content is None: return amqp_method(channel, args) @@ -297,7 +308,7 @@ class Connection(AbstractChannel): try: try: return self.method_reader.read_method() - except SSLError, exc: + except SSLError as exc: # http://bugs.python.org/issue10272 if 'timed out' in str(exc): raise socket.timeout() @@ -310,7 +321,7 @@ class Connection(AbstractChannel): sock.settimeout(prev) def _wait_multiple(self, channels, allowed_methods, timeout=None): - for channel_id, channel in channels.iteritems(): + for channel_id, channel in items(channels): method_queue = channel.method_queue for queued_method in method_queue: method_sig = queued_method[0] @@ -351,7 +362,7 @@ class Connection(AbstractChannel): exchange = args.read_shortstr() routing_key = args.read_shortstr() - exc = ChannelError('basic.return', reply_code, reply_text, (50, 60)) + exc = error_for_code(reply_code, reply_text, (50, 60), ChannelError) handlers = channel.events.get('basic_return') if not handlers: raise exc @@ -488,7 +499,18 @@ class Connection(AbstractChannel): self._x_close_ok() - raise ConnectionError(reply_code, reply_text, (class_id, method_id)) + raise 
error_for_code(reply_code, reply_text, + (class_id, method_id), ConnectionError) + + def _blocked(self, args): + """RabbitMQ Extension.""" + reason = args.read_shortstr() + if self.on_blocked: + return self.on_blocked(reason) + + def _unblocked(self, *args): + if self.on_unblocked: + return self.on_unblocked() def _x_close_ok(self): """Confirm a connection close @@ -759,6 +781,10 @@ class Connection(AbstractChannel): if 'capabilities' not in client_properties: client_properties['capabilities'] = {} client_properties['capabilities']['consumer_cancel_notify'] = True + if self.server_capabilities.get('connection.blocked'): + if 'capabilities' not in client_properties: + client_properties['capabilities'] = {} + client_properties['capabilities']['connection.blocked'] = True args = AMQPWriter() args.write_table(client_properties) args.write_shortstr(mechanism) @@ -841,7 +867,7 @@ class Connection(AbstractChannel): self.prev_sent, self.prev_recv = sent_now, recv_now if self.missed_heartbeats >= rate: - raise ConnectionError('Too many heartbeats missed') + raise ConnectionForced('Too many heartbeats missed') def _x_tune_ok(self, channel_max, frame_max, heartbeat): """Negotiate connection tuning parameters @@ -914,6 +940,8 @@ class Connection(AbstractChannel): (10, 41): _open_ok, (10, 50): _close, (10, 51): _close_ok, + (10, 60): _blocked, + (10, 61): _unblocked, } _IMMEDIATE_METHODS = [] @@ -924,3 +952,12 @@ class Connection(AbstractChannel): OSError, ) channel_errors = (ChannelError, ) + recoverable_connection_errors = ( + RecoverableConnectionError, + socket.error, + IOError, + OSError, + ) + recoverable_channel_errors = ( + RecoverableChannelError, + ) diff --git a/awx/lib/site-packages/amqp/exceptions.py b/awx/lib/site-packages/amqp/exceptions.py index 7f786a9e5c..d8bcf33548 100644 --- a/awx/lib/site-packages/amqp/exceptions.py +++ b/awx/lib/site-packages/amqp/exceptions.py @@ -18,29 +18,40 @@ from __future__ import absolute_import from struct import pack, unpack -__all__ = ['AMQPError', 'ConnectionError', 'ChannelError'] +__all__ = [ + 'AMQPError', + 'ConnectionError', 'ChannelError', + 'RecoverableConnectionError', 'IrrecoverableConnectionError', + 'RecoverableChannelError', 'IrrecoverableChannelError', + 'ConsumerCancelled', 'ContentTooLarge', 'NoConsumers', + 'ConnectionForced', 'InvalidPath', 'AccessRefused', 'NotFound', + 'ResourceLocked', 'PreconditionFailed', 'FrameError', 'FrameSyntaxError', + 'InvalidCommand', 'ChannelNotOpen', 'UnexpectedFrame', 'ResourceError', + 'NotAllowed', 'AMQPNotImplementedError', 'InternalError', +] class AMQPError(Exception): + code = 0 - def __init__(self, msg, reply_code=None, reply_text=None, - method_sig=None, method_name=None): - self.message = msg - self.amqp_reply_code = reply_code - self.amqp_reply_text = reply_text - self.amqp_method_sig = method_sig + def __init__(self, reply_text=None, method_sig=None, + method_name=None, reply_code=None): + self.message = reply_text + self.reply_code = reply_code or self.code + self.reply_text = reply_text + self.method_sig = method_sig self.method_name = method_name or '' if method_sig and not self.method_name: self.method_name = METHOD_NAME_MAP.get(method_sig, '') - Exception.__init__(self, msg, reply_code, + Exception.__init__(self, reply_code, reply_text, method_sig, self.method_name) def __str__(self): - if self.amqp_reply_code: - return '%s: (%s, %s, %s)' % ( - self.message, self.amqp_reply_code, self.amqp_reply_text, - self.amqp_method_sig) - return self.message + return '{0.method}: ({0.reply_code}) 
{0.reply_text}'.format(self) + + @property + def method(self): + return self.method_name or self.method_sig class ConnectionError(AMQPError): @@ -51,10 +62,130 @@ class ChannelError(AMQPError): pass -class ConsumerCancel(ChannelError): +class RecoverableChannelError(ChannelError): pass +class IrrecoverableChannelError(ChannelError): + pass + + +class RecoverableConnectionError(ConnectionError): + pass + + +class IrrecoverableConnectionError(ConnectionError): + pass + + +class Blocked(RecoverableConnectionError): + pass + + +class ConsumerCancelled(RecoverableConnectionError): + pass + + +class ContentTooLarge(RecoverableChannelError): + code = 311 + + +class NoConsumers(RecoverableChannelError): + code = 313 + + +class ConnectionForced(RecoverableConnectionError): + code = 320 + + +class InvalidPath(IrrecoverableConnectionError): + code = 402 + + +class AccessRefused(IrrecoverableChannelError): + code = 403 + + +class NotFound(IrrecoverableChannelError): + code = 404 + + +class ResourceLocked(RecoverableChannelError): + code = 405 + + +class PreconditionFailed(IrrecoverableChannelError): + code = 406 + + +class FrameError(IrrecoverableConnectionError): + code = 501 + + +class FrameSyntaxError(IrrecoverableConnectionError): + code = 502 + + +class InvalidCommand(IrrecoverableConnectionError): + code = 503 + + +class ChannelNotOpen(IrrecoverableConnectionError): + code = 504 + + +class UnexpectedFrame(IrrecoverableConnectionError): + code = 505 + + +class ResourceError(RecoverableConnectionError): + code = 506 + + +class NotAllowed(IrrecoverableConnectionError): + code = 530 + + +class AMQPNotImplementedError(IrrecoverableConnectionError): + code = 540 + + +class InternalError(IrrecoverableConnectionError): + code = 541 + + +ERROR_MAP = { + 311: ContentTooLarge, + 313: NoConsumers, + 320: ConnectionForced, + 402: InvalidPath, + 403: AccessRefused, + 404: NotFound, + 405: ResourceLocked, + 406: PreconditionFailed, + 501: FrameError, + 502: FrameSyntaxError, + 503: InvalidCommand, + 504: ChannelNotOpen, + 505: UnexpectedFrame, + 506: ResourceError, + 530: NotAllowed, + 540: AMQPNotImplementedError, + 541: InternalError, +} + + +def error_for_code(code, text, method, default): + try: + return ERROR_MAP[code](text, method, reply_code=code) + except KeyError: + return default(text, method, reply_code=code) + + +def raise_for_code(code, text, method, default): + raise error_for_code(code, text, method, default) + + METHOD_NAME_MAP = { (10, 10): 'Connection.start', (10, 11): 'Connection.start_ok', diff --git a/awx/lib/site-packages/amqp/five.py b/awx/lib/site-packages/amqp/five.py new file mode 100644 index 0000000000..25b83fc08e --- /dev/null +++ b/awx/lib/site-packages/amqp/five.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +""" + celery.five + ~~~~~~~~~~~ + + Compatibility implementations of features + only available in newer Python versions. 
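+
+    For example (illustrative only; ``items`` and ``string_t`` are both
+    defined below for either interpreter)::
+
+        from amqp.five import items, string_t
+
+        for key, value in items({'pepper': 'red'}):
+            assert isinstance(key, string_t)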
+ + +""" +from __future__ import absolute_import + +############## py3k ######################################################### +import sys +PY3 = sys.version_info[0] == 3 + +try: + reload = reload # noqa +except NameError: # pragma: no cover + from imp import reload # noqa + +try: + from UserList import UserList # noqa +except ImportError: # pragma: no cover + from collections import UserList # noqa + +try: + from UserDict import UserDict # noqa +except ImportError: # pragma: no cover + from collections import UserDict # noqa + + +if PY3: + import builtins + + from queue import Queue, Empty + from itertools import zip_longest + from io import StringIO, BytesIO + + map = map + string = str + string_t = str + long_t = int + text_t = str + range = range + int_types = (int, ) + + open_fqdn = 'builtins.open' + + def items(d): + return d.items() + + def keys(d): + return d.keys() + + def values(d): + return d.values() + + def nextfun(it): + return it.__next__ + + exec_ = getattr(builtins, 'exec') + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + class WhateverIO(StringIO): + + def write(self, data): + if isinstance(data, bytes): + data = data.encode() + StringIO.write(self, data) + +else: + import __builtin__ as builtins # noqa + from Queue import Queue, Empty # noqa + from itertools import imap as map, izip_longest as zip_longest # noqa + from StringIO import StringIO # noqa + string = unicode # noqa + string_t = basestring # noqa + text_t = unicode + long_t = long # noqa + range = xrange + int_types = (int, long) + + open_fqdn = '__builtin__.open' + + def items(d): # noqa + return d.iteritems() + + def keys(d): # noqa + return d.iterkeys() + + def values(d): # noqa + return d.itervalues() + + def nextfun(it): # noqa + return it.next + + def exec_(code, globs=None, locs=None): + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") + + BytesIO = WhateverIO = StringIO # noqa + + +def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): + """Class decorator to set metaclass. + + Works with both Python 3 and Python 3 and it does not add + an extra class in the lookup order like ``six.with_metaclass`` does + (that is -- it copies the original class instead of using inheritance). + + """ + + def _clone_with_metaclass(Class): + attrs = dict((key, value) for key, value in items(vars(Class)) + if key not in skip_attrs) + return Type(Class.__name__, Class.__bases__, attrs) + + return _clone_with_metaclass diff --git a/awx/lib/site-packages/amqp/method_framing.py b/awx/lib/site-packages/amqp/method_framing.py index a26db6672b..a2722139bc 100644 --- a/awx/lib/site-packages/amqp/method_framing.py +++ b/awx/lib/site-packages/amqp/method_framing.py @@ -26,7 +26,8 @@ except NameError: bytes = str from .basic_message import Message -from .exceptions import AMQPError +from .exceptions import AMQPError, UnexpectedFrame +from .five import range, string from .serialization import AMQPReader __all__ = ['MethodReader'] @@ -110,11 +111,11 @@ class MethodReader(object): while not queue: try: frame_type, channel, payload = read_frame() - except Exception, e: + except Exception as exc: # # Connection was closed? Framing Error? 
# - put(e) + put(exc) break self.bytes_recv += 1 @@ -122,8 +123,8 @@ class MethodReader(object): if frame_type not in (self.expected_types[channel], 8): put(( channel, - AMQPError( - 'Received frame type %s while expecting type: %s' % ( + UnexpectedFrame( + 'Received frame {0} while expecting type: {1}'.format( frame_type, self.expected_types[channel])))) elif frame_type == 1: self._process_method_frame(channel, payload) @@ -213,7 +214,7 @@ class MethodWriter(object): # problem with the content properties, before sending the # first frame body = content.body - if isinstance(body, unicode): + if isinstance(body, string): coding = content.properties.get('content_encoding', None) if coding is None: coding = content.properties['content_encoding'] = 'UTF-8' @@ -229,6 +230,6 @@ class MethodWriter(object): write_frame(2, channel, payload) chunk_size = self.frame_max - 8 - for i in xrange(0, len(body), chunk_size): + for i in range(0, len(body), chunk_size): write_frame(3, channel, body[i:i + chunk_size]) self.bytes_sent += 1 diff --git a/awx/lib/site-packages/amqp/serialization.py b/awx/lib/site-packages/amqp/serialization.py index bcb3b90b47..6a747029e3 100644 --- a/awx/lib/site-packages/amqp/serialization.py +++ b/awx/lib/site-packages/amqp/serialization.py @@ -28,6 +28,9 @@ from decimal import Decimal from struct import pack, unpack from time import mktime +from .exceptions import FrameSyntaxError +from .five import int_types, long_t, string, string_t, items + IS_PY3K = sys.version_info[0] >= 3 if IS_PY3K: @@ -50,6 +53,14 @@ except NameError: # Python 2.5 and lower bytes = str +ILLEGAL_TABLE_TYPE_WITH_KEY = """\ +Table type {0!r} for key {1!r} not handled by amqp. [value: {2!r}] +""" + +ILLEGAL_TABLE_TYPE = """\ + Table type {0!r} not handled by amqp. [value: {1!r}] +""" + class AMQPReader(object): """Read higher-level AMQP types from a bytestream.""" @@ -164,8 +175,8 @@ class AMQPReader(object): elif ftype == 100: val = self.read_float() else: - raise ValueError( - 'Unknown value in table: %r (%r)' % ( + raise FrameSyntaxError( + 'Unknown value in table: {0!r} ({1!r})'.format( ftype, type(ftype))) return val @@ -245,28 +256,32 @@ class AMQPWriter(object): def write_octet(self, n): """Write an integer as an unsigned 8-bit value.""" if n < 0 or n > 255: - raise ValueError('Octet %r out of range 0..255' % (n, )) + raise FrameSyntaxError( + 'Octet {0!r} out of range 0..255'.format(n)) self._flushbits() self.out.write(pack('B', n)) def write_short(self, n): """Write an integer as an unsigned 16-bit value.""" if n < 0 or n > 65535: - raise ValueError('Octet %r out of range 0..65535' % (n, )) + raise FrameSyntaxError( + 'Octet {0!r} out of range 0..65535'.format(n)) self._flushbits() self.out.write(pack('>H', int(n))) def write_long(self, n): """Write an integer as an unsigned2 32-bit value.""" if n < 0 or n >= 4294967296: - raise ValueError('Octet %r out of range 0..2**31-1' % (n, )) + raise FrameSyntaxError( + 'Octet {0!r} out of range 0..2**31-1'.format(n)) self._flushbits() self.out.write(pack('>I', n)) def write_longlong(self, n): """Write an integer as an unsigned 64-bit value.""" if n < 0 or n >= 18446744073709551616: - raise ValueError('Octet %r out of range 0..2**64-1' % (n, )) + raise FrameSyntaxError( + 'Octet {0!r} out of range 0..2**64-1'.format(n)) self._flushbits() self.out.write(pack('>Q', n)) @@ -277,10 +292,11 @@ class AMQPWriter(object): """ self._flushbits() - if isinstance(s, unicode): + if isinstance(s, string): s = s.encode('utf-8') if len(s) > 255: - raise ValueError('String 
too long (%r)' % (len(s), )) + raise FrameSyntaxError( + 'Shortstring overflow ({0} > 255)'.format(len(s))) self.write_octet(len(s)) self.out.write(s) @@ -291,7 +307,7 @@ class AMQPWriter(object): """ self._flushbits() - if isinstance(s, unicode): + if isinstance(s, string): s = s.encode('utf-8') self.write_long(len(s)) self.out.write(s) @@ -302,27 +318,27 @@ class AMQPWriter(object): sub-dictionaries following the same constraints.""" self._flushbits() table_data = AMQPWriter() - for k, v in d.iteritems(): + for k, v in items(d): table_data.write_shortstr(k) - table_data.write_item(v) + table_data.write_item(v, k) table_data = table_data.getvalue() self.write_long(len(table_data)) self.out.write(table_data) - def write_item(self, v): - if isinstance(v, basestring): - if isinstance(v, unicode): + def write_item(self, v, k=None): + if isinstance(v, (string_t, bytes)): + if isinstance(v, string): v = v.encode('utf-8') - self.write(byte(83)) # 'S' + self.write(b'S') self.write_longstr(v) elif isinstance(v, bool): - self.write(pack('>cB', byte(116), int(v))) # 't' + self.write(pack('>cB', b't', int(v))) elif isinstance(v, float): - self.write(pack('>cd', byte(100), v)) # 'd' - elif isinstance(v, (int, long)): - self.write(pack('>ci', byte(73), v)) # 'I' + self.write(pack('>cd', b'd', v)) + elif isinstance(v, int_types): + self.write(pack('>ci', b'I', v)) elif isinstance(v, Decimal): - self.write(byte(68)) # 'D' + self.write(b'D') sign, digits, exponent = v.as_tuple() v = 0 for d in digits: @@ -332,19 +348,19 @@ class AMQPWriter(object): self.write_octet(-exponent) self.write(pack('>i', v)) elif isinstance(v, datetime): - self.write(byte(84)) # 'T' + self.write(b'T') self.write_timestamp(v) ## FIXME: timezone ? elif isinstance(v, dict): - self.write(byte(70)) # 'F' + self.write(b'F') self.write_table(v) elif isinstance(v, (list, tuple)): - self.write(byte(65)) # 'A' + self.write(b'A') self.write_array(v) else: - raise ValueError( - 'Table type %r not handled by amqp: %r' % ( - type(v), v)) + err = (ILLEGAL_TABLE_TYPE_WITH_KEY.format(type(v), k, v) if k + else ILLEGAL_TABLE_TYPE.format(type(v), v)) + raise FrameSyntaxError(err) def write_array(self, a): array_data = AMQPWriter() @@ -357,7 +373,7 @@ class AMQPWriter(object): def write_timestamp(self, v): """Write out a Python datetime.datetime object as a 64-bit integer representing seconds since the Unix epoch.""" - self.out.write(pack('>q', long(mktime(v.timetuple())))) + self.out.write(pack('>q', long_t(mktime(v.timetuple())))) class GenericContent(object): diff --git a/awx/lib/site-packages/amqp/transport.py b/awx/lib/site-packages/amqp/transport.py index b441a11ced..6b17757cae 100644 --- a/awx/lib/site-packages/amqp/transport.py +++ b/awx/lib/site-packages/amqp/transport.py @@ -48,7 +48,7 @@ except: from struct import pack, unpack -from .exceptions import AMQPError +from .exceptions import UnexpectedFrame AMQP_PORT = 5672 @@ -65,7 +65,7 @@ class _AbstractTransport(object): """Common superclass for TCP and SSL transports""" def __init__(self, host, connect_timeout): - msg = 'socket.getaddrinfo() for %s returned an empty list' % host + msg = None port = AMQP_PORT m = IPV6_LITERAL.match(host) @@ -87,7 +87,8 @@ class _AbstractTransport(object): self.sock = socket.socket(af, socktype, proto) self.sock.settimeout(connect_timeout) self.sock.connect(sa) - except socket.error, msg: + except socket.error as exc: + msg = exc self.sock.close() self.sock = None last_err = msg @@ -142,7 +143,6 @@ class _AbstractTransport(object): self.sock = None def 
read_frame(self, unpack=unpack): - """Read an AMQP frame.""" read = self._read frame_type, channel, size = unpack('>BHI', read(7, True)) payload = read(size) @@ -150,15 +150,15 @@ class _AbstractTransport(object): if ch == 206: # '\xce' return frame_type, channel, payload else: - raise AMQPError( - 'Framing Error, received 0x%02x while expecting 0xce' % ch) + raise UnexpectedFrame( + 'Received 0x{0:02x} while expecting 0xce'.format(ch)) def write_frame(self, frame_type, channel, payload): - """Write out an AMQP frame.""" size = len(payload) - self._write( - pack('>BHI%dsB' % size, frame_type, channel, size, payload, 0xce), - ) + self._write(pack( + '>BHI%dsB' % size, + frame_type, channel, size, payload, 0xce, + )) class SSLTransport(_AbstractTransport): @@ -203,7 +203,7 @@ class SSLTransport(_AbstractTransport): while len(rbuf) < n: try: s = recv(131072) # see note above - except socket.error, exc: + except socket.error as exc: # ssl.sock.read may cause ENOENT if the # operation couldn't be performed (Issue celery#1414). if not initial and exc.errno in _errnos: @@ -243,7 +243,7 @@ class TCPTransport(_AbstractTransport): while len(rbuf) < n: try: s = recv(131072) - except socket.error, exc: + except socket.error as exc: if not initial and exc.errno in _errnos: continue raise diff --git a/awx/lib/site-packages/amqp/utils.py b/awx/lib/site-packages/amqp/utils.py new file mode 100644 index 0000000000..05dbc93bf8 --- /dev/null +++ b/awx/lib/site-packages/amqp/utils.py @@ -0,0 +1,61 @@ +from __future__ import absolute_import + +import sys + + +class promise(object): + if not hasattr(sys, 'pypy_version_info'): + __slots__ = tuple( + 'fun args kwargs value ready failed on_success on_error'.split() + ) + + def __init__(self, fun, args=(), kwargs=(), + on_success=None, on_error=None): + self.fun = fun + self.args = args + self.kwargs = kwargs + self.ready = False + self.failed = False + self.on_success = on_success + self.on_error = on_error + self.value = None + + def __repr__(self): + return '<$: {0.fun.__name__}(*{0.args!r}, **{0.kwargs!r})'.format( + self, + ) + + def __call__(self, *args, **kwargs): + try: + self.value = self.fun( + *self.args + args if self.args else args, + **dict(self.kwargs, **kwargs) if self.kwargs else kwargs + ) + except Exception as exc: + self.set_error_state(exc) + else: + if self.on_success: + self.on_success(self.value) + finally: + self.ready = True + + def then(self, callback=None, on_error=None): + self.on_success = callback + self.on_error = on_error + return callback + + def set_error_state(self, exc): + self.failed = True + if self.on_error is None: + raise + self.on_error(exc) + + def throw(self, exc): + try: + raise exc + except exc.__class__ as with_cause: + self.set_error_state(with_cause) + + +def noop(): + return promise(lambda *a, **k: None) diff --git a/awx/lib/site-packages/babel/__init__.py b/awx/lib/site-packages/babel/__init__.py new file mode 100644 index 0000000000..dd9f17e04d --- /dev/null +++ b/awx/lib/site-packages/babel/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +""" + babel + ~~~~~ + + Integrated collection of utilities that assist in internationalizing and + localizing applications. + + This package is basically composed of two major parts: + + * tools to build and work with ``gettext`` message catalogs + * a Python interface to the CLDR (Common Locale Data Repository), providing + access to various locale display names, localized number and date + formatting, etc. + + :copyright: (c) 2013 by the Babel Team. 
+ :license: BSD, see LICENSE for more details. +""" + +from babel.core import UnknownLocaleError, Locale, default_locale, \ + negotiate_locale, parse_locale, get_locale_identifier + + +__version__ = '1.3' diff --git a/awx/lib/site-packages/babel/_compat.py b/awx/lib/site-packages/babel/_compat.py new file mode 100644 index 0000000000..86096daa61 --- /dev/null +++ b/awx/lib/site-packages/babel/_compat.py @@ -0,0 +1,51 @@ +import sys + +PY2 = sys.version_info[0] == 2 + +_identity = lambda x: x + + +if not PY2: + text_type = str + string_types = (str,) + integer_types = (int, ) + unichr = chr + + text_to_native = lambda s, enc: s + + iterkeys = lambda d: iter(d.keys()) + itervalues = lambda d: iter(d.values()) + iteritems = lambda d: iter(d.items()) + + from io import StringIO, BytesIO + import pickle + + izip = zip + imap = map + range_type = range + + cmp = lambda a, b: (a > b) - (a < b) + +else: + text_type = unicode + string_types = (str, unicode) + integer_types = (int, long) + + text_to_native = lambda s, enc: s.encode(enc) + unichr = unichr + + iterkeys = lambda d: d.iterkeys() + itervalues = lambda d: d.itervalues() + iteritems = lambda d: d.iteritems() + + from cStringIO import StringIO as BytesIO + from StringIO import StringIO + import cPickle as pickle + + from itertools import izip, imap + range_type = xrange + + cmp = cmp + + +number_types = integer_types + (float,) diff --git a/awx/lib/site-packages/babel/core.py b/awx/lib/site-packages/babel/core.py new file mode 100644 index 0000000000..6e6e6d6194 --- /dev/null +++ b/awx/lib/site-packages/babel/core.py @@ -0,0 +1,941 @@ +# -*- coding: utf-8 -*- +""" + babel.core + ~~~~~~~~~~ + + Core locale representation and locale data access. + + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. +""" + +import os + +from babel import localedata +from babel._compat import pickle, string_types + +__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale', + 'parse_locale'] + + +_global_data = None + + +def _raise_no_data_error(): + raise RuntimeError('The babel data files are not available. ' + 'This usually happens because you are using ' + 'a source checkout from Babel and you did ' + 'not build the data files. Just make sure ' + 'to run "python setup.py import_cldr" before ' + 'installing the library.') + + +def get_global(key): + """Return the dictionary for the given key in the global data. + + The global data is stored in the ``babel/global.dat`` file and contains + information independent of individual locales. + + >>> get_global('zone_aliases')['UTC'] + u'Etc/GMT' + >>> get_global('zone_territories')['Europe/Berlin'] + u'DE' + + .. 
versionadded:: 0.9 + + :param key: the data key + """ + global _global_data + if _global_data is None: + dirname = os.path.join(os.path.dirname(__file__)) + filename = os.path.join(dirname, 'global.dat') + if not os.path.isfile(filename): + _raise_no_data_error() + fileobj = open(filename, 'rb') + try: + _global_data = pickle.load(fileobj) + finally: + fileobj.close() + return _global_data.get(key, {}) + + +LOCALE_ALIASES = { + 'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ', + 'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES', + 'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES', + 'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT', + 'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV', + 'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL', + 'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI', + 'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA' +} + + +class UnknownLocaleError(Exception): + """Exception thrown when a locale is requested for which no locale data + is available. + """ + + def __init__(self, identifier): + """Create the exception. + + :param identifier: the identifier string of the unsupported locale + """ + Exception.__init__(self, 'unknown locale %r' % identifier) + + #: The identifier of the locale that could not be found. + self.identifier = identifier + + +class Locale(object): + """Representation of a specific locale. + + >>> locale = Locale('en', 'US') + >>> repr(locale) + "Locale('en', territory='US')" + >>> locale.display_name + u'English (United States)' + + A `Locale` object can also be instantiated from a raw locale string: + + >>> locale = Locale.parse('en-US', sep='-') + >>> repr(locale) + "Locale('en', territory='US')" + + `Locale` objects provide access to a collection of locale data, such as + territory and language names, number and date format patterns, and more: + + >>> locale.number_symbols['decimal'] + u'.' + + If a locale is requested for which no locale data is available, an + `UnknownLocaleError` is raised: + + >>> Locale.parse('en_DE') + Traceback (most recent call last): + ... + UnknownLocaleError: unknown locale 'en_DE' + + For more information see :rfc:`3066`. + """ + + def __init__(self, language, territory=None, script=None, variant=None): + """Initialize the locale object from the given identifier components. + + >>> locale = Locale('en', 'US') + >>> locale.language + 'en' + >>> locale.territory + 'US' + + :param language: the language code + :param territory: the territory (country or region) code + :param script: the script code + :param variant: the variant code + :raise `UnknownLocaleError`: if no locale data is available for the + requested locale + """ + #: the language code + self.language = language + #: the territory (country or region) code + self.territory = territory + #: the script code + self.script = script + #: the variant code + self.variant = variant + self.__data = None + + identifier = str(self) + if not localedata.exists(identifier): + raise UnknownLocaleError(identifier) + + @classmethod + def default(cls, category=None, aliases=LOCALE_ALIASES): + """Return the system default locale for the specified category. + + >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']: + ... 
os.environ[name] = ''
+        >>> os.environ['LANG'] = 'fr_FR.UTF-8'
+        >>> Locale.default('LC_MESSAGES')
+        Locale('fr', territory='FR')
+
+        The following fallback variables are always considered:
+
+        - ``LANGUAGE``
+        - ``LC_ALL``
+        - ``LC_CTYPE``
+        - ``LANG``
+
+        :param category: one of the ``LC_XXX`` environment variable names
+        :param aliases: a dictionary of aliases for locale identifiers
+        """
+        # XXX: use likely subtag expansion here instead of the
+        # aliases dictionary.
+        locale_string = default_locale(category, aliases=aliases)
+        return cls.parse(locale_string)
+
+    @classmethod
+    def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES):
+        """Find the best match between available and requested locale strings.
+
+        >>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
+        Locale('de', territory='DE')
+        >>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
+        Locale('de')
+        >>> Locale.negotiate(['de_DE', 'de'], ['en_US'])
+
+        You can specify the character used in the locale identifiers to separate
+        the different components. This separator is applied to both lists. Also,
+        case is ignored in the comparison:
+
+        >>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
+        Locale('de', territory='DE')
+
+        :param preferred: the list of locale identifiers preferred by the user
+        :param available: the list of locale identifiers available
+        :param aliases: a dictionary of aliases for locale identifiers
+        """
+        identifier = negotiate_locale(preferred, available, sep=sep,
+                                      aliases=aliases)
+        if identifier:
+            return Locale.parse(identifier, sep=sep)
+
+    @classmethod
+    def parse(cls, identifier, sep='_', resolve_likely_subtags=True):
+        """Create a `Locale` instance for the given locale identifier.
+
+        >>> l = Locale.parse('de-DE', sep='-')
+        >>> l.display_name
+        u'Deutsch (Deutschland)'
+
+        If the `identifier` parameter is not a string, but actually a `Locale`
+        object, that object is returned:
+
+        >>> Locale.parse(l)
+        Locale('de', territory='DE')
+
+        This can also resolve likely subtags, which it does by default. This
+        is useful, for instance, to figure out the most likely locale for a
+        territory; you can use ``'und'`` as the language tag:
+
+        >>> Locale.parse('und_AT')
+        Locale('de', territory='AT')
+
+        :param identifier: the locale identifier string
+        :param sep: optional component separator
+        :param resolve_likely_subtags: if this is specified then a locale will
+                                       have its likely subtag resolved if the
+                                       locale otherwise does not exist. For
+                                       instance ``zh_TW`` by itself is not a
+                                       locale that exists but Babel can
+                                       automatically expand it to the full
+                                       form of ``zh_hant_TW``. Note that this
+                                       expansion only takes place if no such
+                                       locale exists otherwise. For instance
+                                       there is a locale ``en`` that can exist
+                                       by itself.
+        :raise `ValueError`: if the string does not appear to be a valid locale
+                             identifier
+        :raise `UnknownLocaleError`: if no locale data is available for the
+                                     requested locale
+        """
+        if identifier is None:
+            return None
+        elif isinstance(identifier, Locale):
+            return identifier
+        elif not isinstance(identifier, string_types):
+            raise TypeError('Unexpected value for identifier: %r' % (identifier,))
+
+        parts = parse_locale(identifier, sep=sep)
+        input_id = get_locale_identifier(parts)
+
+        def _try_load(parts):
+            try:
+                return cls(*parts)
+            except UnknownLocaleError:
+                return None
+
+        def _try_load_reducing(parts):
+            # Success on first hit, return it.
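+            # Fallback order: the full (language, territory, script,
+            # variant) tuple is tried first, then just (language, territory).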
+ locale = _try_load(parts) + if locale is not None: + return locale + + # Now try without script and variant + locale = _try_load(parts[:2]) + if locale is not None: + return locale + + locale = _try_load(parts) + if locale is not None: + return locale + if not resolve_likely_subtags: + raise UnknownLocaleError(input_id) + + # From here onwards is some very bad likely subtag resolving. This + # whole logic is not entirely correct but good enough (tm) for the + # time being. This has been added so that zh_TW does not cause + # errors for people when they upgrade. Later we should properly + # implement ICU like fuzzy locale objects and provide a way to + # maximize and minimize locale tags. + + language, territory, script, variant = parts + language = get_global('language_aliases').get(language, language) + territory = get_global('territory_aliases').get(territory, territory) + script = get_global('script_aliases').get(script, script) + variant = get_global('variant_aliases').get(variant, variant) + + if territory == 'ZZ': + territory = None + if script == 'Zzzz': + script = None + + parts = language, territory, script, variant + + # First match: try the whole identifier + new_id = get_locale_identifier(parts) + likely_subtag = get_global('likely_subtags').get(new_id) + if likely_subtag is not None: + locale = _try_load_reducing(parse_locale(likely_subtag)) + if locale is not None: + return locale + + # If we did not find anything so far, try again with a + # simplified identifier that is just the language + likely_subtag = get_global('likely_subtags').get(language) + if likely_subtag is not None: + language2, _, script2, variant2 = parse_locale(likely_subtag) + locale = _try_load_reducing((language2, territory, script2, variant2)) + if locale is not None: + return locale + + raise UnknownLocaleError(input_id) + + def __eq__(self, other): + for key in ('language', 'territory', 'script', 'variant'): + if not hasattr(other, key): + return False + return (self.language == other.language) and \ + (self.territory == other.territory) and \ + (self.script == other.script) and \ + (self.variant == other.variant) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + parameters = [''] + for key in ('territory', 'script', 'variant'): + value = getattr(self, key) + if value is not None: + parameters.append('%s=%r' % (key, value)) + parameter_string = '%r' % self.language + ', '.join(parameters) + return 'Locale(%s)' % parameter_string + + def __str__(self): + return get_locale_identifier((self.language, self.territory, + self.script, self.variant)) + + @property + def _data(self): + if self.__data is None: + self.__data = localedata.LocaleDataDict(localedata.load(str(self))) + return self.__data + + def get_display_name(self, locale=None): + """Return the display name of the locale using the given locale. + + The display name will include the language, territory, script, and + variant, if those are specified. 
+
+        >>> Locale('zh', 'CN', script='Hans').get_display_name('en')
+        u'Chinese (Simplified, China)'
+
+        :param locale: the locale to use
+        """
+        if locale is None:
+            locale = self
+        locale = Locale.parse(locale)
+        retval = locale.languages.get(self.language)
+        if self.territory or self.script or self.variant:
+            details = []
+            if self.script:
+                details.append(locale.scripts.get(self.script))
+            if self.territory:
+                details.append(locale.territories.get(self.territory))
+            if self.variant:
+                details.append(locale.variants.get(self.variant))
+            details = filter(None, details)
+            if details:
+                retval += ' (%s)' % u', '.join(details)
+        return retval
+
+    display_name = property(get_display_name, doc="""\
+        The localized display name of the locale.
+
+        >>> Locale('en').display_name
+        u'English'
+        >>> Locale('en', 'US').display_name
+        u'English (United States)'
+        >>> Locale('sv').display_name
+        u'svenska'
+
+        :type: `unicode`
+        """)
+
+    def get_language_name(self, locale=None):
+        """Return the language of this locale in the given locale.
+
+        >>> Locale('zh', 'CN', script='Hans').get_language_name('de')
+        u'Chinesisch'
+
+        .. versionadded:: 1.0
+
+        :param locale: the locale to use
+        """
+        if locale is None:
+            locale = self
+        locale = Locale.parse(locale)
+        return locale.languages.get(self.language)
+
+    language_name = property(get_language_name, doc="""\
+        The localized language name of the locale.
+
+        >>> Locale('en', 'US').language_name
+        u'English'
+        """)
+
+    def get_territory_name(self, locale=None):
+        """Return the territory name in the given locale."""
+        if locale is None:
+            locale = self
+        locale = Locale.parse(locale)
+        return locale.territories.get(self.territory)
+
+    territory_name = property(get_territory_name, doc="""\
+        The localized territory name of the locale if available.
+
+        >>> Locale('de', 'DE').territory_name
+        u'Deutschland'
+        """)
+
+    def get_script_name(self, locale=None):
+        """Return the script name in the given locale."""
+        if locale is None:
+            locale = self
+        locale = Locale.parse(locale)
+        return locale.scripts.get(self.script)
+
+    script_name = property(get_script_name, doc="""\
+        The localized script name of the locale if available.
+
+        >>> Locale('ms', 'SG', script='Latn').script_name
+        u'Latin'
+        """)
+
+    @property
+    def english_name(self):
+        """The English display name of the locale.
+
+        >>> Locale('de').english_name
+        u'German'
+        >>> Locale('de', 'DE').english_name
+        u'German (Germany)'
+
+        :type: `unicode`"""
+        return self.get_display_name(Locale('en'))
+
+    #{ General Locale Display Names
+
+    @property
+    def languages(self):
+        """Mapping of language codes to translated language names.
+
+        >>> Locale('de', 'DE').languages['ja']
+        u'Japanisch'
+
+        See ISO 639 for more information.
+        """
+        return self._data['languages']
+
+    @property
+    def scripts(self):
+        """Mapping of script codes to translated script names.
+
+        >>> Locale('en', 'US').scripts['Hira']
+        u'Hiragana'
+
+        See ISO 15924 for more information.
+        """
+        return self._data['scripts']
+
+    @property
+    def territories(self):
+        """Mapping of territory codes to translated territory names.
+
+        >>> Locale('es', 'CO').territories['DE']
+        u'Alemania'
+
+        See ISO 3166 for more information.
+        """
+        return self._data['territories']
+
+    @property
+    def variants(self):
+        """Mapping of variant codes to translated variant names.
+
+        >>> Locale('de', 'DE').variants['1901']
+        u'Alte deutsche Rechtschreibung'
+        """
+        return self._data['variants']
+
+    #{ Number Formatting
+
+    @property
+    def currencies(self):
+        """Mapping of currency codes to translated currency names. This
+        only returns the generic form of the currency name, not the count
+        specific one. If an actual number is requested use the
+        :func:`babel.numbers.get_currency_name` function.
+
+        >>> Locale('en').currencies['COP']
+        u'Colombian Peso'
+        >>> Locale('de', 'DE').currencies['COP']
+        u'Kolumbianischer Peso'
+        """
+        return self._data['currency_names']
+
+    @property
+    def currency_symbols(self):
+        """Mapping of currency codes to symbols.
+
+        >>> Locale('en', 'US').currency_symbols['USD']
+        u'$'
+        >>> Locale('es', 'CO').currency_symbols['USD']
+        u'US$'
+        """
+        return self._data['currency_symbols']
+
+    @property
+    def number_symbols(self):
+        """Symbols used in number formatting.
+
+        >>> Locale('fr', 'FR').number_symbols['decimal']
+        u','
+        """
+        return self._data['number_symbols']
+
+    @property
+    def decimal_formats(self):
+        """Locale patterns for decimal number formatting.
+
+        >>> Locale('en', 'US').decimal_formats[None]
+        <NumberPattern u'#,##0.###'>
+        """
+        return self._data['decimal_formats']
+
+    @property
+    def currency_formats(self):
+        """Locale patterns for currency number formatting.
+
+        >>> print Locale('en', 'US').currency_formats[None]
+        <NumberPattern u'\xa4#,##0.00'>
+        """
+        return self._data['currency_formats']
+
+    @property
+    def percent_formats(self):
+        """Locale patterns for percent number formatting.
+
+        >>> Locale('en', 'US').percent_formats[None]
+        <NumberPattern u'#,##0%'>
+        """
+        return self._data['percent_formats']
+
+    @property
+    def scientific_formats(self):
+        """Locale patterns for scientific number formatting.
+
+        >>> Locale('en', 'US').scientific_formats[None]
+        <NumberPattern u'#E0'>
+        """
+        return self._data['scientific_formats']
+
+    #{ Calendar Information and Date Formatting
+
+    @property
+    def periods(self):
+        """Locale display names for day periods (AM/PM).
+
+        >>> Locale('en', 'US').periods['am']
+        u'AM'
+        """
+        return self._data['periods']
+
+    @property
+    def days(self):
+        """Locale display names for weekdays.
+
+        >>> Locale('de', 'DE').days['format']['wide'][3]
+        u'Donnerstag'
+        """
+        return self._data['days']
+
+    @property
+    def months(self):
+        """Locale display names for months.
+
+        >>> Locale('de', 'DE').months['format']['wide'][10]
+        u'Oktober'
+        """
+        return self._data['months']
+
+    @property
+    def quarters(self):
+        """Locale display names for quarters.
+
+        >>> Locale('de', 'DE').quarters['format']['wide'][1]
+        u'1. Quartal'
+        """
+        return self._data['quarters']
+
+    @property
+    def eras(self):
+        """Locale display names for eras.
+
+        >>> Locale('en', 'US').eras['wide'][1]
+        u'Anno Domini'
+        >>> Locale('en', 'US').eras['abbreviated'][0]
+        u'BC'
+        """
+        return self._data['eras']
+
+    @property
+    def time_zones(self):
+        """Locale display names for time zones.
+
+        >>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
+        u'British Summer Time'
+        >>> Locale('en', 'US').time_zones['America/St_Johns']['city']
+        u'St. John\u2019s'
+        """
+        return self._data['time_zones']
+
+    @property
+    def meta_zones(self):
+        """Locale display names for meta time zones.
+
+        Meta time zones are basically groups of different Olson time zones that
+        have the same GMT offset and daylight savings time.
+
+        >>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight']
+        u'Central European Summer Time'
+
+        .. versionadded:: 0.9
+        """
+        return self._data['meta_zones']
+
+    @property
+    def zone_formats(self):
+        """Patterns related to the formatting of time zones.
+
+        >>> Locale('en', 'US').zone_formats['fallback']
+        u'%(1)s (%(0)s)'
+        >>> Locale('pt', 'BR').zone_formats['region']
+        u'Hor\\xe1rio %s'
+
+        .. versionadded:: 0.9
+        """
+        return self._data['zone_formats']
+
+    @property
+    def first_week_day(self):
+        """The first day of a week, with 0 being Monday.
+
+        >>> Locale('de', 'DE').first_week_day
+        0
+        >>> Locale('en', 'US').first_week_day
+        6
+        """
+        return self._data['week_data']['first_day']
+
+    @property
+    def weekend_start(self):
+        """The day the weekend starts, with 0 being Monday.
+
+        >>> Locale('de', 'DE').weekend_start
+        5
+        """
+        return self._data['week_data']['weekend_start']
+
+    @property
+    def weekend_end(self):
+        """The day the weekend ends, with 0 being Monday.
+
+        >>> Locale('de', 'DE').weekend_end
+        6
+        """
+        return self._data['week_data']['weekend_end']
+
+    @property
+    def min_week_days(self):
+        """The minimum number of days in a week so that the week is counted as
+        the first week of a year or month.
+
+        >>> Locale('de', 'DE').min_week_days
+        4
+        """
+        return self._data['week_data']['min_days']
+
+    @property
+    def date_formats(self):
+        """Locale patterns for date formatting.
+
+        >>> Locale('en', 'US').date_formats['short']
+        <DateTimePattern u'M/d/yy'>
+        >>> Locale('fr', 'FR').date_formats['long']
+        <DateTimePattern u'd MMMM y'>
+        """
+        return self._data['date_formats']
+
+    @property
+    def time_formats(self):
+        """Locale patterns for time formatting.
+
+        >>> Locale('en', 'US').time_formats['short']
+        <DateTimePattern u'h:mm a'>
+        >>> Locale('fr', 'FR').time_formats['long']
+        <DateTimePattern u'HH:mm:ss z'>
+        """
+        return self._data['time_formats']
+
+    @property
+    def datetime_formats(self):
+        """Locale patterns for datetime formatting.
+
+        >>> Locale('en').datetime_formats['full']
+        u"{1} 'at' {0}"
+        >>> Locale('th').datetime_formats['medium']
+        u'{1}, {0}'
+        """
+        return self._data['datetime_formats']
+
+    @property
+    def plural_form(self):
+        """Plural rules for the locale.
+
+        >>> Locale('en').plural_form(1)
+        'one'
+        >>> Locale('en').plural_form(0)
+        'other'
+        >>> Locale('fr').plural_form(0)
+        'one'
+        >>> Locale('ru').plural_form(100)
+        'many'
+        """
+        return self._data['plural_form']
+
+
+def default_locale(category=None, aliases=LOCALE_ALIASES):
+    """Returns the system default locale for a given category, based on
+    environment variables.
+
+    >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
+    ...     os.environ[name] = ''
os.environ[name] = '' + >>> os.environ['LANG'] = 'fr_FR.UTF-8' + >>> default_locale('LC_MESSAGES') + 'fr_FR' + + The "C" or "POSIX" pseudo-locales are treated as aliases for the + "en_US_POSIX" locale: + + >>> os.environ['LC_MESSAGES'] = 'POSIX' + >>> default_locale('LC_MESSAGES') + 'en_US_POSIX' + + The following fallback variables are always considered: + + - ``LANGUAGE`` + - ``LC_ALL`` + - ``LC_CTYPE`` + - ``LANG`` + + :param category: one of the ``LC_XXX`` environment variable names + :param aliases: a dictionary of aliases for locale identifiers + """ + varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG') + for name in filter(None, varnames): + locale = os.getenv(name) + if locale: + if name == 'LANGUAGE' and ':' in locale: + # the LANGUAGE variable may contain a colon-separated list of + # language codes; we just pick the language on the list + locale = locale.split(':')[0] + if locale in ('C', 'POSIX'): + locale = 'en_US_POSIX' + elif aliases and locale in aliases: + locale = aliases[locale] + try: + return get_locale_identifier(parse_locale(locale)) + except ValueError: + pass + + +def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES): + """Find the best match between available and requested locale strings. + + >>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT']) + 'de_DE' + >>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de']) + 'de' + + Case is ignored by the algorithm; the result uses the case of the preferred + locale identifier: + + >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at']) + 'de_DE' + + By default, some web browsers unfortunately do not include the territory + in the locale identifier for many locales, and some don't even allow the + user to easily add the territory. So while you may prefer using qualified + locale identifiers in your web-application, they would not normally match + the language-only locale sent by such browsers. To work around that, this + function uses a default mapping of commonly used language-only locale + identifiers to identifiers including the territory: + + >>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US']) + 'ja_JP' + + Some browsers even use an incorrect or outdated language code, such as "no" + for Norwegian, where the correct locale identifier would actually be "nb_NO" + (Bokmål) or "nn_NO" (Nynorsk). The aliases are intended to take care of + such cases, too: + + >>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE']) + 'nb_NO' + + You can override this default mapping by passing a different `aliases` + dictionary to this function, or you can bypass the behavior altogether by + setting the `aliases` parameter to `None`.
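In a web application the `preferred` list typically comes from the HTTP Accept-Language header. A minimal sketch of that flow, using only functions defined in this module; the `supported` and `requested` values here are assumed, application-defined examples:

    from babel.core import negotiate_locale, parse_locale, get_locale_identifier

    supported = ['de_DE', 'fr_FR', 'en_US']   # assumed application-defined list
    requested = ['fr-CA', 'fr', 'en']         # e.g. parsed from Accept-Language

    # Accept-Language tags use "-"; normalize them to the "_" form used above.
    requested = [get_locale_identifier(parse_locale(tag, sep='-'))
                 for tag in requested]

    print(negotiate_locale(requested, supported))  # 'fr_FR', via the 'fr' alias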
+ + :param preferred: the list of locale strings preferred by the user + :param available: the list of locale strings available + :param sep: character that separates the different parts of the locale + strings + :param aliases: a dictionary of aliases for locale identifiers + """ + available = [a.lower() for a in available if a] + for locale in preferred: + ll = locale.lower() + if ll in available: + return locale + if aliases: + alias = aliases.get(ll) + if alias: + alias = alias.replace('_', sep) + if alias.lower() in available: + return alias + parts = locale.split(sep) + if len(parts) > 1 and parts[0].lower() in available: + return parts[0] + return None + + +def parse_locale(identifier, sep='_'): + """Parse a locale identifier into a tuple of the form ``(language, + territory, script, variant)``. + + >>> parse_locale('zh_CN') + ('zh', 'CN', None, None) + >>> parse_locale('zh_Hans_CN') + ('zh', 'CN', 'Hans', None) + + The default component separator is "_", but a different separator can be + specified using the `sep` parameter: + + >>> parse_locale('zh-CN', sep='-') + ('zh', 'CN', None, None) + + If the identifier cannot be parsed into a locale, a `ValueError` exception + is raised: + + >>> parse_locale('not_a_LOCALE_String') + Traceback (most recent call last): + ... + ValueError: 'not_a_LOCALE_String' is not a valid locale identifier + + Encoding information and locale modifiers are removed from the identifier: + + >>> parse_locale('it_IT@euro') + ('it', 'IT', None, None) + >>> parse_locale('en_US.UTF-8') + ('en', 'US', None, None) + >>> parse_locale('de_DE.iso885915@euro') + ('de', 'DE', None, None) + + See :rfc:`4646` for more information. + + :param identifier: the locale identifier string + :param sep: character that separates the different components of the locale + identifier + :raise `ValueError`: if the string does not appear to be a valid locale + identifier + """ + if '.' in identifier: + # this is probably the charset/encoding, which we don't care about + identifier = identifier.split('.', 1)[0] + if '@' in identifier: + # this is a locale modifier such as @euro, which we don't care about + # either + identifier = identifier.split('@', 1)[0] + + parts = identifier.split(sep) + lang = parts.pop(0).lower() + if not lang.isalpha(): + raise ValueError('expected only letters, got %r' % lang) + + script = territory = variant = None + if parts: + if len(parts[0]) == 4 and parts[0].isalpha(): + script = parts.pop(0).title() + + if parts: + if len(parts[0]) == 2 and parts[0].isalpha(): + territory = parts.pop(0).upper() + elif len(parts[0]) == 3 and parts[0].isdigit(): + territory = parts.pop(0) + + if parts: + if len(parts[0]) == 4 and parts[0][0].isdigit() or \ + len(parts[0]) >= 5 and parts[0][0].isalpha(): + variant = parts.pop() + + if parts: + raise ValueError('%r is not a valid locale identifier' % identifier) + + return lang, territory, script, variant + + +def get_locale_identifier(tup, sep='_'): + """The reverse of :func:`parse_locale`. It creates a locale identifier out + of a ``(language, territory, script, variant)`` tuple. Items can be set to + ``None`` and trailing ``None``\s can also be left out of the tuple. + + >>> get_locale_identifier(('de', 'DE', None, '1999')) + 'de_DE_1999' + + .. versionadded:: 1.0 + + :param tup: the tuple as returned by :func:`parse_locale`. + :param sep: the separator for the identifier. 
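A quick round-trip sketch of `parse_locale` and `get_locale_identifier`; note the documented ``(language, territory, script, variant)`` tuple order:

    from babel.core import parse_locale, get_locale_identifier

    parts = parse_locale('de_DE.iso885915@euro')  # encoding and modifier are dropped
    print(parts)                                  # ('de', 'DE', None, None)
    print(get_locale_identifier(parts))           # 'de_DE'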
+ """ + tup = tuple(tup[:4]) + lang, territory, script, variant = tup + (None,) * (4 - len(tup)) + return sep.join(filter(None, (lang, script, territory, variant))) diff --git a/awx/lib/site-packages/babel/dates.py b/awx/lib/site-packages/babel/dates.py new file mode 100644 index 0000000000..72674e8aa4 --- /dev/null +++ b/awx/lib/site-packages/babel/dates.py @@ -0,0 +1,1181 @@ +# -*- coding: utf-8 -*- +""" + babel.dates + ~~~~~~~~~~~ + + Locale dependent formatting and parsing of dates and times. + + The default locale for the functions in this module is determined by the + following environment variables, in that order: + + * ``LC_TIME``, + * ``LC_ALL``, and + * ``LANG`` + + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. +""" + +from __future__ import division + +import re +import pytz as _pytz + +from datetime import date, datetime, time, timedelta +from bisect import bisect_right + +from babel.core import default_locale, get_global, Locale +from babel.util import UTC, LOCALTZ +from babel._compat import string_types, integer_types, number_types + + +LC_TIME = default_locale('LC_TIME') + +# Aliases for use in scopes where the modules are shadowed by local variables +date_ = date +datetime_ = datetime +time_ = time + + +def get_timezone(zone=None): + """Looks up a timezone by name and returns it. The timezone object + returned comes from ``pytz`` and corresponds to the `tzinfo` interface and + can be used with all of the functions of Babel that operate with dates. + + If a timezone is not known a :exc:`LookupError` is raised. If `zone` + is ``None`` a local zone object is returned. + + :param zone: the name of the timezone to look up. If a timezone object + itself is passed in, mit's returned unchanged. + """ + if zone is None: + return LOCALTZ + if not isinstance(zone, string_types): + return zone + try: + return _pytz.timezone(zone) + except _pytz.UnknownTimeZoneError: + raise LookupError('Unknown timezone %s' % zone) + + +def get_next_timezone_transition(zone=None, dt=None): + """Given a timezone it will return a :class:`TimezoneTransition` object + that holds the information about the next timezone transition that's going + to happen. For instance this can be used to detect when the next DST + change is going to happen and how it looks like. + + The transition is calculated relative to the given datetime object. The + next transition that follows the date is used. If a transition cannot + be found the return value will be `None`. + + Transition information can only be provided for timezones returned by + the :func:`get_timezone` function. + + :param zone: the timezone for which the transition should be looked up. + If not provided the local timezone is used. + :param dt: the date after which the next transition should be found. + If not given the current time is assumed. + """ + zone = get_timezone(zone) + if dt is None: + dt = datetime.utcnow() + else: + dt = dt.replace(tzinfo=None) + + if not hasattr(zone, '_utc_transition_times'): + raise TypeError('Given timezone does not have UTC transition ' + 'times. 
This can happen because the operating ' 'system fallback local timezone is used or a ' 'custom timezone object') + + try: + idx = max(0, bisect_right(zone._utc_transition_times, dt)) + old_trans = zone._transition_info[idx - 1] + new_trans = zone._transition_info[idx] + old_tz = zone._tzinfos[old_trans] + new_tz = zone._tzinfos[new_trans] + except (LookupError, ValueError): + return None + + return TimezoneTransition( + activates=zone._utc_transition_times[idx], + from_tzinfo=old_tz, + to_tzinfo=new_tz, + reference_date=dt + ) + + +class TimezoneTransition(object): + """A helper object that represents the return value from + :func:`get_next_timezone_transition`. + """ + + def __init__(self, activates, from_tzinfo, to_tzinfo, reference_date=None): + #: the time of the activation of the timezone transition in UTC. + self.activates = activates + #: the timezone from where the transition starts. + self.from_tzinfo = from_tzinfo + #: the timezone for after the transition. + self.to_tzinfo = to_tzinfo + #: the reference date that was provided. This is the `dt` parameter + #: to the :func:`get_next_timezone_transition`. + self.reference_date = reference_date + + @property + def from_tz(self): + """The name of the timezone before the transition.""" + return self.from_tzinfo._tzname + + @property + def to_tz(self): + """The name of the timezone after the transition.""" + return self.to_tzinfo._tzname + + @property + def from_offset(self): + """The UTC offset in seconds before the transition.""" + return int(self.from_tzinfo._utcoffset.total_seconds()) + + @property + def to_offset(self): + """The UTC offset in seconds after the transition.""" + return int(self.to_tzinfo._utcoffset.total_seconds()) + + def __repr__(self): + return '<TimezoneTransition %s -> %s (%s)>' % ( + self.from_tz, + self.to_tz, + self.activates, + ) + + +def get_period_names(locale=LC_TIME): + """Return the names for day periods (AM/PM) used by the locale. + + >>> get_period_names(locale='en_US')['am'] + u'AM' + + :param locale: the `Locale` object, or a locale string + """ + return Locale.parse(locale).periods + + +def get_day_names(width='wide', context='format', locale=LC_TIME): + """Return the day names used by the locale for the specified format. + + >>> get_day_names('wide', locale='en_US')[1] + u'Tuesday' + >>> get_day_names('abbreviated', locale='es')[1] + u'mar' + >>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1] + u'D' + + :param width: the width to use, one of "wide", "abbreviated", or "narrow" + :param context: the context, either "format" or "stand-alone" + :param locale: the `Locale` object, or a locale string + """ + return Locale.parse(locale).days[context][width] + + +def get_month_names(width='wide', context='format', locale=LC_TIME): + """Return the month names used by the locale for the specified format. + + >>> get_month_names('wide', locale='en_US')[1] + u'January' + >>> get_month_names('abbreviated', locale='es')[1] + u'ene' + >>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1] + u'J' + + :param width: the width to use, one of "wide", "abbreviated", or "narrow" + :param context: the context, either "format" or "stand-alone" + :param locale: the `Locale` object, or a locale string + """ + return Locale.parse(locale).months[context][width] + + +def get_quarter_names(width='wide', context='format', locale=LC_TIME): + """Return the quarter names used by the locale for the specified format.
+ + >>> get_quarter_names('wide', locale='en_US')[1] + u'1st quarter' + >>> get_quarter_names('abbreviated', locale='de_DE')[1] + u'Q1' + + :param width: the width to use, one of "wide", "abbreviated", or "narrow" + :param context: the context, either "format" or "stand-alone" + :param locale: the `Locale` object, or a locale string + """ + return Locale.parse(locale).quarters[context][width] + + +def get_era_names(width='wide', locale=LC_TIME): + """Return the era names used by the locale for the specified format. + + >>> get_era_names('wide', locale='en_US')[1] + u'Anno Domini' + >>> get_era_names('abbreviated', locale='de_DE')[1] + u'n. Chr.' + + :param width: the width to use, either "wide", "abbreviated", or "narrow" + :param locale: the `Locale` object, or a locale string + """ + return Locale.parse(locale).eras[width] + + +def get_date_format(format='medium', locale=LC_TIME): + """Return the date formatting patterns used by the locale for the specified + format. + + >>> get_date_format(locale='en_US') + <DateTimePattern u'MMM d, y'> + >>> get_date_format('full', locale='de_DE') + <DateTimePattern u'EEEE, d. MMMM y'> + + :param format: the format to use, one of "full", "long", "medium", or + "short" + :param locale: the `Locale` object, or a locale string + """ + return Locale.parse(locale).date_formats[format] + + +def get_datetime_format(format='medium', locale=LC_TIME): + """Return the datetime formatting patterns used by the locale for the + specified format. + + >>> get_datetime_format(locale='en_US') + u'{1}, {0}' + + :param format: the format to use, one of "full", "long", "medium", or + "short" + :param locale: the `Locale` object, or a locale string + """ + patterns = Locale.parse(locale).datetime_formats + if format not in patterns: + format = None + return patterns[format] + + +def get_time_format(format='medium', locale=LC_TIME): + """Return the time formatting patterns used by the locale for the specified + format. + + >>> get_time_format(locale='en_US') + <DateTimePattern u'h:mm:ss a'> + >>> get_time_format('full', locale='de_DE') + <DateTimePattern u'HH:mm:ss zzzz'> + + :param format: the format to use, one of "full", "long", "medium", or + "short" + :param locale: the `Locale` object, or a locale string + """ + return Locale.parse(locale).time_formats[format] + + +def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME): + """Return the timezone associated with the given `datetime` object formatted + as string indicating the offset from GMT. + + >>> dt = datetime(2007, 4, 1, 15, 30) + >>> get_timezone_gmt(dt, locale='en') + u'GMT+00:00' + + >>> tz = get_timezone('America/Los_Angeles') + >>> dt = datetime(2007, 4, 1, 15, 30, tzinfo=tz) + >>> get_timezone_gmt(dt, locale='en') + u'GMT-08:00' + >>> get_timezone_gmt(dt, 'short', locale='en') + u'-0800' + + The long format depends on the locale; in France, for example, the + acronym UTC is used instead of GMT: + + >>> get_timezone_gmt(dt, 'long', locale='fr_FR') + u'UTC-08:00' + + ..
versionadded:: 0.9 + + :param datetime: the ``datetime`` object; if `None`, the current date and + time in UTC is used + :param width: either "long" or "short" + :param locale: the `Locale` object, or a locale string + """ + if datetime is None: + datetime = datetime_.utcnow() + elif isinstance(datetime, integer_types): + datetime = datetime_.utcfromtimestamp(datetime).time() + if datetime.tzinfo is None: + datetime = datetime.replace(tzinfo=UTC) + locale = Locale.parse(locale) + + offset = datetime.tzinfo.utcoffset(datetime) + seconds = offset.days * 24 * 60 * 60 + offset.seconds + hours, seconds = divmod(seconds, 3600) + if width == 'short': + pattern = u'%+03d%02d' + else: + pattern = locale.zone_formats['gmt'] % '%+03d:%02d' + return pattern % (hours, seconds // 60) + + +def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME): + """Return a representation of the given timezone using "location format". + + The result depends on both the local display name of the country and the + city associated with the time zone: + + >>> tz = get_timezone('America/St_Johns') + >>> get_timezone_location(tz, locale='de_DE') + u"Kanada (St. John's) Zeit" + >>> tz = get_timezone('America/Mexico_City') + >>> get_timezone_location(tz, locale='de_DE') + u'Mexiko (Mexiko-Stadt) Zeit' + + If the timezone is associated with a country that uses only a single + timezone, just the localized country name is returned: + + >>> tz = get_timezone('Europe/Berlin') + >>> get_timezone_name(tz, locale='de_DE') + u'Mitteleurop\\xe4ische Zeit' + + .. versionadded:: 0.9 + + :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines + the timezone; if `None`, the current date and time in + UTC is assumed + :param locale: the `Locale` object, or a locale string + :return: the localized timezone name using location format + """ + if dt_or_tzinfo is None: + dt = datetime.now() + tzinfo = LOCALTZ + elif isinstance(dt_or_tzinfo, string_types): + dt = None + tzinfo = get_timezone(dt_or_tzinfo) + elif isinstance(dt_or_tzinfo, integer_types): + dt = None + tzinfo = UTC + elif isinstance(dt_or_tzinfo, (datetime, time)): + dt = dt_or_tzinfo + if dt.tzinfo is not None: + tzinfo = dt.tzinfo + else: + tzinfo = UTC + else: + dt = None + tzinfo = dt_or_tzinfo + locale = Locale.parse(locale) + + if hasattr(tzinfo, 'zone'): + zone = tzinfo.zone + else: + zone = tzinfo.tzname(dt or datetime.utcnow()) + + # Get the canonical time-zone code + zone = get_global('zone_aliases').get(zone, zone) + + info = locale.time_zones.get(zone, {}) + + # Otherwise, if there is only one timezone for the country, return the + # localized country name + region_format = locale.zone_formats['region'] + territory = get_global('zone_territories').get(zone) + if territory not in locale.territories: + territory = 'ZZ' # invalid/unknown + territory_name = locale.territories[territory] + if territory and len(get_global('territory_zones').get(territory, [])) == 1: + return region_format % (territory_name) + + # Otherwise, include the city in the output + fallback_format = locale.zone_formats['fallback'] + if 'city' in info: + city_name = info['city'] + else: + metazone = get_global('meta_zones').get(zone) + metazone_info = locale.meta_zones.get(metazone, {}) + if 'city' in metazone_info: + city_name = metazone_info['city'] + elif '/' in zone: + city_name = zone.split('/', 1)[1].replace('_', ' ') + else: + city_name = zone.replace('_', ' ') + + return region_format % (fallback_format % { + '0': city_name, + '1': territory_name + }) + + +def 
get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False, + locale=LC_TIME, zone_variant=None): + r"""Return the localized display name for the given timezone. The timezone + may be specified using a ``datetime`` or `tzinfo` object. + + >>> dt = time(15, 30, tzinfo=get_timezone('America/Los_Angeles')) + >>> get_timezone_name(dt, locale='en_US') + u'Pacific Standard Time' + >>> get_timezone_name(dt, width='short', locale='en_US') + u'PST' + + If this function gets passed only a `tzinfo` object and no concrete + `datetime`, the returned display name is independent of daylight savings + time. This can be used for example for selecting timezones, or to set the + time of events that recur across DST changes: + + >>> tz = get_timezone('America/Los_Angeles') + >>> get_timezone_name(tz, locale='en_US') + u'Pacific Time' + >>> get_timezone_name(tz, 'short', locale='en_US') + u'PT' + + If no localized display name for the timezone is available, and the timezone + is associated with a country that uses only a single timezone, the name of + that country is returned, formatted according to the locale: + + >>> tz = get_timezone('Europe/Berlin') + >>> get_timezone_name(tz, locale='de_DE') + u'Mitteleurop\xe4ische Zeit' + >>> get_timezone_name(tz, locale='pt_BR') + u'Hor\xe1rio da Europa Central' + + On the other hand, if the country uses multiple timezones, the city is also + included in the representation: + + >>> tz = get_timezone('America/St_Johns') + >>> get_timezone_name(tz, locale='de_DE') + u'Neufundland-Zeit' + + Note that short format is currently not supported for all timezones and + all locales. This is partially because not every timezone has a short + code in every locale. In that case it currently falls back to the long + format. + + For more information see `LDML Appendix J: Time Zone Display Names + <http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_ + + .. versionadded:: 0.9 + + .. versionchanged:: 1.0 + Added `zone_variant` support. + + :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines + the timezone; if a ``tzinfo`` object is used, the + resulting display name will be generic, i.e. + independent of daylight savings time; if `None`, the + current date in UTC is assumed + :param width: either "long" or "short" + :param uncommon: deprecated and ignored + :param zone_variant: defines the zone variation to return. By default the + variation is defined from the datetime object + passed in. If no datetime object is passed in, the + ``'generic'`` variation is assumed. The following + values are valid: ``'generic'``, ``'daylight'`` and + ``'standard'``.
+ :param locale: the `Locale` object, or a locale string + """ + if dt_or_tzinfo is None: + dt = datetime.now() + tzinfo = LOCALTZ + elif isinstance(dt_or_tzinfo, string_types): + dt = None + tzinfo = get_timezone(dt_or_tzinfo) + elif isinstance(dt_or_tzinfo, integer_types): + dt = None + tzinfo = UTC + elif isinstance(dt_or_tzinfo, (datetime, time)): + dt = dt_or_tzinfo + if dt.tzinfo is not None: + tzinfo = dt.tzinfo + else: + tzinfo = UTC + else: + dt = None + tzinfo = dt_or_tzinfo + locale = Locale.parse(locale) + + if hasattr(tzinfo, 'zone'): + zone = tzinfo.zone + else: + zone = tzinfo.tzname(dt) + + if zone_variant is None: + if dt is None: + zone_variant = 'generic' + else: + dst = tzinfo.dst(dt) + if dst: + zone_variant = 'daylight' + else: + zone_variant = 'standard' + else: + if zone_variant not in ('generic', 'standard', 'daylight'): + raise ValueError('Invalid zone variation') + + # Get the canonical time-zone code + zone = get_global('zone_aliases').get(zone, zone) + + info = locale.time_zones.get(zone, {}) + # Try explicitly translated zone names first + if width in info: + if zone_variant in info[width]: + return info[width][zone_variant] + + metazone = get_global('meta_zones').get(zone) + if metazone: + metazone_info = locale.meta_zones.get(metazone, {}) + if width in metazone_info: + if zone_variant in metazone_info[width]: + return metazone_info[width][zone_variant] + + # If we have a concrete datetime, we assume that the result can't be + # independent of daylight savings time, so we return the GMT offset + if dt is not None: + return get_timezone_gmt(dt, width=width, locale=locale) + + return get_timezone_location(dt_or_tzinfo, locale=locale) + + +def format_date(date=None, format='medium', locale=LC_TIME): + """Return a date formatted according to the given pattern. + + >>> d = date(2007, 04, 01) + >>> format_date(d, locale='en_US') + u'Apr 1, 2007' + >>> format_date(d, format='full', locale='de_DE') + u'Sonntag, 1. April 2007' + + If you don't want to use the locale default formats, you can specify a + custom date pattern: + + >>> format_date(d, "EEE, MMM d, ''yy", locale='en') + u"Sun, Apr 1, '07" + + :param date: the ``date`` or ``datetime`` object; if `None`, the current + date is used + :param format: one of "full", "long", "medium", or "short", or a custom + date/time pattern + :param locale: a `Locale` object or a locale identifier + """ + if date is None: + date = date_.today() + elif isinstance(date, datetime): + date = date.date() + + locale = Locale.parse(locale) + if format in ('full', 'long', 'medium', 'short'): + format = get_date_format(format, locale=locale) + pattern = parse_pattern(format) + return pattern.apply(date, locale) + + +def format_datetime(datetime=None, format='medium', tzinfo=None, + locale=LC_TIME): + r"""Return a date formatted according to the given pattern. + + >>> dt = datetime(2007, 04, 01, 15, 30) + >>> format_datetime(dt, locale='en_US') + u'Apr 1, 2007, 3:30:00 PM' + + For any pattern requiring the display of the time-zone, the third-party + ``pytz`` package is needed to explicitly specify the time-zone: + + >>> format_datetime(dt, 'full', tzinfo=get_timezone('Europe/Paris'), + ... locale='fr_FR') + u'dimanche 1 avril 2007 17:30:00 heure avanc\xe9e d\u2019Europe centrale' + >>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz", + ... 
tzinfo=get_timezone('US/Eastern'), locale='en') + u'2007.04.01 AD at 11:30:00 EDT' + + :param datetime: the `datetime` object; if `None`, the current date and + time is used + :param format: one of "full", "long", "medium", or "short", or a custom + date/time pattern + :param tzinfo: the timezone to apply to the time for display + :param locale: a `Locale` object or a locale identifier + """ + if datetime is None: + datetime = datetime_.utcnow() + elif isinstance(datetime, number_types): + datetime = datetime_.utcfromtimestamp(datetime) + elif isinstance(datetime, time): + datetime = datetime_.combine(date.today(), datetime) + if datetime.tzinfo is None: + datetime = datetime.replace(tzinfo=UTC) + if tzinfo is not None: + datetime = datetime.astimezone(get_timezone(tzinfo)) + if hasattr(tzinfo, 'normalize'): # pytz + datetime = tzinfo.normalize(datetime) + + locale = Locale.parse(locale) + if format in ('full', 'long', 'medium', 'short'): + return get_datetime_format(format, locale=locale) \ + .replace("'", "") \ + .replace('{0}', format_time(datetime, format, tzinfo=None, + locale=locale)) \ + .replace('{1}', format_date(datetime, format, locale=locale)) + else: + return parse_pattern(format).apply(datetime, locale) + + +def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME): + r"""Return a time formatted according to the given pattern. + + >>> t = time(15, 30) + >>> format_time(t, locale='en_US') + u'3:30:00 PM' + >>> format_time(t, format='short', locale='de_DE') + u'15:30' + + If you don't want to use the locale default formats, you can specify a + custom time pattern: + + >>> format_time(t, "hh 'o''clock' a", locale='en') + u"03 o'clock PM" + + For any pattern requiring the display of the time-zone a + timezone has to be specified explicitly: + + >>> t = datetime(2007, 4, 1, 15, 30) + >>> tzinfo = get_timezone('Europe/Paris') + >>> t = tzinfo.localize(t) + >>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR') + u'15:30:00 heure avanc\xe9e d\u2019Europe centrale' + >>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=get_timezone('US/Eastern'), + ... locale='en') + u"09 o'clock AM, Eastern Daylight Time" + + As that example shows, when this function gets passed a + ``datetime.datetime`` value, the actual time in the formatted string is + adjusted to the timezone specified by the `tzinfo` parameter. If the + ``datetime`` is "naive" (i.e. it has no associated timezone information), + it is assumed to be in UTC. + + These timezone calculations are **not** performed if the value is of type + ``datetime.time``, as without date information there's no way to determine + what a given time would translate to in a different timezone without + information about whether daylight savings time is in effect or not. This + means that time values are left as-is, and the value of the `tzinfo` + parameter is only used to display the timezone name if needed: + + >>> t = time(15, 30) + >>> format_time(t, format='full', tzinfo=get_timezone('Europe/Paris'), + ... locale='fr_FR') + u'15:30:00 heure normale de l\u2019Europe centrale' + >>> format_time(t, format='full', tzinfo=get_timezone('US/Eastern'), + ... 
locale='en_US') + u'3:30:00 PM Eastern Standard Time' + + :param time: the ``time`` or ``datetime`` object; if `None`, the current + time in UTC is used + :param format: one of "full", "long", "medium", or "short", or a custom + date/time pattern + :param tzinfo: the time-zone to apply to the time for display + :param locale: a `Locale` object or a locale identifier + """ + if time is None: + time = datetime.utcnow() + elif isinstance(time, number_types): + time = datetime.utcfromtimestamp(time) + if time.tzinfo is None: + time = time.replace(tzinfo=UTC) + if isinstance(time, datetime): + if tzinfo is not None: + time = time.astimezone(tzinfo) + if hasattr(tzinfo, 'normalize'): # pytz + time = tzinfo.normalize(time) + time = time.timetz() + elif tzinfo is not None: + time = time.replace(tzinfo=tzinfo) + + locale = Locale.parse(locale) + if format in ('full', 'long', 'medium', 'short'): + format = get_time_format(format, locale=locale) + return parse_pattern(format).apply(time, locale) + + +TIMEDELTA_UNITS = ( + ('year', 3600 * 24 * 365), + ('month', 3600 * 24 * 30), + ('week', 3600 * 24 * 7), + ('day', 3600 * 24), + ('hour', 3600), + ('minute', 60), + ('second', 1) +) + + +def format_timedelta(delta, granularity='second', threshold=.85, + add_direction=False, format='medium', + locale=LC_TIME): + """Return a time delta according to the rules of the given locale. + + >>> format_timedelta(timedelta(weeks=12), locale='en_US') + u'3 months' + >>> format_timedelta(timedelta(seconds=1), locale='es') + u'1 segundo' + + The granularity parameter can be provided to alter the lowest unit + presented, which defaults to a second. + + >>> format_timedelta(timedelta(hours=3), granularity='day', + ... locale='en_US') + u'1 day' + + The threshold parameter can be used to determine at which value the + presentation switches to the next higher unit. A higher threshold factor + means the presentation will switch later. For example: + + >>> format_timedelta(timedelta(hours=23), threshold=0.9, locale='en_US') + u'1 day' + >>> format_timedelta(timedelta(hours=23), threshold=1.1, locale='en_US') + u'23 hours' + + In addition directional information can be provided that informs + the user if the date is in the past or in the future: + + >>> format_timedelta(timedelta(hours=1), add_direction=True) + u'In 1 hour' + >>> format_timedelta(timedelta(hours=-1), add_direction=True) + u'1 hour ago' + + :param delta: a ``timedelta`` object representing the time difference to + format, or the delta in seconds as an `int` value + :param granularity: determines the smallest unit that should be displayed, + the value can be one of "year", "month", "week", "day", + "hour", "minute" or "second" + :param threshold: factor that determines at which point the presentation + switches to the next higher unit + :param add_direction: if this flag is set to `True` the return value will + include directional information. For instance a + positive timedelta will include the information about + it being in the future, a negative will be information + about the value being in the past. 
+ :param format: the format (currently only "medium" and "short" are supported) + :param locale: a `Locale` object or a locale identifier + """ + if format not in ('short', 'medium'): + raise TypeError('Format can only be one of "short" or "medium"') + if isinstance(delta, timedelta): + seconds = int((delta.days * 86400) + delta.seconds) + else: + seconds = delta + locale = Locale.parse(locale) + + def _iter_choices(unit): + if add_direction: + if seconds >= 0: + yield unit + '-future' + else: + yield unit + '-past' + yield unit + ':' + format + yield unit + + for unit, secs_per_unit in TIMEDELTA_UNITS: + value = abs(seconds) / secs_per_unit + if value >= threshold or unit == granularity: + if unit == granularity and value > 0: + value = max(1, value) + value = int(round(value)) + plural_form = locale.plural_form(value) + pattern = None + for choice in _iter_choices(unit): + patterns = locale._data['unit_patterns'].get(choice) + if patterns is not None: + pattern = patterns[plural_form] + break + # This really should not happen + if pattern is None: + return u'' + return pattern.replace('{0}', str(value)) + + return u'' + + +def parse_date(string, locale=LC_TIME): + """Parse a date from a string. + + This function uses the date format for the locale as a hint to determine + the order in which the date fields appear in the string. + + >>> parse_date('4/1/04', locale='en_US') + datetime.date(2004, 4, 1) + >>> parse_date('01.04.2004', locale='de_DE') + datetime.date(2004, 4, 1) + + :param string: the string containing the date + :param locale: a `Locale` object or a locale identifier + """ + # TODO: try ISO format first? + format = get_date_format(locale=locale).pattern.lower() + year_idx = format.index('y') + month_idx = format.index('m') + if month_idx < 0: + month_idx = format.index('l') + day_idx = format.index('d') + + indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')] + indexes.sort() + indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)]) + + # FIXME: this currently only supports numbers, but should also support month + # names, both in the requested locale, and english + + numbers = re.findall('(\d+)', string) + year = numbers[indexes['Y']] + if len(year) == 2: + year = 2000 + int(year) + else: + year = int(year) + month = int(numbers[indexes['M']]) + day = int(numbers[indexes['D']]) + if month > 12: + month, day = day, month + return date(year, month, day) + + +def parse_time(string, locale=LC_TIME): + """Parse a time from a string. + + This function uses the time format for the locale as a hint to determine + the order in which the time fields appear in the string. + + >>> parse_time('15:30:00', locale='en_US') + datetime.time(15, 30) + + :param string: the string containing the time + :param locale: a `Locale` object or a locale identifier + :return: the parsed time + :rtype: `time` + """ + # TODO: try ISO format first? 
+ format = get_time_format(locale=locale).pattern.lower() + hour_idx = format.index('h') + if hour_idx < 0: + hour_idx = format.index('k') + min_idx = format.index('m') + sec_idx = format.index('s') + + indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')] + indexes.sort() + indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)]) + + # FIXME: support 12 hour clock, and 0-based hour specification + # and seconds should be optional, maybe minutes too + # oh, and time-zones, of course + + numbers = re.findall('(\d+)', string) + hour = int(numbers[indexes['H']]) + minute = int(numbers[indexes['M']]) + second = int(numbers[indexes['S']]) + return time(hour, minute, second) + + +class DateTimePattern(object): + + def __init__(self, pattern, format): + self.pattern = pattern + self.format = format + + def __repr__(self): + return '<%s %r>' % (type(self).__name__, self.pattern) + + def __unicode__(self): + return self.pattern + + def __mod__(self, other): + if type(other) is not DateTimeFormat: + return NotImplemented + return self.format % other + + def apply(self, datetime, locale): + return self % DateTimeFormat(datetime, locale) + + +class DateTimeFormat(object): + + def __init__(self, value, locale): + assert isinstance(value, (date, datetime, time)) + if isinstance(value, (datetime, time)) and value.tzinfo is None: + value = value.replace(tzinfo=UTC) + self.value = value + self.locale = Locale.parse(locale) + + def __getitem__(self, name): + char = name[0] + num = len(name) + if char == 'G': + return self.format_era(char, num) + elif char in ('y', 'Y', 'u'): + return self.format_year(char, num) + elif char in ('Q', 'q'): + return self.format_quarter(char, num) + elif char in ('M', 'L'): + return self.format_month(char, num) + elif char in ('w', 'W'): + return self.format_week(char, num) + elif char == 'd': + return self.format(self.value.day, num) + elif char == 'D': + return self.format_day_of_year(num) + elif char == 'F': + return self.format_day_of_week_in_month() + elif char in ('E', 'e', 'c'): + return self.format_weekday(char, num) + elif char == 'a': + return self.format_period(char) + elif char == 'h': + if self.value.hour % 12 == 0: + return self.format(12, num) + else: + return self.format(self.value.hour % 12, num) + elif char == 'H': + return self.format(self.value.hour, num) + elif char == 'K': + return self.format(self.value.hour % 12, num) + elif char == 'k': + if self.value.hour == 0: + return self.format(24, num) + else: + return self.format(self.value.hour, num) + elif char == 'm': + return self.format(self.value.minute, num) + elif char == 's': + return self.format(self.value.second, num) + elif char == 'S': + return self.format_frac_seconds(num) + elif char == 'A': + return self.format_milliseconds_in_day(num) + elif char in ('z', 'Z', 'v', 'V'): + return self.format_timezone(char, num) + else: + raise KeyError('Unsupported date/time field %r' % char) + + def format_era(self, char, num): + width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)] + era = int(self.value.year >= 0) + return get_era_names(width, self.locale)[era] + + def format_year(self, char, num): + value = self.value.year + if char.isupper(): + week = self.get_week_number(self.get_day_of_year()) + if week == 0: + value -= 1 + year = self.format(value, num) + if num == 2: + year = year[-2:] + return year + + def format_quarter(self, char, num): + quarter = (self.value.month - 1) // 3 + 1 + if num <= 2: + return ('%%0%dd' % num) % quarter + width = {3: 'abbreviated', 4: 'wide', 5: 
'narrow'}[num] + context = {'Q': 'format', 'q': 'stand-alone'}[char] + return get_quarter_names(width, context, self.locale)[quarter] + + def format_month(self, char, num): + if num <= 2: + return ('%%0%dd' % num) % self.value.month + width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] + context = {'M': 'format', 'L': 'stand-alone'}[char] + return get_month_names(width, context, self.locale)[self.value.month] + + def format_week(self, char, num): + if char.islower(): # week of year + day_of_year = self.get_day_of_year() + week = self.get_week_number(day_of_year) + if week == 0: + date = self.value - timedelta(days=day_of_year) + week = self.get_week_number(self.get_day_of_year(date), + date.weekday()) + return self.format(week, num) + else: # week of month + week = self.get_week_number(self.value.day) + if week == 0: + date = self.value - timedelta(days=self.value.day) + week = self.get_week_number(date.day, date.weekday()) + pass + return '%d' % week + + def format_weekday(self, char, num): + if num < 3: + if char.islower(): + value = 7 - self.locale.first_week_day + self.value.weekday() + return self.format(value % 7 + 1, num) + num = 3 + weekday = self.value.weekday() + width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] + context = {3: 'format', 4: 'format', 5: 'stand-alone'}[num] + return get_day_names(width, context, self.locale)[weekday] + + def format_day_of_year(self, num): + return self.format(self.get_day_of_year(), num) + + def format_day_of_week_in_month(self): + return '%d' % ((self.value.day - 1) // 7 + 1) + + def format_period(self, char): + period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)] + return get_period_names(locale=self.locale)[period] + + def format_frac_seconds(self, num): + value = str(self.value.microsecond) + return self.format(round(float('.%s' % value), num) * 10**num, num) + + def format_milliseconds_in_day(self, num): + msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \ + self.value.minute * 60000 + self.value.hour * 3600000 + return self.format(msecs, num) + + def format_timezone(self, char, num): + width = {3: 'short', 4: 'long'}[max(3, num)] + if char == 'z': + return get_timezone_name(self.value, width, locale=self.locale) + elif char == 'Z': + return get_timezone_gmt(self.value, width, locale=self.locale) + elif char == 'v': + return get_timezone_name(self.value.tzinfo, width, + locale=self.locale) + elif char == 'V': + if num == 1: + return get_timezone_name(self.value.tzinfo, width, + uncommon=True, locale=self.locale) + return get_timezone_location(self.value.tzinfo, locale=self.locale) + + def format(self, value, length): + return ('%%0%dd' % length) % value + + def get_day_of_year(self, date=None): + if date is None: + date = self.value + return (date - date.replace(month=1, day=1)).days + 1 + + def get_week_number(self, day_of_period, day_of_week=None): + """Return the number of the week of a day within a period. This may be + the week number in a year or the week number in a month. + + Usually this will return a value equal to or greater than 1, but if the + first week of the period is so short that it actually counts as the last + week of the previous period, this function will return 0. 
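The same rule drives the ``w`` (week-of-year) field in date patterns; a short sketch through the public API, assuming this vendored babel package is importable:

    from datetime import date
    from babel.dates import format_date

    # Jan 8, 2006 is a Sunday: week 1 under de_DE rules (weeks start on
    # Monday, min. 4 days), week 2 under en_US rules (weeks start on Sunday).
    print(format_date(date(2006, 1, 8), 'w', locale='de_DE'))  # '1'
    print(format_date(date(2006, 1, 8), 'w', locale='en_US'))  # '2'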
+ + >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE')) + >>> format.get_week_number(6) + 1 + + >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US')) + >>> format.get_week_number(6) + 2 + + :param day_of_period: the number of the day in the period (usually + either the day of month or the day of year) + :param day_of_week: the week day; if omitted, the week day of the + current date is assumed + """ + if day_of_week is None: + day_of_week = self.value.weekday() + first_day = (day_of_week - self.locale.first_week_day - + day_of_period + 1) % 7 + if first_day < 0: + first_day += 7 + week_number = (day_of_period + first_day - 1) // 7 + if 7 - first_day >= self.locale.min_week_days: + week_number += 1 + return week_number + + +PATTERN_CHARS = { + 'G': [1, 2, 3, 4, 5], # era + 'y': None, 'Y': None, 'u': None, # year + 'Q': [1, 2, 3, 4], 'q': [1, 2, 3, 4], # quarter + 'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], # month + 'w': [1, 2], 'W': [1], # week + 'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, # day + 'E': [1, 2, 3, 4, 5], 'e': [1, 2, 3, 4, 5], 'c': [1, 3, 4, 5], # week day + 'a': [1], # period + 'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2], # hour + 'm': [1, 2], # minute + 's': [1, 2], 'S': None, 'A': None, # second + 'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4], 'v': [1, 4], 'V': [1, 4] # zone +} + + +def parse_pattern(pattern): + """Parse date, time, and datetime format patterns. + + >>> parse_pattern("MMMMd").format + u'%(MMMM)s%(d)s' + >>> parse_pattern("MMM d, yyyy").format + u'%(MMM)s %(d)s, %(yyyy)s' + + Pattern can contain literal strings in single quotes: + + >>> parse_pattern("H:mm' Uhr 'z").format + u'%(H)s:%(mm)s Uhr %(z)s' + + An actual single quote can be used by using two adjacent single quote + characters: + + >>> parse_pattern("hh' o''clock'").format + u"%(hh)s o'clock" + + :param pattern: the formatting pattern to parse + """ + if type(pattern) is DateTimePattern: + return pattern + + result = [] + quotebuf = None + charbuf = [] + fieldchar = [''] + fieldnum = [0] + + def append_chars(): + result.append(''.join(charbuf).replace('%', '%%')) + del charbuf[:] + + def append_field(): + limit = PATTERN_CHARS[fieldchar[0]] + if limit and fieldnum[0] not in limit: + raise ValueError('Invalid length for field: %r' + % (fieldchar[0] * fieldnum[0])) + result.append('%%(%s)s' % (fieldchar[0] * fieldnum[0])) + fieldchar[0] = '' + fieldnum[0] = 0 + + for idx, char in enumerate(pattern.replace("''", '\0')): + if quotebuf is None: + if char == "'": # quote started + if fieldchar[0]: + append_field() + elif charbuf: + append_chars() + quotebuf = [] + elif char in PATTERN_CHARS: + if charbuf: + append_chars() + if char == fieldchar[0]: + fieldnum[0] += 1 + else: + if fieldchar[0]: + append_field() + fieldchar[0] = char + fieldnum[0] = 1 + else: + if fieldchar[0]: + append_field() + charbuf.append(char) + + elif quotebuf is not None: + if char == "'": # end of quote + charbuf.extend(quotebuf) + quotebuf = None + else: # inside quote + quotebuf.append(char) + + if fieldchar[0]: + append_field() + elif charbuf: + append_chars() + + return DateTimePattern(pattern, u''.join(result).replace('\0', "'")) diff --git a/awx/lib/site-packages/babel/global.dat b/awx/lib/site-packages/babel/global.dat new file mode 100644 index 0000000000..82cbbae121 Binary files /dev/null and b/awx/lib/site-packages/babel/global.dat differ diff --git a/awx/lib/site-packages/babel/localedata.py b/awx/lib/site-packages/babel/localedata.py new file mode 100644 index
0000000000..88883ac80e --- /dev/null +++ b/awx/lib/site-packages/babel/localedata.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +""" + babel.localedata + ~~~~~~~~~~~~~~~~ + + Low-level locale data access. + + :note: The `Locale` class, which uses this module under the hood, provides a + more convenient interface for accessing the locale data. + + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. +""" + +import os +import threading +from collections import MutableMapping + +from babel._compat import pickle + + +_cache = {} +_cache_lock = threading.RLock() +_dirname = os.path.join(os.path.dirname(__file__), 'localedata') + + +def exists(name): + """Check whether locale data is available for the given locale. The + return value is `True` if it exists, `False` otherwise. + + :param name: the locale identifier string + """ + if name in _cache: + return True + return os.path.exists(os.path.join(_dirname, '%s.dat' % name)) + + +def locale_identifiers(): + """Return a list of all locale identifiers for which locale data is + available. + + .. versionadded:: 0.8.1 + + :return: a list of locale identifiers (strings) + """ + return [stem for stem, extension in [ + os.path.splitext(filename) for filename in os.listdir(_dirname) + ] if extension == '.dat' and stem != 'root'] + + +def load(name, merge_inherited=True): + """Load the locale data for the given locale. + + The locale data is a dictionary that contains much of the data defined by + the Common Locale Data Repository (CLDR). This data is stored as a + collection of pickle files inside the ``babel`` package. + + >>> d = load('en_US') + >>> d['languages']['sv'] + u'Swedish' + + Note that the results are cached, and subsequent requests for the same + locale return the same dictionary: + + >>> d1 = load('en_US') + >>> d2 = load('en_US') + >>> d1 is d2 + True + + :param name: the locale identifier string (or "root") + :param merge_inherited: whether the inherited data should be merged into + the data of the requested locale + :raise `IOError`: if no locale data file is found for the given locale + identifier, or one of the locales it inherits from + """ + _cache_lock.acquire() + try: + data = _cache.get(name) + if not data: + # Load inherited data + if name == 'root' or not merge_inherited: + data = {} + else: + parts = name.split('_') + if len(parts) == 1: + parent = 'root' + else: + parent = '_'.join(parts[:-1]) + data = load(parent).copy() + filename = os.path.join(_dirname, '%s.dat' % name) + fileobj = open(filename, 'rb') + try: + if name != 'root' and merge_inherited: + merge(data, pickle.load(fileobj)) + else: + data = pickle.load(fileobj) + _cache[name] = data + finally: + fileobj.close() + return data + finally: + _cache_lock.release() + + +def merge(dict1, dict2): + """Merge the data from `dict2` into the `dict1` dictionary, making copies + of nested dictionaries.
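A small sketch of that copying behaviour with hypothetical dictionaries (only `merge` itself comes from this module):

    from babel.localedata import merge

    shared = {'first_day': 0, 'min_days': 4}
    base = {'week_data': shared}
    merge(base, {'week_data': {'first_day': 6}})
    print(base['week_data'])             # {'first_day': 6, 'min_days': 4}
    print(base['week_data'] is shared)   # False -- the nested dict was copied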
+ + >>> d = {1: 'foo', 3: 'baz'} + >>> merge(d, {1: 'Foo', 2: 'Bar'}) + >>> items = d.items(); items.sort(); items + [(1, 'Foo'), (2, 'Bar'), (3, 'baz')] + + :param dict1: the dictionary to merge into + :param dict2: the dictionary containing the data that should be merged + """ + for key, val2 in dict2.items(): + if val2 is not None: + val1 = dict1.get(key) + if isinstance(val2, dict): + if val1 is None: + val1 = {} + if isinstance(val1, Alias): + val1 = (val1, val2) + elif isinstance(val1, tuple): + alias, others = val1 + others = others.copy() + merge(others, val2) + val1 = (alias, others) + else: + val1 = val1.copy() + merge(val1, val2) + else: + val1 = val2 + dict1[key] = val1 + + +class Alias(object): + """Representation of an alias in the locale data. + + An alias is a value that refers to some other part of the locale data, + as specified by the `keys`. + """ + + def __init__(self, keys): + self.keys = tuple(keys) + + def __repr__(self): + return '<%s %r>' % (type(self).__name__, self.keys) + + def resolve(self, data): + """Resolve the alias based on the given data. + + This is done recursively, so if one alias resolves to a second alias, + that second alias will also be resolved. + + :param data: the locale data + :type data: `dict` + """ + base = data + for key in self.keys: + data = data[key] + if isinstance(data, Alias): + data = data.resolve(base) + elif isinstance(data, tuple): + alias, others = data + data = alias.resolve(base) + return data + + +class LocaleDataDict(MutableMapping): + """Dictionary wrapper that automatically resolves aliases to the actual + values. + """ + + def __init__(self, data, base=None): + self._data = data + if base is None: + base = data + self.base = base + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + orig = val = self._data[key] + if isinstance(val, Alias): # resolve an alias + val = val.resolve(self.base) + if isinstance(val, tuple): # Merge a partial dict with an alias + alias, others = val + val = alias.resolve(self.base).copy() + merge(val, others) + if type(val) is dict: # Return a nested alias-resolving dict + val = LocaleDataDict(val, base=self.base) + if val is not orig: + self._data[key] = val + return val + + def __setitem__(self, key, value): + self._data[key] = value + + def __delitem__(self, key): + del self._data[key] + + def copy(self): + return LocaleDataDict(self._data.copy(), base=self.base) diff --git a/awx/lib/site-packages/babel/localedata/aa.dat b/awx/lib/site-packages/babel/localedata/aa.dat new file mode 100644 index 0000000000..7a5e498755 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/aa.dat differ diff --git a/awx/lib/site-packages/babel/localedata/aa_DJ.dat b/awx/lib/site-packages/babel/localedata/aa_DJ.dat new file mode 100644 index 0000000000..ceafe15576 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/aa_DJ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/aa_ER.dat b/awx/lib/site-packages/babel/localedata/aa_ER.dat new file mode 100644 index 0000000000..61ff1648da Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/aa_ER.dat differ diff --git a/awx/lib/site-packages/babel/localedata/aa_ET.dat b/awx/lib/site-packages/babel/localedata/aa_ET.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/aa_ET.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q 
Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/af.dat b/awx/lib/site-packages/babel/localedata/af.dat new file mode 100644 index 0000000000..b17f184ab8 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/af.dat differ diff --git a/awx/lib/site-packages/babel/localedata/af_NA.dat b/awx/lib/site-packages/babel/localedata/af_NA.dat new file mode 100644 index 0000000000..c915eedcd9 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/af_NA.dat differ diff --git a/awx/lib/site-packages/babel/localedata/af_ZA.dat b/awx/lib/site-packages/babel/localedata/af_ZA.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/af_ZA.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/agq.dat b/awx/lib/site-packages/babel/localedata/agq.dat new file mode 100644 index 0000000000..0c57e93bef Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/agq.dat differ diff --git a/awx/lib/site-packages/babel/localedata/agq_CM.dat b/awx/lib/site-packages/babel/localedata/agq_CM.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/agq_CM.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ak.dat b/awx/lib/site-packages/babel/localedata/ak.dat new file mode 100644 index 0000000000..fa10f57beb Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ak.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ak_GH.dat b/awx/lib/site-packages/babel/localedata/ak_GH.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ak_GH.dat differ diff --git a/awx/lib/site-packages/babel/localedata/am.dat b/awx/lib/site-packages/babel/localedata/am.dat new file mode 100644 index 0000000000..4bb060eb88 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/am.dat differ diff --git a/awx/lib/site-packages/babel/localedata/am_ET.dat b/awx/lib/site-packages/babel/localedata/am_ET.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/am_ET.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file
[Binary locale data: from here the diff adds one new mode-100644 file per CLDR locale under awx/lib/site-packages/babel/localedata/, from ar.dat through fa_AF.dat. Git shows each entry either as "Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/<locale>.dat differ" or as a four-line text hunk of the same pickled locale dictionary (currency_symbols, scientific_formats, percent_formats, number_symbols, currency_names_plural, week_data, zone_formats, currency_formats, _version, languages, territories, time_zones, scripts, decimal_formats, meta_zones, variants, currency_names, unit_patterns). Locales added in this span: ar, ar_001, ar_AE, ar_BH, ar_DJ, ar_DZ, ar_EG, ar_EH, ar_ER, ar_IL, ar_IQ, ar_JO, ar_KM, ar_KW, ar_LB, ar_LY, ar_MA, ar_MR, ar_OM, ar_PS, ar_QA, ar_SA, ar_SD, ar_SO, ar_SY, ar_TD, ar_TN, ar_YE, as, as_IN, asa, asa_TZ, ast, ast_ES, az, az_Cyrl, az_Cyrl_AZ, az_Latn, az_Latn_AZ, bas, bas_CM, be, be_BY, bem, bem_ZM, bez, bez_TZ, bg, bg_BG, bm, bm_ML, bn, bn_BD, bn_IN, bo, bo_CN, bo_IN, br, br_FR, brx, brx_IN, bs, bs_Cyrl, bs_Cyrl_BA, bs_Latn, bs_Latn_BA, byn, byn_ER, ca, ca_AD, ca_ES, cgg, cgg_UG, chr, chr_US, cs, cs_CZ, cy, cy_GB, da, da_DK, dav, dav_KE, de, de_AT, de_BE, de_CH, de_DE, de_LI, de_LU, dje, dje_NE, dua, dua_CM, dyo, dyo_SN, dz, dz_BT, ebu, ebu_KE, ee, ee_GH, ee_TG, el, el_CY, el_GR, en, en_150, en_AG, en_AS, en_AU, en_BB, en_BE, en_BM, en_BS, en_BW, en_BZ, en_CA, en_CM, en_DM, en_Dsrt, en_Dsrt_US, en_FJ, en_FM, en_GB, en_GD, en_GG, en_GH, en_GI, en_GM, en_GU, en_GY, en_HK, en_IE, en_IM, en_IN, en_JE, en_JM, en_KE, en_KI, en_KN, en_KY, en_LC, en_LR, en_LS, en_MG, en_MH, en_MP, en_MT, en_MU, en_MW, en_NA, en_NG, en_NZ, en_PG, en_PH, en_PK, en_PR, en_PW, en_SB, en_SC, en_SG, en_SL, en_SS, en_SZ, en_TC, en_TO, en_TT, en_TZ, en_UG, en_UM, en_US, en_US_POSIX, en_VC, en_VG, en_VI, en_VU, en_WS, en_ZA, en_ZM, en_ZW, eo, es, es_419, es_AR, es_BO, es_CL, es_CO, es_CR, es_CU, es_DO, es_EA, es_EC, es_ES, es_GQ, es_GT, es_HN, es_IC, es_MX, es_NI, es_PA, es_PE, es_PH, es_PR, es_PY, es_SV, es_US, es_UY, es_VE, et, et_EE, eu, eu_ES, ewo, ewo_CM, fa, fa_AF.]
diff --git a/awx/lib/site-packages/babel/localedata/fa_IR.dat b/awx/lib/site-packages/babel/localedata/fa_IR.dat
100644 index 0000000000..150c7e3b45 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/fa_IR.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/ff.dat b/awx/lib/site-packages/babel/localedata/ff.dat new file mode 100644 index 0000000000..5b3aab1cb2 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ff.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ff_SN.dat b/awx/lib/site-packages/babel/localedata/ff_SN.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ff_SN.dat differ diff --git a/awx/lib/site-packages/babel/localedata/fi.dat b/awx/lib/site-packages/babel/localedata/fi.dat new file mode 100644 index 0000000000..cd6383776b Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fi.dat differ diff --git a/awx/lib/site-packages/babel/localedata/fi_FI.dat b/awx/lib/site-packages/babel/localedata/fi_FI.dat new file mode 100644 index 0000000000..f981f10762 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fi_FI.dat differ diff --git a/awx/lib/site-packages/babel/localedata/fil.dat b/awx/lib/site-packages/babel/localedata/fil.dat new file mode 100644 index 0000000000..4e790acd18 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fil.dat differ diff --git a/awx/lib/site-packages/babel/localedata/fil_PH.dat b/awx/lib/site-packages/babel/localedata/fil_PH.dat new file mode 100644 index 0000000000..81c8925439 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/fil_PH.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fo.dat b/awx/lib/site-packages/babel/localedata/fo.dat
new file mode 100644
index 0000000000..53dad88146
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fo.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fo_FO.dat b/awx/lib/site-packages/babel/localedata/fo_FO.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fo_FO.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr.dat b/awx/lib/site-packages/babel/localedata/fr.dat
new file mode 100644
index 0000000000..12624b7779
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_BE.dat b/awx/lib/site-packages/babel/localedata/fr_BE.dat
new file mode 100644
index 0000000000..873a78c0db
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_BE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_BF.dat b/awx/lib/site-packages/babel/localedata/fr_BF.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_BF.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_BI.dat b/awx/lib/site-packages/babel/localedata/fr_BI.dat
new file mode 100644
index 0000000000..0e37e69993
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_BI.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_BJ.dat b/awx/lib/site-packages/babel/localedata/fr_BJ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_BJ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_BL.dat b/awx/lib/site-packages/babel/localedata/fr_BL.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/fr_BL.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fr_CA.dat b/awx/lib/site-packages/babel/localedata/fr_CA.dat
new file mode 100644
index 0000000000..22e00e6a87
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_CA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_CD.dat b/awx/lib/site-packages/babel/localedata/fr_CD.dat
new file mode 100644
index 0000000000..54d87d994b
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_CD.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_CF.dat b/awx/lib/site-packages/babel/localedata/fr_CF.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/fr_CF.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fr_CG.dat b/awx/lib/site-packages/babel/localedata/fr_CG.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/fr_CG.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fr_CH.dat b/awx/lib/site-packages/babel/localedata/fr_CH.dat
new file mode 100644
index 0000000000..da2a9705bb
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_CH.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_CI.dat b/awx/lib/site-packages/babel/localedata/fr_CI.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_CI.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_CM.dat b/awx/lib/site-packages/babel/localedata/fr_CM.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_CM.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_DJ.dat b/awx/lib/site-packages/babel/localedata/fr_DJ.dat
new file mode 100644
index 0000000000..b0ee442122
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_DJ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_DZ.dat b/awx/lib/site-packages/babel/localedata/fr_DZ.dat
new file mode 100644
index 0000000000..be317e2b95
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_DZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_FR.dat b/awx/lib/site-packages/babel/localedata/fr_FR.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_FR.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_GA.dat b/awx/lib/site-packages/babel/localedata/fr_GA.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/fr_GA.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fr_GF.dat b/awx/lib/site-packages/babel/localedata/fr_GF.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_GF.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_GN.dat b/awx/lib/site-packages/babel/localedata/fr_GN.dat
new file mode 100644
index 0000000000..4207c303c0
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_GN.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_GP.dat b/awx/lib/site-packages/babel/localedata/fr_GP.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_GP.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_GQ.dat b/awx/lib/site-packages/babel/localedata/fr_GQ.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/fr_GQ.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fr_HT.dat b/awx/lib/site-packages/babel/localedata/fr_HT.dat
new file mode 100644
index 0000000000..b57f296d4f
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_HT.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_KM.dat b/awx/lib/site-packages/babel/localedata/fr_KM.dat
new file mode 100644
index 0000000000..6a215c9a74
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_KM.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_LU.dat b/awx/lib/site-packages/babel/localedata/fr_LU.dat
new file mode 100644
index 0000000000..947f11cb5e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_LU.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_MA.dat b/awx/lib/site-packages/babel/localedata/fr_MA.dat
new file mode 100644
index 0000000000..01c29af978
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/fr_MA.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fr_MC.dat b/awx/lib/site-packages/babel/localedata/fr_MC.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_MC.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_MF.dat b/awx/lib/site-packages/babel/localedata/fr_MF.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/fr_MF.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fr_MG.dat b/awx/lib/site-packages/babel/localedata/fr_MG.dat
new file mode 100644
index 0000000000..507d98791c
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_MG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_ML.dat b/awx/lib/site-packages/babel/localedata/fr_ML.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_ML.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_MQ.dat b/awx/lib/site-packages/babel/localedata/fr_MQ.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_MQ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_MR.dat b/awx/lib/site-packages/babel/localedata/fr_MR.dat
new file mode 100644
index 0000000000..e37ceddb21
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_MR.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_MU.dat b/awx/lib/site-packages/babel/localedata/fr_MU.dat
new file mode 100644
index 0000000000..1dcad22d3f
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_MU.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_NC.dat b/awx/lib/site-packages/babel/localedata/fr_NC.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_NC.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_NE.dat b/awx/lib/site-packages/babel/localedata/fr_NE.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_NE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_PF.dat b/awx/lib/site-packages/babel/localedata/fr_PF.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_PF.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_RE.dat b/awx/lib/site-packages/babel/localedata/fr_RE.dat
new file mode 100644
index 0000000000..9721f8c0fd
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_RE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_RW.dat b/awx/lib/site-packages/babel/localedata/fr_RW.dat
new file mode 100644
index 0000000000..a6a51af8b6
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_RW.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_SC.dat b/awx/lib/site-packages/babel/localedata/fr_SC.dat
new file mode 100644
index 0000000000..017b99b9a7
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_SC.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_SN.dat b/awx/lib/site-packages/babel/localedata/fr_SN.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_SN.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_SY.dat b/awx/lib/site-packages/babel/localedata/fr_SY.dat
new file mode 100644
index 0000000000..d2dd10e5bb
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_SY.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_TD.dat b/awx/lib/site-packages/babel/localedata/fr_TD.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/fr_TD.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/fr_TG.dat b/awx/lib/site-packages/babel/localedata/fr_TG.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_TG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_TN.dat b/awx/lib/site-packages/babel/localedata/fr_TN.dat
new file mode 100644
index 0000000000..70cc066160
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_TN.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_VU.dat b/awx/lib/site-packages/babel/localedata/fr_VU.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_VU.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fr_YT.dat b/awx/lib/site-packages/babel/localedata/fr_YT.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fr_YT.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fur.dat b/awx/lib/site-packages/babel/localedata/fur.dat
new file mode 100644
index 0000000000..92767739a7
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fur.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/fur_IT.dat b/awx/lib/site-packages/babel/localedata/fur_IT.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/fur_IT.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ga.dat b/awx/lib/site-packages/babel/localedata/ga.dat
new file mode 100644
index 0000000000..e47984a399
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ga.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ga_IE.dat b/awx/lib/site-packages/babel/localedata/ga_IE.dat
new file mode 100644
index 0000000000..401708ffe0
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ga_IE.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/gd.dat b/awx/lib/site-packages/babel/localedata/gd.dat
new file mode 100644
index 0000000000..d8d4a0357e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/gd.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/gd_GB.dat b/awx/lib/site-packages/babel/localedata/gd_GB.dat
new file mode 100644
index 0000000000..401708ffe0
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/gd_GB.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/gl.dat b/awx/lib/site-packages/babel/localedata/gl.dat
new file mode 100644
index 0000000000..c9fcaabfbc
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/gl.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/gl_ES.dat b/awx/lib/site-packages/babel/localedata/gl_ES.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/gl_ES.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/gsw.dat b/awx/lib/site-packages/babel/localedata/gsw.dat
new file mode 100644
index 0000000000..061781160a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/gsw.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/gsw_CH.dat b/awx/lib/site-packages/babel/localedata/gsw_CH.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/gsw_CH.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/gu.dat b/awx/lib/site-packages/babel/localedata/gu.dat
new file mode 100644
index 0000000000..19621ca20f
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/gu.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/gu_IN.dat b/awx/lib/site-packages/babel/localedata/gu_IN.dat
new file mode 100644
index 0000000000..1564619c55
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/gu_IN.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/guz.dat b/awx/lib/site-packages/babel/localedata/guz.dat
new file mode 100644
index 0000000000..d287c50cfe
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/guz.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/guz_KE.dat b/awx/lib/site-packages/babel/localedata/guz_KE.dat
new file mode 100644
index 0000000000..356909133a
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/guz_KE.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/gv.dat b/awx/lib/site-packages/babel/localedata/gv.dat
new file mode 100644
index 0000000000..4d92358922
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/gv.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/gv_GB.dat b/awx/lib/site-packages/babel/localedata/gv_GB.dat
new file mode 100644
index 0000000000..401708ffe0
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/gv_GB.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ha.dat b/awx/lib/site-packages/babel/localedata/ha.dat
new file mode 100644
index 0000000000..d42e1f232b
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ha.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ha_Latn.dat b/awx/lib/site-packages/babel/localedata/ha_Latn.dat
new file mode 100644
index 0000000000..27760a1c92
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ha_Latn.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ha_Latn_GH.dat b/awx/lib/site-packages/babel/localedata/ha_Latn_GH.dat
new file mode 100644
index 0000000000..a6c4ac105f
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ha_Latn_GH.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ha_Latn_NE.dat b/awx/lib/site-packages/babel/localedata/ha_Latn_NE.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ha_Latn_NE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ha_Latn_NG.dat b/awx/lib/site-packages/babel/localedata/ha_Latn_NG.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ha_Latn_NG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/haw.dat b/awx/lib/site-packages/babel/localedata/haw.dat
new file mode 100644
index 0000000000..74c46ef839
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/haw.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/haw_US.dat b/awx/lib/site-packages/babel/localedata/haw_US.dat
new file mode 100644
index 0000000000..e1639abe76
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/haw_US.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/he.dat b/awx/lib/site-packages/babel/localedata/he.dat
new file mode 100644
index 0000000000..f085ae17bd
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/he.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/he_IL.dat b/awx/lib/site-packages/babel/localedata/he_IL.dat
new file mode 100644
index 0000000000..36bd112d1a
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/he_IL.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/hi.dat b/awx/lib/site-packages/babel/localedata/hi.dat
new file mode 100644
index 0000000000..26f01c7ee8
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/hi.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/hi_IN.dat b/awx/lib/site-packages/babel/localedata/hi_IN.dat
new file mode 100644
index 0000000000..1564619c55
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/hi_IN.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/hr.dat b/awx/lib/site-packages/babel/localedata/hr.dat
new file mode 100644
index 0000000000..c11d395194
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/hr.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/hr_BA.dat b/awx/lib/site-packages/babel/localedata/hr_BA.dat
new file mode 100644
index 0000000000..7b23688527
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/hr_BA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/hr_HR.dat b/awx/lib/site-packages/babel/localedata/hr_HR.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/hr_HR.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/hu.dat b/awx/lib/site-packages/babel/localedata/hu.dat
new file mode 100644
index 0000000000..ddc9aae167
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/hu.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/hu_HU.dat b/awx/lib/site-packages/babel/localedata/hu_HU.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/hu_HU.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/hy.dat b/awx/lib/site-packages/babel/localedata/hy.dat
new file mode 100644
index 0000000000..59fc3928e9
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/hy.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/hy_AM.dat b/awx/lib/site-packages/babel/localedata/hy_AM.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/hy_AM.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ia.dat b/awx/lib/site-packages/babel/localedata/ia.dat
new file mode 100644
index 0000000000..d97e3257c2
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ia.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ia_FR.dat b/awx/lib/site-packages/babel/localedata/ia_FR.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ia_FR.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/id.dat b/awx/lib/site-packages/babel/localedata/id.dat
new file mode 100644
index 0000000000..1b8398a67d
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/id.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/id_ID.dat b/awx/lib/site-packages/babel/localedata/id_ID.dat
new file mode 100644
index 0000000000..81c8925439
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/id_ID.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ig.dat b/awx/lib/site-packages/babel/localedata/ig.dat
new file mode 100644
index 0000000000..c19bb598f7
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ig.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ig_NG.dat b/awx/lib/site-packages/babel/localedata/ig_NG.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ig_NG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ii.dat b/awx/lib/site-packages/babel/localedata/ii.dat
new file mode 100644
index 0000000000..1b6499e90c
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ii.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ii_CN.dat b/awx/lib/site-packages/babel/localedata/ii_CN.dat
new file mode 100644
index 0000000000..81c8925439
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ii_CN.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/is.dat b/awx/lib/site-packages/babel/localedata/is.dat
new file mode 100644
index 0000000000..f1996ea61d
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/is.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/is_IS.dat b/awx/lib/site-packages/babel/localedata/is_IS.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/is_IS.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/it.dat b/awx/lib/site-packages/babel/localedata/it.dat
new file mode 100644
index 0000000000..b084d771c9
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/it.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/it_CH.dat b/awx/lib/site-packages/babel/localedata/it_CH.dat
new file mode 100644
index 0000000000..d37370cc80
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/it_CH.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/it_IT.dat b/awx/lib/site-packages/babel/localedata/it_IT.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/it_IT.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/it_SM.dat b/awx/lib/site-packages/babel/localedata/it_SM.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/it_SM.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ja.dat b/awx/lib/site-packages/babel/localedata/ja.dat
new file mode 100644
index 0000000000..7351a74748
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ja.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ja_JP.dat b/awx/lib/site-packages/babel/localedata/ja_JP.dat
new file mode 100644
index 0000000000..81c8925439
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ja_JP.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/jgo.dat b/awx/lib/site-packages/babel/localedata/jgo.dat
new file mode 100644
index 0000000000..616271b58e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/jgo.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/jgo_CM.dat b/awx/lib/site-packages/babel/localedata/jgo_CM.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/jgo_CM.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/jmc.dat b/awx/lib/site-packages/babel/localedata/jmc.dat
new file mode 100644
index 0000000000..ccc3b8369b
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/jmc.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/jmc_TZ.dat b/awx/lib/site-packages/babel/localedata/jmc_TZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/jmc_TZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ka.dat b/awx/lib/site-packages/babel/localedata/ka.dat
new file mode 100644
index 0000000000..1979e8aaf1
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ka.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ka_GE.dat b/awx/lib/site-packages/babel/localedata/ka_GE.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ka_GE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kab.dat b/awx/lib/site-packages/babel/localedata/kab.dat
new file mode 100644
index 0000000000..409ba3b0a9
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kab.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kab_DZ.dat b/awx/lib/site-packages/babel/localedata/kab_DZ.dat
new file mode 100644
index 0000000000..01c29af978
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/kab_DZ.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/kam.dat b/awx/lib/site-packages/babel/localedata/kam.dat
new file mode 100644
index 0000000000..70c78d0556
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kam.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kam_KE.dat b/awx/lib/site-packages/babel/localedata/kam_KE.dat
new file mode 100644
index 0000000000..356909133a
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/kam_KE.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/kde.dat b/awx/lib/site-packages/babel/localedata/kde.dat
new file mode 100644
index 0000000000..3468c0687d
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kde.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kde_TZ.dat b/awx/lib/site-packages/babel/localedata/kde_TZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kde_TZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kea.dat b/awx/lib/site-packages/babel/localedata/kea.dat
new file mode 100644
index 0000000000..36e669e1e1
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kea.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kea_CV.dat b/awx/lib/site-packages/babel/localedata/kea_CV.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kea_CV.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/khq.dat b/awx/lib/site-packages/babel/localedata/khq.dat
new file mode 100644
index 0000000000..4e900a7809
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/khq.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/khq_ML.dat b/awx/lib/site-packages/babel/localedata/khq_ML.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/khq_ML.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ki.dat b/awx/lib/site-packages/babel/localedata/ki.dat
new file mode 100644
index 0000000000..d30ce01f50
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ki.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ki_KE.dat b/awx/lib/site-packages/babel/localedata/ki_KE.dat
new file mode 100644
index 0000000000..356909133a
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ki_KE.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/kk.dat b/awx/lib/site-packages/babel/localedata/kk.dat
new file mode 100644
index 0000000000..3125ea545d
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kk.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kk_Cyrl.dat b/awx/lib/site-packages/babel/localedata/kk_Cyrl.dat
new file mode 100644
index 0000000000..27760a1c92
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kk_Cyrl.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kk_Cyrl_KZ.dat b/awx/lib/site-packages/babel/localedata/kk_Cyrl_KZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kk_Cyrl_KZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kkj.dat b/awx/lib/site-packages/babel/localedata/kkj.dat
new file mode 100644
index 0000000000..bd2d7b08a2
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kkj.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kkj_CM.dat b/awx/lib/site-packages/babel/localedata/kkj_CM.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kkj_CM.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kl.dat b/awx/lib/site-packages/babel/localedata/kl.dat
new file mode 100644
index 0000000000..5c13a69f58
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kl.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kl_GL.dat b/awx/lib/site-packages/babel/localedata/kl_GL.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/kl_GL.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/kln.dat b/awx/lib/site-packages/babel/localedata/kln.dat
new file mode 100644
index 0000000000..a5cc035256
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kln.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kln_KE.dat b/awx/lib/site-packages/babel/localedata/kln_KE.dat
new file mode 100644
index 0000000000..356909133a
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/kln_KE.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/km.dat b/awx/lib/site-packages/babel/localedata/km.dat
new file mode 100644
index 0000000000..4613359cfd
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/km.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/km_KH.dat b/awx/lib/site-packages/babel/localedata/km_KH.dat
new file mode 100644
index 0000000000..81c8925439
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/km_KH.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/kn.dat b/awx/lib/site-packages/babel/localedata/kn.dat
new file mode 100644
index 0000000000..c136cc5fed
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kn.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kn_IN.dat b/awx/lib/site-packages/babel/localedata/kn_IN.dat
new file mode 100644
index 0000000000..1564619c55
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/kn_IN.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ko.dat b/awx/lib/site-packages/babel/localedata/ko.dat
new file mode 100644
index 0000000000..5910e0d6d5
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ko.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ko_KP.dat b/awx/lib/site-packages/babel/localedata/ko_KP.dat
new file mode 100644
index 0000000000..4b4537f74c
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ko_KP.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ko_KR.dat b/awx/lib/site-packages/babel/localedata/ko_KR.dat
new file mode 100644
index 0000000000..81c8925439
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ko_KR.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/kok.dat b/awx/lib/site-packages/babel/localedata/kok.dat
new file mode 100644
index 0000000000..012c2e20ea
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kok.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kok_IN.dat b/awx/lib/site-packages/babel/localedata/kok_IN.dat
new file mode 100644
index 0000000000..1564619c55
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/kok_IN.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ks.dat b/awx/lib/site-packages/babel/localedata/ks.dat
new file mode 100644
index 0000000000..bd4a42723c
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ks.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ks_Arab.dat b/awx/lib/site-packages/babel/localedata/ks_Arab.dat
new file mode 100644
index 0000000000..27760a1c92
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ks_Arab.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ks_Arab_IN.dat b/awx/lib/site-packages/babel/localedata/ks_Arab_IN.dat
new file mode 100644
index 0000000000..1564619c55
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ks_Arab_IN.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ksb.dat b/awx/lib/site-packages/babel/localedata/ksb.dat
new file mode 100644
index 0000000000..4a1600561e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ksb.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ksb_TZ.dat b/awx/lib/site-packages/babel/localedata/ksb_TZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ksb_TZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ksf.dat b/awx/lib/site-packages/babel/localedata/ksf.dat
new file mode 100644
index 0000000000..11881d4407
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ksf.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ksf_CM.dat b/awx/lib/site-packages/babel/localedata/ksf_CM.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ksf_CM.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ksh.dat b/awx/lib/site-packages/babel/localedata/ksh.dat
new file mode 100644
index 0000000000..385e52e44a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ksh.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ksh_DE.dat b/awx/lib/site-packages/babel/localedata/ksh_DE.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ksh_DE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kw.dat b/awx/lib/site-packages/babel/localedata/kw.dat
new file mode 100644
index 0000000000..4056d0ad99
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/kw.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/kw_GB.dat b/awx/lib/site-packages/babel/localedata/kw_GB.dat
new file mode 100644
index 0000000000..401708ffe0
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/kw_GB.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ky.dat b/awx/lib/site-packages/babel/localedata/ky.dat
new file mode 100644
index 0000000000..cff43ec904
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ky.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ky_KG.dat b/awx/lib/site-packages/babel/localedata/ky_KG.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ky_KG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/lag.dat b/awx/lib/site-packages/babel/localedata/lag.dat
new file mode 100644
index 0000000000..922e70fb30
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lag.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/lag_TZ.dat b/awx/lib/site-packages/babel/localedata/lag_TZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lag_TZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/lg.dat b/awx/lib/site-packages/babel/localedata/lg.dat
new file mode 100644
index 0000000000..dbbf1eba33
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lg.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/lg_UG.dat b/awx/lib/site-packages/babel/localedata/lg_UG.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lg_UG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ln.dat b/awx/lib/site-packages/babel/localedata/ln.dat
new file mode 100644
index 0000000000..320079bba8
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ln.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ln_AO.dat b/awx/lib/site-packages/babel/localedata/ln_AO.dat
new file mode 100644
index 0000000000..dc23f4ecc1
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ln_AO.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ln_CD.dat b/awx/lib/site-packages/babel/localedata/ln_CD.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ln_CD.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ln_CF.dat b/awx/lib/site-packages/babel/localedata/ln_CF.dat
new file mode 100644
index 0000000000..4c19ce3b40
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ln_CF.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqML U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ln_CG.dat b/awx/lib/site-packages/babel/localedata/ln_CG.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/ln_CG.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/lo.dat b/awx/lib/site-packages/babel/localedata/lo.dat
new file mode 100644
index 0000000000..561bad36b2
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lo.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/lo_LA.dat b/awx/lib/site-packages/babel/localedata/lo_LA.dat
new file mode 100644
index 0000000000..81c8925439
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/lo_LA.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u.
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/lt.dat b/awx/lib/site-packages/babel/localedata/lt.dat
new file mode 100644
index 0000000000..c05b9188a8
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lt.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/lt_LT.dat b/awx/lib/site-packages/babel/localedata/lt_LT.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lt_LT.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/lu.dat b/awx/lib/site-packages/babel/localedata/lu.dat
new file mode 100644
index 0000000000..5d49557305
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lu.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/lu_CD.dat b/awx/lib/site-packages/babel/localedata/lu_CD.dat
new file mode 100644
index 0000000000..a7cee6f283
--- /dev/null
+++ b/awx/lib/site-packages/babel/localedata/lu_CD.dat
@@ -0,0 +1,4 @@
+€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq
+}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U
+time_zonesq}qUscriptsq}qUdecimal_formatsq}qU
+meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u.
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/luo.dat b/awx/lib/site-packages/babel/localedata/luo.dat new file mode 100644 index 0000000000..326e796e29 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/luo.dat differ diff --git a/awx/lib/site-packages/babel/localedata/luo_KE.dat b/awx/lib/site-packages/babel/localedata/luo_KE.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/luo_KE.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/luy.dat b/awx/lib/site-packages/babel/localedata/luy.dat new file mode 100644 index 0000000000..b9706a1feb Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/luy.dat differ diff --git a/awx/lib/site-packages/babel/localedata/luy_KE.dat b/awx/lib/site-packages/babel/localedata/luy_KE.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/luy_KE.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/lv.dat b/awx/lib/site-packages/babel/localedata/lv.dat new file mode 100644 index 0000000000..a3f770d489 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lv.dat differ diff --git a/awx/lib/site-packages/babel/localedata/lv_LV.dat b/awx/lib/site-packages/babel/localedata/lv_LV.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/lv_LV.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mas.dat b/awx/lib/site-packages/babel/localedata/mas.dat new file mode 100644 index 0000000000..6f17d0de79 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mas.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mas_KE.dat b/awx/lib/site-packages/babel/localedata/mas_KE.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/mas_KE.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/mas_TZ.dat b/awx/lib/site-packages/babel/localedata/mas_TZ.dat new file mode 100644 index 0000000000..5e7539548b Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mas_TZ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mer.dat b/awx/lib/site-packages/babel/localedata/mer.dat new file mode 100644 index 0000000000..265eac28a2 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mer.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mer_KE.dat b/awx/lib/site-packages/babel/localedata/mer_KE.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/mer_KE.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/mfe.dat b/awx/lib/site-packages/babel/localedata/mfe.dat new file mode 100644 index 0000000000..e146820f92 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mfe.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mfe_MU.dat b/awx/lib/site-packages/babel/localedata/mfe_MU.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mfe_MU.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mg.dat b/awx/lib/site-packages/babel/localedata/mg.dat new file mode 100644 index 0000000000..bbab19f030 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mg.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mg_MG.dat b/awx/lib/site-packages/babel/localedata/mg_MG.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mg_MG.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mgh.dat b/awx/lib/site-packages/babel/localedata/mgh.dat new file mode 100644 index 0000000000..12987fc6df Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mgh.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mgh_MZ.dat b/awx/lib/site-packages/babel/localedata/mgh_MZ.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/mgh_MZ.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/mgo.dat b/awx/lib/site-packages/babel/localedata/mgo.dat new file mode 100644 index 0000000000..c90ac45f6d Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mgo.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mgo_CM.dat b/awx/lib/site-packages/babel/localedata/mgo_CM.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mgo_CM.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mk.dat b/awx/lib/site-packages/babel/localedata/mk.dat new file mode 100644 index 0000000000..098b226262 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mk.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mk_MK.dat b/awx/lib/site-packages/babel/localedata/mk_MK.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mk_MK.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ml.dat b/awx/lib/site-packages/babel/localedata/ml.dat new file mode 100644 index 0000000000..df293ad9b1 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ml.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ml_IN.dat b/awx/lib/site-packages/babel/localedata/ml_IN.dat new file mode 100644 index 0000000000..1564619c55 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/ml_IN.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/mn.dat b/awx/lib/site-packages/babel/localedata/mn.dat new file mode 100644 index 0000000000..8b3efac675 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mn.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mn_Cyrl.dat b/awx/lib/site-packages/babel/localedata/mn_Cyrl.dat new file mode 100644 index 0000000000..27760a1c92 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mn_Cyrl.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mn_Cyrl_MN.dat b/awx/lib/site-packages/babel/localedata/mn_Cyrl_MN.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mn_Cyrl_MN.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mr.dat b/awx/lib/site-packages/babel/localedata/mr.dat new file mode 100644 index 0000000000..29dde4e7ea Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mr.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mr_IN.dat b/awx/lib/site-packages/babel/localedata/mr_IN.dat new file mode 100644 index 0000000000..1564619c55 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/mr_IN.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/ms.dat b/awx/lib/site-packages/babel/localedata/ms.dat new file mode 100644 index 0000000000..a6974ea4c4 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ms.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ms_Latn.dat b/awx/lib/site-packages/babel/localedata/ms_Latn.dat new file mode 100644 index 0000000000..1ba8202e3a Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ms_Latn.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ms_Latn_BN.dat b/awx/lib/site-packages/babel/localedata/ms_Latn_BN.dat new file mode 100644 index 0000000000..d76b0c96ce Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ms_Latn_BN.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ms_Latn_MY.dat b/awx/lib/site-packages/babel/localedata/ms_Latn_MY.dat new file mode 100644 index 0000000000..5080866361 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ms_Latn_MY.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ms_Latn_SG.dat b/awx/lib/site-packages/babel/localedata/ms_Latn_SG.dat new file mode 100644 index 0000000000..5120c3052f Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ms_Latn_SG.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mt.dat b/awx/lib/site-packages/babel/localedata/mt.dat new file mode 100644 index 0000000000..cbf2feb17d Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mt.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mt_MT.dat b/awx/lib/site-packages/babel/localedata/mt_MT.dat new file mode 100644 index 0000000000..81c8925439 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/mt_MT.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/mua.dat b/awx/lib/site-packages/babel/localedata/mua.dat new file mode 100644 index 0000000000..dd4881c8fa Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mua.dat differ diff --git a/awx/lib/site-packages/babel/localedata/mua_CM.dat b/awx/lib/site-packages/babel/localedata/mua_CM.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/mua_CM.dat differ diff --git a/awx/lib/site-packages/babel/localedata/my.dat b/awx/lib/site-packages/babel/localedata/my.dat new file mode 100644 index 0000000000..6482e5ea0f Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/my.dat differ diff --git a/awx/lib/site-packages/babel/localedata/my_MM.dat b/awx/lib/site-packages/babel/localedata/my_MM.dat new file mode 100644 index 0000000000..81c8925439 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/my_MM.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/naq.dat b/awx/lib/site-packages/babel/localedata/naq.dat new file mode 100644 index 0000000000..87fc01f852 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/naq.dat differ diff --git a/awx/lib/site-packages/babel/localedata/naq_NA.dat b/awx/lib/site-packages/babel/localedata/naq_NA.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/naq_NA.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nb.dat b/awx/lib/site-packages/babel/localedata/nb.dat new file mode 100644 index 0000000000..aa34863b4e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nb.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nb_NO.dat b/awx/lib/site-packages/babel/localedata/nb_NO.dat new file mode 100644 index 0000000000..f981f10762 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nb_NO.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nd.dat b/awx/lib/site-packages/babel/localedata/nd.dat new file mode 100644 index 0000000000..231008ec88 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nd.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nd_ZW.dat b/awx/lib/site-packages/babel/localedata/nd_ZW.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/nd_ZW.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/ne.dat b/awx/lib/site-packages/babel/localedata/ne.dat new file mode 100644 index 0000000000..d078fcf401 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ne.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ne_IN.dat b/awx/lib/site-packages/babel/localedata/ne_IN.dat new file mode 100644 index 0000000000..8044567a61 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ne_IN.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ne_NP.dat b/awx/lib/site-packages/babel/localedata/ne_NP.dat new file mode 100644 index 0000000000..81c8925439 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/ne_NP.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/nl.dat b/awx/lib/site-packages/babel/localedata/nl.dat new file mode 100644 index 0000000000..ace2ad6470 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nl.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nl_AW.dat b/awx/lib/site-packages/babel/localedata/nl_AW.dat new file mode 100644 index 0000000000..6f68c997cd Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nl_AW.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nl_BE.dat b/awx/lib/site-packages/babel/localedata/nl_BE.dat new file mode 100644 index 0000000000..c722c78caa Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nl_BE.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nl_CW.dat b/awx/lib/site-packages/babel/localedata/nl_CW.dat new file mode 100644 index 0000000000..11b34d922f Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nl_CW.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nl_NL.dat b/awx/lib/site-packages/babel/localedata/nl_NL.dat new file mode 100644 index 0000000000..f981f10762 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nl_NL.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nl_SR.dat b/awx/lib/site-packages/babel/localedata/nl_SR.dat new file mode 100644 index 0000000000..91804fbfc9 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nl_SR.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nl_SX.dat b/awx/lib/site-packages/babel/localedata/nl_SX.dat new file mode 100644 index 0000000000..11b34d922f Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nl_SX.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nmg.dat b/awx/lib/site-packages/babel/localedata/nmg.dat new file mode 100644 index 0000000000..41987a1ec7 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nmg.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nmg_CM.dat b/awx/lib/site-packages/babel/localedata/nmg_CM.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nmg_CM.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nn.dat b/awx/lib/site-packages/babel/localedata/nn.dat new file mode 100644 index 0000000000..c810a884d0 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nn.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nn_NO.dat b/awx/lib/site-packages/babel/localedata/nn_NO.dat new file mode 100644 index 0000000000..f981f10762 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nn_NO.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nnh.dat b/awx/lib/site-packages/babel/localedata/nnh.dat new file mode 100644 index 0000000000..28dbf242e6 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nnh.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nnh_CM.dat b/awx/lib/site-packages/babel/localedata/nnh_CM.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nnh_CM.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nr.dat b/awx/lib/site-packages/babel/localedata/nr.dat new file mode 100644 index 0000000000..019afc0a40 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nr.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nr_ZA.dat 
b/awx/lib/site-packages/babel/localedata/nr_ZA.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/nr_ZA.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/nso.dat b/awx/lib/site-packages/babel/localedata/nso.dat new file mode 100644 index 0000000000..0a37bd063b Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nso.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nso_ZA.dat b/awx/lib/site-packages/babel/localedata/nso_ZA.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/nso_ZA.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/nus.dat b/awx/lib/site-packages/babel/localedata/nus.dat new file mode 100644 index 0000000000..3ad9c0cc18 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nus.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nus_SD.dat b/awx/lib/site-packages/babel/localedata/nus_SD.dat new file mode 100644 index 0000000000..01c29af978 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/nus_SD.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/nyn.dat b/awx/lib/site-packages/babel/localedata/nyn.dat new file mode 100644 index 0000000000..f13b314bd2 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nyn.dat differ diff --git a/awx/lib/site-packages/babel/localedata/nyn_UG.dat b/awx/lib/site-packages/babel/localedata/nyn_UG.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/nyn_UG.dat differ diff --git a/awx/lib/site-packages/babel/localedata/om.dat b/awx/lib/site-packages/babel/localedata/om.dat new file mode 100644 index 0000000000..53213cfe23 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/om.dat differ diff --git a/awx/lib/site-packages/babel/localedata/om_ET.dat b/awx/lib/site-packages/babel/localedata/om_ET.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/om_ET.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/om_KE.dat b/awx/lib/site-packages/babel/localedata/om_KE.dat new file mode 100644 index 0000000000..b126edd7f1 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/om_KE.dat differ diff --git a/awx/lib/site-packages/babel/localedata/or.dat b/awx/lib/site-packages/babel/localedata/or.dat new file mode 100644 index 0000000000..c01ab44228 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/or.dat differ diff --git a/awx/lib/site-packages/babel/localedata/or_IN.dat b/awx/lib/site-packages/babel/localedata/or_IN.dat new file mode 100644 index 0000000000..1564619c55 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/or_IN.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/os.dat b/awx/lib/site-packages/babel/localedata/os.dat new file mode 100644 index 0000000000..0a01ac8b1c Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/os.dat differ diff --git a/awx/lib/site-packages/babel/localedata/os_GE.dat b/awx/lib/site-packages/babel/localedata/os_GE.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/os_GE.dat differ diff --git a/awx/lib/site-packages/babel/localedata/os_RU.dat b/awx/lib/site-packages/babel/localedata/os_RU.dat new file mode 100644 index 0000000000..b1b0d843d5 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/os_RU.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pa.dat b/awx/lib/site-packages/babel/localedata/pa.dat new file mode 100644 index 0000000000..100c377967 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pa.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pa_Arab.dat b/awx/lib/site-packages/babel/localedata/pa_Arab.dat new file mode 100644 index 0000000000..b32025cf66 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pa_Arab.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pa_Arab_PK.dat b/awx/lib/site-packages/babel/localedata/pa_Arab_PK.dat new file mode 100644 index 0000000000..81c8925439 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/pa_Arab_PK.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/pa_Guru.dat b/awx/lib/site-packages/babel/localedata/pa_Guru.dat new file mode 100644 index 0000000000..27760a1c92 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pa_Guru.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pa_Guru_IN.dat b/awx/lib/site-packages/babel/localedata/pa_Guru_IN.dat new file mode 100644 index 0000000000..1564619c55 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/pa_Guru_IN.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/pl.dat b/awx/lib/site-packages/babel/localedata/pl.dat new file mode 100644 index 0000000000..11abe814dc Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pl.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pl_PL.dat b/awx/lib/site-packages/babel/localedata/pl_PL.dat new file mode 100644 index 0000000000..f981f10762 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pl_PL.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ps.dat b/awx/lib/site-packages/babel/localedata/ps.dat new file mode 100644 index 0000000000..6521e4b33f Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ps.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ps_AF.dat b/awx/lib/site-packages/babel/localedata/ps_AF.dat new file mode 100644 index 0000000000..150c7e3b45 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/ps_AF.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/pt.dat b/awx/lib/site-packages/babel/localedata/pt.dat new file mode 100644 index 0000000000..bf22a14e06 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pt_AO.dat b/awx/lib/site-packages/babel/localedata/pt_AO.dat new file mode 100644 index 0000000000..cd4cd79d3d Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt_AO.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pt_BR.dat b/awx/lib/site-packages/babel/localedata/pt_BR.dat new file mode 100644 index 0000000000..81c8925439 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/pt_BR.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/pt_CV.dat b/awx/lib/site-packages/babel/localedata/pt_CV.dat new file mode 100644 index 0000000000..98f10615f6 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt_CV.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pt_GW.dat b/awx/lib/site-packages/babel/localedata/pt_GW.dat new file mode 100644 index 0000000000..98f10615f6 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt_GW.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pt_MO.dat b/awx/lib/site-packages/babel/localedata/pt_MO.dat new file mode 100644 index 0000000000..d40c46f1fc Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt_MO.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pt_MZ.dat b/awx/lib/site-packages/babel/localedata/pt_MZ.dat new file mode 100644 index 0000000000..bfd2551065 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt_MZ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pt_PT.dat b/awx/lib/site-packages/babel/localedata/pt_PT.dat new file mode 100644 index 0000000000..203b692c8a Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt_PT.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pt_ST.dat b/awx/lib/site-packages/babel/localedata/pt_ST.dat new file mode 100644 index 0000000000..30882bdedf Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt_ST.dat differ diff --git a/awx/lib/site-packages/babel/localedata/pt_TL.dat b/awx/lib/site-packages/babel/localedata/pt_TL.dat new file mode 100644 index 0000000000..6b5a0f9f72 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/pt_TL.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rm.dat b/awx/lib/site-packages/babel/localedata/rm.dat new file mode 100644 index 0000000000..c03c5aed97 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rm.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rm_CH.dat b/awx/lib/site-packages/babel/localedata/rm_CH.dat new file mode 100644 index 0000000000..f981f10762 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rm_CH.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rn.dat b/awx/lib/site-packages/babel/localedata/rn.dat new file mode 100644 index 0000000000..50efb2e990 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rn.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rn_BI.dat b/awx/lib/site-packages/babel/localedata/rn_BI.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rn_BI.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ro.dat b/awx/lib/site-packages/babel/localedata/ro.dat new file mode 100644 index 0000000000..18ee70fa44 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ro.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ro_MD.dat b/awx/lib/site-packages/babel/localedata/ro_MD.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ro_MD.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ro_RO.dat b/awx/lib/site-packages/babel/localedata/ro_RO.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ro_RO.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rof.dat 
b/awx/lib/site-packages/babel/localedata/rof.dat new file mode 100644 index 0000000000..28f38de9cb Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rof.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rof_TZ.dat b/awx/lib/site-packages/babel/localedata/rof_TZ.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rof_TZ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/root.dat b/awx/lib/site-packages/babel/localedata/root.dat new file mode 100644 index 0000000000..80ab0eb763 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/root.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ru.dat b/awx/lib/site-packages/babel/localedata/ru.dat new file mode 100644 index 0000000000..fc116df80c Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ru.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ru_BY.dat b/awx/lib/site-packages/babel/localedata/ru_BY.dat new file mode 100644 index 0000000000..640cb7967e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ru_BY.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ru_KG.dat b/awx/lib/site-packages/babel/localedata/ru_KG.dat new file mode 100644 index 0000000000..87f3389642 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ru_KG.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ru_KZ.dat b/awx/lib/site-packages/babel/localedata/ru_KZ.dat new file mode 100644 index 0000000000..62d0b24636 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ru_KZ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ru_MD.dat b/awx/lib/site-packages/babel/localedata/ru_MD.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ru_MD.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ru_RU.dat b/awx/lib/site-packages/babel/localedata/ru_RU.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ru_RU.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ru_UA.dat b/awx/lib/site-packages/babel/localedata/ru_UA.dat new file mode 100644 index 0000000000..c726ba8109 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ru_UA.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rw.dat b/awx/lib/site-packages/babel/localedata/rw.dat new file mode 100644 index 0000000000..0403c67b76 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rw.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rw_RW.dat b/awx/lib/site-packages/babel/localedata/rw_RW.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rw_RW.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rwk.dat b/awx/lib/site-packages/babel/localedata/rwk.dat new file mode 100644 index 0000000000..d18d314480 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rwk.dat differ diff --git a/awx/lib/site-packages/babel/localedata/rwk_TZ.dat b/awx/lib/site-packages/babel/localedata/rwk_TZ.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/rwk_TZ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sah.dat b/awx/lib/site-packages/babel/localedata/sah.dat new file mode 100644 index 0000000000..38fb037c5b Binary 
files /dev/null and b/awx/lib/site-packages/babel/localedata/sah.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sah_RU.dat b/awx/lib/site-packages/babel/localedata/sah_RU.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sah_RU.dat differ diff --git a/awx/lib/site-packages/babel/localedata/saq.dat b/awx/lib/site-packages/babel/localedata/saq.dat new file mode 100644 index 0000000000..858cb6a020 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/saq.dat differ diff --git a/awx/lib/site-packages/babel/localedata/saq_KE.dat b/awx/lib/site-packages/babel/localedata/saq_KE.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/saq_KE.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/sbp.dat b/awx/lib/site-packages/babel/localedata/sbp.dat new file mode 100644 index 0000000000..36f5081126 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sbp.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sbp_TZ.dat b/awx/lib/site-packages/babel/localedata/sbp_TZ.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sbp_TZ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/se.dat b/awx/lib/site-packages/babel/localedata/se.dat new file mode 100644 index 0000000000..4093ad3e75 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/se.dat differ diff --git a/awx/lib/site-packages/babel/localedata/se_FI.dat b/awx/lib/site-packages/babel/localedata/se_FI.dat new file mode 100644 index 0000000000..561ae0fdb8 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/se_FI.dat differ diff --git a/awx/lib/site-packages/babel/localedata/se_NO.dat b/awx/lib/site-packages/babel/localedata/se_NO.dat new file mode 100644 index 0000000000..f981f10762 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/se_NO.dat differ diff --git a/awx/lib/site-packages/babel/localedata/seh.dat b/awx/lib/site-packages/babel/localedata/seh.dat new file mode 100644 index 0000000000..639220100f Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/seh.dat differ diff --git a/awx/lib/site-packages/babel/localedata/seh_MZ.dat b/awx/lib/site-packages/babel/localedata/seh_MZ.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/seh_MZ.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/ses.dat b/awx/lib/site-packages/babel/localedata/ses.dat new file mode 100644 index 0000000000..d537b3f856 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ses.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ses_ML.dat b/awx/lib/site-packages/babel/localedata/ses_ML.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ses_ML.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sg.dat b/awx/lib/site-packages/babel/localedata/sg.dat new file mode 100644 index 0000000000..f7e398c823 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sg.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sg_CF.dat b/awx/lib/site-packages/babel/localedata/sg_CF.dat new file mode 100644 index 0000000000..a7cee6f283 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/sg_CF.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/shi.dat b/awx/lib/site-packages/babel/localedata/shi.dat new file mode 100644 index 0000000000..8adbb59a88 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/shi.dat differ diff --git a/awx/lib/site-packages/babel/localedata/shi_Latn.dat b/awx/lib/site-packages/babel/localedata/shi_Latn.dat new file mode 100644 index 0000000000..1cb6147cbc Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/shi_Latn.dat differ diff --git a/awx/lib/site-packages/babel/localedata/shi_Latn_MA.dat b/awx/lib/site-packages/babel/localedata/shi_Latn_MA.dat new file mode 100644 index 0000000000..01c29af978 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/shi_Latn_MA.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/shi_Tfng.dat b/awx/lib/site-packages/babel/localedata/shi_Tfng.dat new file mode 100644 index 0000000000..21966f5f74 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/shi_Tfng.dat differ diff --git a/awx/lib/site-packages/babel/localedata/shi_Tfng_MA.dat b/awx/lib/site-packages/babel/localedata/shi_Tfng_MA.dat new file mode 100644 index 0000000000..01c29af978 --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/shi_Tfng_MA.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/si.dat b/awx/lib/site-packages/babel/localedata/si.dat new file mode 100644 index 0000000000..ecb30a7658 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/si.dat differ diff --git a/awx/lib/site-packages/babel/localedata/si_LK.dat b/awx/lib/site-packages/babel/localedata/si_LK.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/si_LK.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sk.dat b/awx/lib/site-packages/babel/localedata/sk.dat new file mode 100644 index 0000000000..59fa86d2d6 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sk.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sk_SK.dat b/awx/lib/site-packages/babel/localedata/sk_SK.dat new file mode 100644 index 0000000000..f981f10762 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sk_SK.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sl.dat b/awx/lib/site-packages/babel/localedata/sl.dat new file mode 100644 index 0000000000..7810cc351c Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sl.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sl_SI.dat b/awx/lib/site-packages/babel/localedata/sl_SI.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sl_SI.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sn.dat b/awx/lib/site-packages/babel/localedata/sn.dat new file mode 100644 index 0000000000..29c97a36e1 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sn.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sn_ZW.dat b/awx/lib/site-packages/babel/localedata/sn_ZW.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/sn_ZW.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/awx/lib/site-packages/babel/localedata/so.dat b/awx/lib/site-packages/babel/localedata/so.dat new file mode 100644 index 0000000000..7844a44567 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/so.dat differ diff --git a/awx/lib/site-packages/babel/localedata/so_DJ.dat b/awx/lib/site-packages/babel/localedata/so_DJ.dat new file mode 100644 index 0000000000..b0ee442122 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/so_DJ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/so_ET.dat b/awx/lib/site-packages/babel/localedata/so_ET.dat new file mode 100644 index 0000000000..b432229662 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/so_ET.dat differ diff --git a/awx/lib/site-packages/babel/localedata/so_KE.dat b/awx/lib/site-packages/babel/localedata/so_KE.dat new file mode 100644 index 0000000000..b126edd7f1 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/so_KE.dat differ diff --git a/awx/lib/site-packages/babel/localedata/so_SO.dat b/awx/lib/site-packages/babel/localedata/so_SO.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/so_SO.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sq.dat b/awx/lib/site-packages/babel/localedata/sq.dat new file mode 100644 index 0000000000..a03fcf0a3d Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sq.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sq_AL.dat b/awx/lib/site-packages/babel/localedata/sq_AL.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sq_AL.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sq_MK.dat b/awx/lib/site-packages/babel/localedata/sq_MK.dat new file mode 100644 index 0000000000..65bfa5c8c9 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sq_MK.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sq_XK.dat b/awx/lib/site-packages/babel/localedata/sq_XK.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sq_XK.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr.dat b/awx/lib/site-packages/babel/localedata/sr.dat new file mode 100644 index 0000000000..ef9cd2cc33 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Cyrl.dat b/awx/lib/site-packages/babel/localedata/sr_Cyrl.dat new file mode 100644 index 0000000000..a882c5bcaf Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Cyrl.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Cyrl_BA.dat b/awx/lib/site-packages/babel/localedata/sr_Cyrl_BA.dat new file mode 100644 index 0000000000..8d9fa9f5a3 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Cyrl_BA.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Cyrl_ME.dat b/awx/lib/site-packages/babel/localedata/sr_Cyrl_ME.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Cyrl_ME.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Cyrl_RS.dat b/awx/lib/site-packages/babel/localedata/sr_Cyrl_RS.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Cyrl_RS.dat differ diff --git 
a/awx/lib/site-packages/babel/localedata/sr_Cyrl_XK.dat b/awx/lib/site-packages/babel/localedata/sr_Cyrl_XK.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Cyrl_XK.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Latn.dat b/awx/lib/site-packages/babel/localedata/sr_Latn.dat new file mode 100644 index 0000000000..57b44f1ea7 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Latn.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Latn_BA.dat b/awx/lib/site-packages/babel/localedata/sr_Latn_BA.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Latn_BA.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Latn_ME.dat b/awx/lib/site-packages/babel/localedata/sr_Latn_ME.dat new file mode 100644 index 0000000000..b0f4d9b306 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Latn_ME.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Latn_RS.dat b/awx/lib/site-packages/babel/localedata/sr_Latn_RS.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Latn_RS.dat differ diff --git a/awx/lib/site-packages/babel/localedata/sr_Latn_XK.dat b/awx/lib/site-packages/babel/localedata/sr_Latn_XK.dat new file mode 100644 index 0000000000..074603a416 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sr_Latn_XK.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ss.dat b/awx/lib/site-packages/babel/localedata/ss.dat new file mode 100644 index 0000000000..c7807243d0 Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ss.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ss_SZ.dat b/awx/lib/site-packages/babel/localedata/ss_SZ.dat new file mode 100644 index 0000000000..67b749dd3e Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ss_SZ.dat differ diff --git a/awx/lib/site-packages/babel/localedata/ss_ZA.dat b/awx/lib/site-packages/babel/localedata/ss_ZA.dat new file mode 100644 index 0000000000..356909133a --- /dev/null +++ b/awx/lib/site-packages/babel/localedata/ss_ZA.dat @@ -0,0 +1,4 @@ +€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq +}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U +time_zonesq}qUscriptsq}qUdecimal_formatsq}qU +meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file
diff --git a/awx/lib/site-packages/babel/localedata/ssy.dat b/awx/lib/site-packages/babel/localedata/ssy.dat
new file mode 100644
index 0000000000..926fd85d84
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ssy.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ssy_ER.dat b/awx/lib/site-packages/babel/localedata/ssy_ER.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ssy_ER.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/st.dat b/awx/lib/site-packages/babel/localedata/st.dat
new file mode 100644
index 0000000000..d19795e3e2
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/st.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/st_LS.dat b/awx/lib/site-packages/babel/localedata/st_LS.dat
new file mode 100644
index 0000000000..348a167849
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/st_LS.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/st_ZA.dat b/awx/lib/site-packages/babel/localedata/st_ZA.dat
new file mode 100644
index 0000000000..356909133a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/st_ZA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/sv.dat b/awx/lib/site-packages/babel/localedata/sv.dat
new file mode 100644
index 0000000000..bce70f1513
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sv.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/sv_AX.dat b/awx/lib/site-packages/babel/localedata/sv_AX.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sv_AX.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/sv_FI.dat b/awx/lib/site-packages/babel/localedata/sv_FI.dat
new file mode 100644
index 0000000000..f42189dcea
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sv_FI.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/sv_SE.dat b/awx/lib/site-packages/babel/localedata/sv_SE.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sv_SE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/sw.dat b/awx/lib/site-packages/babel/localedata/sw.dat
new file mode 100644
index 0000000000..6c525cad5e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sw.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/sw_KE.dat b/awx/lib/site-packages/babel/localedata/sw_KE.dat
new file mode 100644
index 0000000000..3c922118c0
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sw_KE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/sw_TZ.dat b/awx/lib/site-packages/babel/localedata/sw_TZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sw_TZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/sw_UG.dat b/awx/lib/site-packages/babel/localedata/sw_UG.dat
new file mode 100644
index 0000000000..3b68eaa1a1
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/sw_UG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/swc.dat b/awx/lib/site-packages/babel/localedata/swc.dat
new file mode 100644
index 0000000000..247ea25727
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/swc.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/swc_CD.dat b/awx/lib/site-packages/babel/localedata/swc_CD.dat
new file mode 100644
index 0000000000..a7cee6f283
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/swc_CD.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ta.dat b/awx/lib/site-packages/babel/localedata/ta.dat
new file mode 100644
index 0000000000..9115227a99
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ta.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ta_IN.dat b/awx/lib/site-packages/babel/localedata/ta_IN.dat
new file mode 100644
index 0000000000..1564619c55
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ta_IN.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ta_LK.dat b/awx/lib/site-packages/babel/localedata/ta_LK.dat
new file mode 100644
index 0000000000..482cab29ef
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ta_LK.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ta_MY.dat b/awx/lib/site-packages/babel/localedata/ta_MY.dat
new file mode 100644
index 0000000000..d3980d49fe
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ta_MY.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ta_SG.dat b/awx/lib/site-packages/babel/localedata/ta_SG.dat
new file mode 100644
index 0000000000..d2b3c0c963
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ta_SG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/te.dat b/awx/lib/site-packages/babel/localedata/te.dat
new file mode 100644
index 0000000000..82dde54f43
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/te.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/te_IN.dat b/awx/lib/site-packages/babel/localedata/te_IN.dat
new file mode 100644
index 0000000000..1564619c55
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/te_IN.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/teo.dat b/awx/lib/site-packages/babel/localedata/teo.dat
new file mode 100644
index 0000000000..15490c3aa6
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/teo.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/teo_KE.dat b/awx/lib/site-packages/babel/localedata/teo_KE.dat
new file mode 100644
index 0000000000..b126edd7f1
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/teo_KE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/teo_UG.dat b/awx/lib/site-packages/babel/localedata/teo_UG.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/teo_UG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tg.dat b/awx/lib/site-packages/babel/localedata/tg.dat
new file mode 100644
index 0000000000..2ca7e3c87d
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tg.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tg_Cyrl.dat b/awx/lib/site-packages/babel/localedata/tg_Cyrl.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tg_Cyrl.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tg_Cyrl_TJ.dat b/awx/lib/site-packages/babel/localedata/tg_Cyrl_TJ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tg_Cyrl_TJ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/th.dat b/awx/lib/site-packages/babel/localedata/th.dat
new file mode 100644
index 0000000000..e052aa592f
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/th.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/th_TH.dat b/awx/lib/site-packages/babel/localedata/th_TH.dat
new file mode 100644
index 0000000000..81c8925439
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/th_TH.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ti.dat b/awx/lib/site-packages/babel/localedata/ti.dat
new file mode 100644
index 0000000000..b5ce842beb
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ti.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ti_ER.dat b/awx/lib/site-packages/babel/localedata/ti_ER.dat
new file mode 100644
index 0000000000..1e2f1e3098
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ti_ER.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ti_ET.dat b/awx/lib/site-packages/babel/localedata/ti_ET.dat
new file mode 100644
index 0000000000..356909133a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ti_ET.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tig.dat b/awx/lib/site-packages/babel/localedata/tig.dat
new file mode 100644
index 0000000000..8e82e8f613
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tig.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tig_ER.dat b/awx/lib/site-packages/babel/localedata/tig_ER.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tig_ER.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tn.dat b/awx/lib/site-packages/babel/localedata/tn.dat
new file mode 100644
index 0000000000..d1a3042814
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tn.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tn_BW.dat b/awx/lib/site-packages/babel/localedata/tn_BW.dat
new file mode 100644
index 0000000000..325d2b09dd
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tn_BW.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tn_ZA.dat b/awx/lib/site-packages/babel/localedata/tn_ZA.dat
new file mode 100644
index 0000000000..356909133a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tn_ZA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/to.dat b/awx/lib/site-packages/babel/localedata/to.dat
new file mode 100644
index 0000000000..80c436141b
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/to.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/to_TO.dat b/awx/lib/site-packages/babel/localedata/to_TO.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/to_TO.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tr.dat b/awx/lib/site-packages/babel/localedata/tr.dat
new file mode 100644
index 0000000000..292d00717e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tr.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tr_CY.dat b/awx/lib/site-packages/babel/localedata/tr_CY.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tr_CY.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tr_TR.dat b/awx/lib/site-packages/babel/localedata/tr_TR.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tr_TR.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ts.dat b/awx/lib/site-packages/babel/localedata/ts.dat
new file mode 100644
index 0000000000..c24c9e40da
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ts.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ts_ZA.dat b/awx/lib/site-packages/babel/localedata/ts_ZA.dat
new file mode 100644
index 0000000000..356909133a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ts_ZA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/twq.dat b/awx/lib/site-packages/babel/localedata/twq.dat
new file mode 100644
index 0000000000..f9c3773a0a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/twq.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/twq_NE.dat b/awx/lib/site-packages/babel/localedata/twq_NE.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/twq_NE.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tzm.dat b/awx/lib/site-packages/babel/localedata/tzm.dat
new file mode 100644
index 0000000000..2543fb3902
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tzm.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tzm_Latn.dat b/awx/lib/site-packages/babel/localedata/tzm_Latn.dat
new file mode 100644
index 0000000000..500194c51b
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tzm_Latn.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/tzm_Latn_MA.dat b/awx/lib/site-packages/babel/localedata/tzm_Latn_MA.dat
new file mode 100644
index 0000000000..01c29af978
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/tzm_Latn_MA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uk.dat b/awx/lib/site-packages/babel/localedata/uk.dat
new file mode 100644
index 0000000000..2ce73b767c
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uk.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uk_UA.dat b/awx/lib/site-packages/babel/localedata/uk_UA.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uk_UA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ur.dat b/awx/lib/site-packages/babel/localedata/ur.dat
new file mode 100644
index 0000000000..325b54153f
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ur.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ur_IN.dat b/awx/lib/site-packages/babel/localedata/ur_IN.dat
new file mode 100644
index 0000000000..a8f52387cc
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ur_IN.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ur_PK.dat b/awx/lib/site-packages/babel/localedata/ur_PK.dat
new file mode 100644
index 0000000000..81c8925439
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ur_PK.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uz.dat b/awx/lib/site-packages/babel/localedata/uz.dat
new file mode 100644
index 0000000000..799049cf4c
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uz.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uz_Arab.dat b/awx/lib/site-packages/babel/localedata/uz_Arab.dat
new file mode 100644
index 0000000000..29426be7ae
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uz_Arab.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uz_Arab_AF.dat b/awx/lib/site-packages/babel/localedata/uz_Arab_AF.dat
new file mode 100644
index 0000000000..150c7e3b45
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uz_Arab_AF.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uz_Cyrl.dat b/awx/lib/site-packages/babel/localedata/uz_Cyrl.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uz_Cyrl.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uz_Cyrl_UZ.dat b/awx/lib/site-packages/babel/localedata/uz_Cyrl_UZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uz_Cyrl_UZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uz_Latn.dat b/awx/lib/site-packages/babel/localedata/uz_Latn.dat
new file mode 100644
index 0000000000..febdc7c34f
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uz_Latn.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/uz_Latn_UZ.dat b/awx/lib/site-packages/babel/localedata/uz_Latn_UZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/uz_Latn_UZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vai.dat b/awx/lib/site-packages/babel/localedata/vai.dat
new file mode 100644
index 0000000000..8decb6febc
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vai.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vai_Latn.dat b/awx/lib/site-packages/babel/localedata/vai_Latn.dat
new file mode 100644
index 0000000000..8b6be24045
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vai_Latn.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vai_Latn_LR.dat b/awx/lib/site-packages/babel/localedata/vai_Latn_LR.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vai_Latn_LR.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vai_Vaii.dat b/awx/lib/site-packages/babel/localedata/vai_Vaii.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vai_Vaii.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vai_Vaii_LR.dat b/awx/lib/site-packages/babel/localedata/vai_Vaii_LR.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vai_Vaii_LR.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ve.dat b/awx/lib/site-packages/babel/localedata/ve.dat
new file mode 100644
index 0000000000..a547fffa0a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ve.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/ve_ZA.dat b/awx/lib/site-packages/babel/localedata/ve_ZA.dat
new file mode 100644
index 0000000000..356909133a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/ve_ZA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vi.dat b/awx/lib/site-packages/babel/localedata/vi.dat
new file mode 100644
index 0000000000..2c1eb2d9fc
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vi.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vi_VN.dat b/awx/lib/site-packages/babel/localedata/vi_VN.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vi_VN.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vo.dat b/awx/lib/site-packages/babel/localedata/vo.dat
new file mode 100644
index 0000000000..979ea71ed5
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vo.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vun.dat b/awx/lib/site-packages/babel/localedata/vun.dat
new file mode 100644
index 0000000000..f023b1381a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vun.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/vun_TZ.dat b/awx/lib/site-packages/babel/localedata/vun_TZ.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/vun_TZ.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/wae.dat b/awx/lib/site-packages/babel/localedata/wae.dat
new file mode 100644
index 0000000000..98d0869e64
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/wae.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/wae_CH.dat b/awx/lib/site-packages/babel/localedata/wae_CH.dat
new file mode 100644
index 0000000000..f981f10762
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/wae_CH.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/wal.dat b/awx/lib/site-packages/babel/localedata/wal.dat
new file mode 100644
index 0000000000..39a5a73b8d
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/wal.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/wal_ET.dat b/awx/lib/site-packages/babel/localedata/wal_ET.dat
new file mode 100644
index 0000000000..356909133a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/wal_ET.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/xh.dat b/awx/lib/site-packages/babel/localedata/xh.dat
new file mode 100644
index 0000000000..86f8bdc36f
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/xh.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/xh_ZA.dat b/awx/lib/site-packages/babel/localedata/xh_ZA.dat
new file mode 100644
index 0000000000..356909133a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/xh_ZA.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/xog.dat b/awx/lib/site-packages/babel/localedata/xog.dat
new file mode 100644
index 0000000000..f660164e28
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/xog.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/xog_UG.dat b/awx/lib/site-packages/babel/localedata/xog_UG.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/xog_UG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/yav.dat b/awx/lib/site-packages/babel/localedata/yav.dat
new file mode 100644
index 0000000000..43d7148f6a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/yav.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/yav_CM.dat b/awx/lib/site-packages/babel/localedata/yav_CM.dat
new file mode 100644
index 0000000000..074603a416
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/yav_CM.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/yo.dat b/awx/lib/site-packages/babel/localedata/yo.dat
new file mode 100644
index 0000000000..46120718eb
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/yo.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/yo_NG.dat b/awx/lib/site-packages/babel/localedata/yo_NG.dat
new file mode 100644
index 0000000000..67b749dd3e
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/yo_NG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh.dat b/awx/lib/site-packages/babel/localedata/zh.dat
new file mode 100644
index 0000000000..cb916a89ef
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hans.dat b/awx/lib/site-packages/babel/localedata/zh_Hans.dat
new file mode 100644
index 0000000000..980ab6b6fc
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hans.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hans_CN.dat b/awx/lib/site-packages/babel/localedata/zh_Hans_CN.dat
new file mode 100644
index 0000000000..81c8925439
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hans_CN.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hans_HK.dat b/awx/lib/site-packages/babel/localedata/zh_Hans_HK.dat
new file mode 100644
index 0000000000..3d1a848434
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hans_HK.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hans_MO.dat b/awx/lib/site-packages/babel/localedata/zh_Hans_MO.dat
new file mode 100644
index 0000000000..7caa160409
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hans_MO.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hans_SG.dat b/awx/lib/site-packages/babel/localedata/zh_Hans_SG.dat
new file mode 100644
index 0000000000..7c57b7bf38
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hans_SG.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hant.dat b/awx/lib/site-packages/babel/localedata/zh_Hant.dat
new file mode 100644
index 0000000000..6e093dd2b5
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hant.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hant_HK.dat b/awx/lib/site-packages/babel/localedata/zh_Hant_HK.dat
new file mode 100644
index 0000000000..f5a59a1eab
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hant_HK.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hant_MO.dat b/awx/lib/site-packages/babel/localedata/zh_Hant_MO.dat
new file mode 100644
index 0000000000..7ce4ddc05c
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hant_MO.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zh_Hant_TW.dat b/awx/lib/site-packages/babel/localedata/zh_Hant_TW.dat
new file mode 100644
index 0000000000..81c8925439
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zh_Hant_TW.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zu.dat b/awx/lib/site-packages/babel/localedata/zu.dat
new file mode 100644
index 0000000000..99b984e830
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zu.dat differ
diff --git a/awx/lib/site-packages/babel/localedata/zu_ZA.dat b/awx/lib/site-packages/babel/localedata/zu_ZA.dat
new file mode 100644
index 0000000000..356909133a
Binary files /dev/null and b/awx/lib/site-packages/babel/localedata/zu_ZA.dat differ
diff --git a/awx/lib/site-packages/babel/localtime/__init__.py b/awx/lib/site-packages/babel/localtime/__init__.py
new file mode 100644
index 0000000000..cdb3e9b5d5
--- /dev/null
+++ b/awx/lib/site-packages/babel/localtime/__init__.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+"""
+    babel.localtime
+    ~~~~~~~~~~~~~~~
+
+    Babel specific fork of tzlocal to determine the local timezone
+    of the system.
+ + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. +""" + +import sys +import pytz +import time +from datetime import timedelta, datetime +from datetime import tzinfo +from threading import RLock + +if sys.platform == 'win32': + from babel.localtime._win32 import _get_localzone +else: + from babel.localtime._unix import _get_localzone + + +_cached_tz = None +_cache_lock = RLock() + +STDOFFSET = timedelta(seconds = -time.timezone) +if time.daylight: + DSTOFFSET = timedelta(seconds = -time.altzone) +else: + DSTOFFSET = STDOFFSET + +DSTDIFF = DSTOFFSET - STDOFFSET +ZERO = timedelta(0) + + +class _FallbackLocalTimezone(tzinfo): + + def utcoffset(self, dt): + if self._isdst(dt): + return DSTOFFSET + else: + return STDOFFSET + + def dst(self, dt): + if self._isdst(dt): + return DSTDIFF + else: + return ZERO + + def tzname(self, dt): + return time.tzname[self._isdst(dt)] + + def _isdst(self, dt): + tt = (dt.year, dt.month, dt.day, + dt.hour, dt.minute, dt.second, + dt.weekday(), 0, -1) + stamp = time.mktime(tt) + tt = time.localtime(stamp) + return tt.tm_isdst > 0 + + +def get_localzone(): + """Returns the current underlying local timezone object. + Generally this function does not need to be used, it's a + better idea to use the :data:`LOCALTZ` singleton instead. + """ + return _get_localzone() + + +try: + LOCALTZ = get_localzone() +except pytz.UnknownTimeZoneError: + LOCALTZ = _FallbackLocalTimezone() diff --git a/awx/lib/site-packages/babel/localtime/_unix.py b/awx/lib/site-packages/babel/localtime/_unix.py new file mode 100644 index 0000000000..b4a3b599f0 --- /dev/null +++ b/awx/lib/site-packages/babel/localtime/_unix.py @@ -0,0 +1,137 @@ +from __future__ import with_statement +import os +import re +import sys +import pytz +import subprocess + +_systemconfig_tz = re.compile(r'^Time Zone: (.*)$(?m)') + + +def _tz_from_env(tzenv): + if tzenv[0] == ':': + tzenv = tzenv[1:] + + # TZ specifies a file + if os.path.exists(tzenv): + with open(tzenv, 'rb') as tzfile: + return pytz.tzfile.build_tzinfo('local', tzfile) + + # TZ specifies a zoneinfo zone. + try: + tz = pytz.timezone(tzenv) + # That worked, so we return this: + return tz + except pytz.UnknownTimeZoneError: + raise pytz.UnknownTimeZoneError( + "tzlocal() does not support non-zoneinfo timezones like %s. \n" + "Please use a timezone in the form of Continent/City") + +def _get_localzone(_root='/'): + """Tries to find the local timezone configuration. + This method prefers finding the timezone name and passing that to pytz, + over passing in the localtime file, as in the later case the zoneinfo + name is unknown. + The parameter _root makes the function look for files like /etc/localtime + beneath the _root directory. This is primarily used by the tests. + In normal usage you call the function without parameters. + """ + + tzenv = os.environ.get('TZ') + if tzenv: + return _tz_from_env(tzenv) + + # This is actually a pretty reliable way to test for the local time + # zone on operating systems like OS X. On OS X especially this is the + # only one that actually works. + try: + link_dst = os.readlink('/etc/localtime') + except OSError: + pass + else: + pos = link_dst.find('/zoneinfo/') + if pos >= 0: + zone_name = link_dst[pos + 10:] + try: + return pytz.timezone(zone_name) + except pytz.UnknownTimeZoneError: + pass + + # If we are on OS X now we are pretty sure that the rest of the + # code will fail and just fall through until it hits the reading + # of /etc/localtime and using it without name. 
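
[Illustration, not part of the vendored file: a minimal sketch of the two
TZ-variable cases handled by _tz_from_env above. The zone name and tzfile
path are invented examples; both calls are plain pytz API.]

    import pytz

    # Case 1: TZ names a zoneinfo zone -- resolved with pytz.timezone().
    tz = pytz.timezone('Europe/Paris')

    # Case 2: TZ points at a tzfile on disk -- parsed with
    # pytz.tzfile.build_tzinfo(), exactly as _tz_from_env does.
    with open('/usr/share/zoneinfo/Europe/Paris', 'rb') as tzfile:
        tz = pytz.tzfile.build_tzinfo('local', tzfile)
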
At this point we + # can invoke systemconfig which internally invokes ICU. ICU itself + # does the same thing we do (readlink + compare file contents) but + # since it knows where the zone files are that should be a bit + # better than reimplementing the logic here. + if sys.platform == 'darwin': + c = subprocess.Popen(['systemsetup', '-gettimezone'], + stdout=subprocess.PIPE) + sys_result = c.communicate()[0] + c.wait() + tz_match = _systemconfig_tz.search(sys_result) + if tz_match is not None: + zone_name = tz_match.group(1) + try: + return pytz.timezone(zone_name) + except pytz.UnknownTimeZoneError: + pass + + # Now look for distribution specific configuration files + # that contain the timezone name. + tzpath = os.path.join(_root, 'etc/timezone') + if os.path.exists(tzpath): + with open(tzpath, 'rb') as tzfile: + data = tzfile.read() + + # Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file. + # That's a misconfiguration, but we need to handle it gracefully: + if data[:5] != 'TZif2': + etctz = data.strip().decode() + # Get rid of host definitions and comments: + if ' ' in etctz: + etctz, dummy = etctz.split(' ', 1) + if '#' in etctz: + etctz, dummy = etctz.split('#', 1) + return pytz.timezone(etctz.replace(' ', '_')) + + # CentOS has a ZONE setting in /etc/sysconfig/clock, + # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and + # Gentoo has a TIMEZONE setting in /etc/conf.d/clock + # We look through these files for a timezone: + zone_re = re.compile('\s*ZONE\s*=\s*\"') + timezone_re = re.compile('\s*TIMEZONE\s*=\s*\"') + end_re = re.compile('\"') + + for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'): + tzpath = os.path.join(_root, filename) + if not os.path.exists(tzpath): + continue + with open(tzpath, 'rt') as tzfile: + data = tzfile.readlines() + + for line in data: + # Look for the ZONE= setting. + match = zone_re.match(line) + if match is None: + # No ZONE= setting. Look for the TIMEZONE= setting. + match = timezone_re.match(line) + if match is not None: + # Some setting existed + line = line[match.end():] + etctz = line[:end_re.search(line).start()] + + # We found a timezone + return pytz.timezone(etctz.replace(' ', '_')) + + # No explicit setting existed. Use localtime + for filename in ('etc/localtime', 'usr/local/etc/localtime'): + tzpath = os.path.join(_root, filename) + + if not os.path.exists(tzpath): + continue + + with open(tzpath, 'rb') as tzfile: + return pytz.tzfile.build_tzinfo('local', tzfile) + + raise pytz.UnknownTimeZoneError('Can not find any timezone configuration') diff --git a/awx/lib/site-packages/babel/localtime/_win32.py b/awx/lib/site-packages/babel/localtime/_win32.py new file mode 100644 index 0000000000..1f6ecc7c0d --- /dev/null +++ b/awx/lib/site-packages/babel/localtime/_win32.py @@ -0,0 +1,89 @@ +try: + import _winreg as winreg +except ImportError: + try: + import winreg + except ImportError: + winreg = None + +from babel.core import get_global +import pytz + + +tz_names = get_global('windows_zone_mapping') + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dict = {} + size = winreg.QueryInfoKey(key)[1] + for i in range(size): + data = winreg.EnumValue(key, i) + dict[data[0]] = data[1] + return dict + + +def get_localzone_name(): + # Windows is special. 
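
[Illustration, not part of the vendored file: the darwin branch above
scrapes the output of `systemsetup -gettimezone`. A standalone sketch of
that parse; the sample output string is invented, and the multiline flag is
passed explicitly instead of being embedded at the end of the pattern.]

    import re

    sample = 'Time Zone: Europe/Vienna\n'
    match = re.search(r'^Time Zone: (.*)$', sample, re.MULTILINE)
    print(match.group(1))  # Europe/Vienna
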
It has unique time zone names (in several + # meanings of the word) available, but unfortunately, they can be + # translated to the language of the operating system, so we need to + # do a backwards lookup, by going through all time zones and see which + # one matches. + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + + TZLOCALKEYNAME = r'SYSTEM\CurrentControlSet\Control\TimeZoneInformation' + localtz = winreg.OpenKey(handle, TZLOCALKEYNAME) + keyvalues = valuestodict(localtz) + localtz.Close() + if 'TimeZoneKeyName' in keyvalues: + # Windows 7 (and Vista?) + + # For some reason this returns a string with loads of NUL bytes at + # least on some systems. I don't know if this is a bug somewhere, I + # just work around it. + tzkeyname = keyvalues['TimeZoneKeyName'].split('\x00', 1)[0] + else: + # Windows 2000 or XP + + # This is the localized name: + tzwin = keyvalues['StandardName'] + + # Open the list of timezones to look up the real name: + TZKEYNAME = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones' + tzkey = winreg.OpenKey(handle, TZKEYNAME) + + # Now, match this value to Time Zone information + tzkeyname = None + for i in range(winreg.QueryInfoKey(tzkey)[0]): + subkey = winreg.EnumKey(tzkey, i) + sub = winreg.OpenKey(tzkey, subkey) + data = valuestodict(sub) + sub.Close() + if data['Std'] == tzwin: + tzkeyname = subkey + break + + tzkey.Close() + handle.Close() + + if tzkeyname is None: + raise LookupError('Can not find Windows timezone configuration') + + timezone = tz_names.get(tzkeyname) + if timezone is None: + # Nope, that didn't work. Try adding 'Standard Time', + # it seems to work a lot of times: + timezone = tz_names.get(tzkeyname + ' Standard Time') + + # Return what we have. + if timezone is None: + raise pytz.UnknownTimeZoneError('Can not find timezone ' + tzkeyname) + + return timezone + + +def _get_localzone(): + if winreg is None: + raise pytz.UnknownTimeZoneError( + 'Runtime support not available') + return pytz.timezone(get_localzone_name()) diff --git a/awx/lib/site-packages/babel/messages/__init__.py b/awx/lib/site-packages/babel/messages/__init__.py new file mode 100644 index 0000000000..1b63bae2ee --- /dev/null +++ b/awx/lib/site-packages/babel/messages/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" + babel.messages + ~~~~~~~~~~~~~~ + + Support for ``gettext`` message catalogs. + + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. +""" + +from babel.messages.catalog import * diff --git a/awx/lib/site-packages/babel/messages/catalog.py b/awx/lib/site-packages/babel/messages/catalog.py new file mode 100644 index 0000000000..501763b584 --- /dev/null +++ b/awx/lib/site-packages/babel/messages/catalog.py @@ -0,0 +1,831 @@ +# -*- coding: utf-8 -*- +""" + babel.messages.catalog + ~~~~~~~~~~~~~~~~~~~~~~ + + Data structures for message catalogs. + + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. 
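
[Illustration, not part of the vendored file: _win32.py resolves the
registry key name through Babel's CLDR-derived Windows-to-IANA lookup
table. A small sketch; the mapped result shown is an example.]

    from babel.core import get_global

    tz_names = get_global('windows_zone_mapping')
    print(tz_names.get('W. Europe Standard Time'))  # e.g. 'Europe/Berlin'
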
+""" + +import re +import time + +from cgi import parse_header +from datetime import datetime, time as time_ +from difflib import get_close_matches +from email import message_from_string +from copy import copy + +from babel import __version__ as VERSION +from babel.core import Locale +from babel.dates import format_datetime +from babel.messages.plurals import get_plural +from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone +from babel._compat import string_types, number_types, PY2, cmp + +__all__ = ['Message', 'Catalog', 'TranslationError'] + + +PYTHON_FORMAT = re.compile(r'''(?x) + \% + (?:\(([\w]*)\))? + ( + [-#0\ +]?(?:\*|[\d]+)? + (?:\.(?:\*|[\d]+))? + [hlL]? + ) + ([diouxXeEfFgGcrs%]) +''') + + +class Message(object): + """Representation of a single message in a catalog.""" + + def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(), + user_comments=(), previous_id=(), lineno=None, context=None): + """Create the message object. + + :param id: the message ID, or a ``(singular, plural)`` tuple for + pluralizable messages + :param string: the translated message string, or a + ``(singular, plural)`` tuple for pluralizable messages + :param locations: a sequence of ``(filenname, lineno)`` tuples + :param flags: a set or sequence of flags + :param auto_comments: a sequence of automatic comments for the message + :param user_comments: a sequence of user comments for the message + :param previous_id: the previous message ID, or a ``(singular, plural)`` + tuple for pluralizable messages + :param lineno: the line number on which the msgid line was found in the + PO file, if any + :param context: the message context + """ + self.id = id #: The message ID + if not string and self.pluralizable: + string = (u'', u'') + self.string = string #: The message translation + self.locations = list(distinct(locations)) + self.flags = set(flags) + if id and self.python_format: + self.flags.add('python-format') + else: + self.flags.discard('python-format') + self.auto_comments = list(distinct(auto_comments)) + self.user_comments = list(distinct(user_comments)) + if isinstance(previous_id, string_types): + self.previous_id = [previous_id] + else: + self.previous_id = list(previous_id) + self.lineno = lineno + self.context = context + + def __repr__(self): + return '<%s %r (flags: %r)>' % (type(self).__name__, self.id, + list(self.flags)) + + def __cmp__(self, obj): + """Compare Messages, taking into account plural ids""" + def values_to_compare(): + if isinstance(obj, Message): + plural = self.pluralizable + obj_plural = obj.pluralizable + if plural and obj_plural: + return self.id[0], obj.id[0] + elif plural: + return self.id[0], obj.id + elif obj_plural: + return self.id, obj.id[0] + return self.id, obj.id + this, other = values_to_compare() + return cmp(this, other) + + def __gt__(self, other): + return self.__cmp__(other) > 0 + + def __lt__(self, other): + return self.__cmp__(other) < 0 + + def __ge__(self, other): + return self.__cmp__(other) >= 0 + + def __le__(self, other): + return self.__cmp__(other) <= 0 + + def __eq__(self, other): + return self.__cmp__(other) == 0 + + def __ne__(self, other): + return self.__cmp__(other) != 0 + + def clone(self): + return Message(*map(copy, (self.id, self.string, self.locations, + self.flags, self.auto_comments, + self.user_comments, self.previous_id, + self.lineno, self.context))) + + def check(self, catalog=None): + """Run various validation checks on the message. 
Some validations + are only performed if the catalog is provided. This method returns + a sequence of `TranslationError` objects. + + :rtype: ``iterator`` + :param catalog: A catalog instance that is passed to the checkers + :see: `Catalog.check` for a way to perform checks for all messages + in a catalog. + """ + from babel.messages.checkers import checkers + errors = [] + for checker in checkers: + try: + checker(catalog, self) + except TranslationError as e: + errors.append(e) + return errors + + @property + def fuzzy(self): + """Whether the translation is fuzzy. + + >>> Message('foo').fuzzy + False + >>> msg = Message('foo', 'foo', flags=['fuzzy']) + >>> msg.fuzzy + True + >>> msg + + + :type: `bool`""" + return 'fuzzy' in self.flags + + @property + def pluralizable(self): + """Whether the message is plurizable. + + >>> Message('foo').pluralizable + False + >>> Message(('foo', 'bar')).pluralizable + True + + :type: `bool`""" + return isinstance(self.id, (list, tuple)) + + @property + def python_format(self): + """Whether the message contains Python-style parameters. + + >>> Message('foo %(name)s bar').python_format + True + >>> Message(('foo %(name)s', 'foo %(name)s')).python_format + True + + :type: `bool`""" + ids = self.id + if not isinstance(ids, (list, tuple)): + ids = [ids] + return any(PYTHON_FORMAT.search(id) for id in ids) + + +class TranslationError(Exception): + """Exception thrown by translation checkers when invalid message + translations are encountered.""" + + +DEFAULT_HEADER = u"""\ +# Translations template for PROJECT. +# Copyright (C) YEAR ORGANIZATION +# This file is distributed under the same license as the PROJECT project. +# FIRST AUTHOR , YEAR. +#""" + + +if PY2: + def _parse_header(header_string): + # message_from_string only works for str, not for unicode + headers = message_from_string(header_string.encode('utf8')) + decoded_headers = {} + for name, value in headers.items(): + name = name.decode('utf8') + value = value.decode('utf8') + decoded_headers[name] = value + return decoded_headers + +else: + _parse_header = message_from_string + + +class Catalog(object): + """Representation of a message catalog.""" + + def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER, + project=None, version=None, copyright_holder=None, + msgid_bugs_address=None, creation_date=None, + revision_date=None, last_translator=None, language_team=None, + charset=None, fuzzy=True): + """Initialize the catalog object. 
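
[Illustration, not part of the vendored file: the Message properties above
in a nutshell, mirroring the doctests shipped with the class.]

    from babel.messages.catalog import Message

    print(Message(('foo', 'bar')).pluralizable)    # True
    print(Message('foo %(name)s').python_format)   # True
    print(Message('foo', flags=['fuzzy']).fuzzy)   # True
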
+ + :param locale: the locale identifier or `Locale` object, or `None` + if the catalog is not bound to a locale (which basically + means it's a template) + :param domain: the message domain + :param header_comment: the header comment as string, or `None` for the + default header + :param project: the project's name + :param version: the project's version + :param copyright_holder: the copyright holder of the catalog + :param msgid_bugs_address: the email address or URL to submit bug + reports to + :param creation_date: the date the catalog was created + :param revision_date: the date the catalog was revised + :param last_translator: the name and email of the last translator + :param language_team: the name and email of the language team + :param charset: the encoding to use in the output (defaults to utf-8) + :param fuzzy: the fuzzy bit on the catalog header + """ + self.domain = domain #: The message domain + if locale: + locale = Locale.parse(locale) + self.locale = locale #: The locale or `None` + self._header_comment = header_comment + self._messages = odict() + + self.project = project or 'PROJECT' #: The project name + self.version = version or 'VERSION' #: The project version + self.copyright_holder = copyright_holder or 'ORGANIZATION' + self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS' + + self.last_translator = last_translator or 'FULL NAME ' + """Name and email address of the last translator.""" + self.language_team = language_team or 'LANGUAGE ' + """Name and email address of the language team.""" + + self.charset = charset or 'utf-8' + + if creation_date is None: + creation_date = datetime.now(LOCALTZ) + elif isinstance(creation_date, datetime) and not creation_date.tzinfo: + creation_date = creation_date.replace(tzinfo=LOCALTZ) + self.creation_date = creation_date #: Creation date of the template + if revision_date is None: + revision_date = 'YEAR-MO-DA HO:MI+ZONE' + elif isinstance(revision_date, datetime) and not revision_date.tzinfo: + revision_date = revision_date.replace(tzinfo=LOCALTZ) + self.revision_date = revision_date #: Last revision date of the catalog + self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`) + + self.obsolete = odict() #: Dictionary of obsolete messages + self._num_plurals = None + self._plural_expr = None + + def _get_header_comment(self): + comment = self._header_comment + year = datetime.now(LOCALTZ).strftime('%Y') + if hasattr(self.revision_date, 'strftime'): + year = self.revision_date.strftime('%Y') + comment = comment.replace('PROJECT', self.project) \ + .replace('VERSION', self.version) \ + .replace('YEAR', year) \ + .replace('ORGANIZATION', self.copyright_holder) + if self.locale: + comment = comment.replace('Translations template', '%s translations' + % self.locale.english_name) + return comment + + def _set_header_comment(self, string): + self._header_comment = string + + header_comment = property(_get_header_comment, _set_header_comment, doc="""\ + The header comment for the catalog. + + >>> catalog = Catalog(project='Foobar', version='1.0', + ... copyright_holder='Foo Company') + >>> print catalog.header_comment #doctest: +ELLIPSIS + # Translations template for Foobar. + # Copyright (C) ... Foo Company + # This file is distributed under the same license as the Foobar project. + # FIRST AUTHOR , .... + # + + The header can also be set from a string. Any known upper-case variables + will be replaced when the header is retrieved again: + + >>> catalog = Catalog(project='Foobar', version='1.0', + ... 
copyright_holder='Foo Company') + >>> catalog.header_comment = '''\\ + ... # The POT for my really cool PROJECT project. + ... # Copyright (C) 1990-2003 ORGANIZATION + ... # This file is distributed under the same license as the PROJECT + ... # project. + ... #''' + >>> print catalog.header_comment + # The POT for my really cool Foobar project. + # Copyright (C) 1990-2003 Foo Company + # This file is distributed under the same license as the Foobar + # project. + # + + :type: `unicode` + """) + + def _get_mime_headers(self): + headers = [] + headers.append(('Project-Id-Version', + '%s %s' % (self.project, self.version))) + headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address)) + headers.append(('POT-Creation-Date', + format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ', + locale='en'))) + if isinstance(self.revision_date, (datetime, time_) + number_types): + headers.append(('PO-Revision-Date', + format_datetime(self.revision_date, + 'yyyy-MM-dd HH:mmZ', locale='en'))) + else: + headers.append(('PO-Revision-Date', self.revision_date)) + headers.append(('Last-Translator', self.last_translator)) + if (self.locale is not None) and ('LANGUAGE' in self.language_team): + headers.append(('Language-Team', + self.language_team.replace('LANGUAGE', + str(self.locale)))) + else: + headers.append(('Language-Team', self.language_team)) + if self.locale is not None: + headers.append(('Plural-Forms', self.plural_forms)) + headers.append(('MIME-Version', '1.0')) + headers.append(('Content-Type', + 'text/plain; charset=%s' % self.charset)) + headers.append(('Content-Transfer-Encoding', '8bit')) + headers.append(('Generated-By', 'Babel %s\n' % VERSION)) + return headers + + def _set_mime_headers(self, headers): + for name, value in headers: + name = name.lower() + if name == 'project-id-version': + parts = value.split(' ') + self.project = u' '.join(parts[:-1]) + self.version = parts[-1] + elif name == 'report-msgid-bugs-to': + self.msgid_bugs_address = value + elif name == 'last-translator': + self.last_translator = value + elif name == 'language-team': + self.language_team = value + elif name == 'content-type': + mimetype, params = parse_header(value) + if 'charset' in params: + self.charset = params['charset'].lower() + elif name == 'plural-forms': + _, params = parse_header(' ;' + value) + self._num_plurals = int(params.get('nplurals', 2)) + self._plural_expr = params.get('plural', '(n != 1)') + elif name == 'pot-creation-date': + # FIXME: this should use dates.parse_datetime as soon as that + # is ready + value, tzoffset, _ = re.split('([+-]\d{4})$', value, 1) + + tt = time.strptime(value, '%Y-%m-%d %H:%M') + ts = time.mktime(tt) + + # Separate the offset into a sign component, hours, and minutes + plus_minus_s, rest = tzoffset[0], tzoffset[1:] + hours_offset_s, mins_offset_s = rest[:2], rest[2:] + + # Make them all integers + plus_minus = int(plus_minus_s + '1') + hours_offset = int(hours_offset_s) + mins_offset = int(mins_offset_s) + + # Calculate net offset + net_mins_offset = hours_offset * 60 + net_mins_offset += mins_offset + net_mins_offset *= plus_minus + + # Create an offset object + tzoffset = FixedOffsetTimezone(net_mins_offset) + + # Store the offset in a datetime object + dt = datetime.fromtimestamp(ts) + self.creation_date = dt.replace(tzinfo=tzoffset) + elif name == 'po-revision-date': + # Keep the value if it's not the default one + if 'YEAR' not in value: + # FIXME: this should use dates.parse_datetime as soon as + # that is ready + value, tzoffset, _ = 
re.split('([+-]\d{4})$', value, 1) + tt = time.strptime(value, '%Y-%m-%d %H:%M') + ts = time.mktime(tt) + + # Separate the offset into a sign component, hours, and + # minutes + plus_minus_s, rest = tzoffset[0], tzoffset[1:] + hours_offset_s, mins_offset_s = rest[:2], rest[2:] + + # Make them all integers + plus_minus = int(plus_minus_s + '1') + hours_offset = int(hours_offset_s) + mins_offset = int(mins_offset_s) + + # Calculate net offset + net_mins_offset = hours_offset * 60 + net_mins_offset += mins_offset + net_mins_offset *= plus_minus + + # Create an offset object + tzoffset = FixedOffsetTimezone(net_mins_offset) + + # Store the offset in a datetime object + dt = datetime.fromtimestamp(ts) + self.revision_date = dt.replace(tzinfo=tzoffset) + + mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\ + The MIME headers of the catalog, used for the special ``msgid ""`` entry. + + The behavior of this property changes slightly depending on whether a locale + is set or not, the latter indicating that the catalog is actually a template + for actual translations. + + Here's an example of the output for such a catalog template: + + >>> from babel.dates import UTC + >>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC) + >>> catalog = Catalog(project='Foobar', version='1.0', + ... creation_date=created) + >>> for name, value in catalog.mime_headers: + ... print '%s: %s' % (name, value) + Project-Id-Version: Foobar 1.0 + Report-Msgid-Bugs-To: EMAIL@ADDRESS + POT-Creation-Date: 1990-04-01 15:30+0000 + PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE + Last-Translator: FULL NAME + Language-Team: LANGUAGE + MIME-Version: 1.0 + Content-Type: text/plain; charset=utf-8 + Content-Transfer-Encoding: 8bit + Generated-By: Babel ... + + And here's an example of the output when the locale is set: + + >>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC) + >>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0', + ... creation_date=created, revision_date=revised, + ... last_translator='John Doe ', + ... language_team='de_DE ') + >>> for name, value in catalog.mime_headers: + ... print '%s: %s' % (name, value) + Project-Id-Version: Foobar 1.0 + Report-Msgid-Bugs-To: EMAIL@ADDRESS + POT-Creation-Date: 1990-04-01 15:30+0000 + PO-Revision-Date: 1990-08-03 12:00+0000 + Last-Translator: John Doe + Language-Team: de_DE + Plural-Forms: nplurals=2; plural=(n != 1) + MIME-Version: 1.0 + Content-Type: text/plain; charset=utf-8 + Content-Transfer-Encoding: 8bit + Generated-By: Babel ... + + :type: `list` + """) + + @property + def num_plurals(self): + """The number of plurals used by the catalog or locale. + + >>> Catalog(locale='en').num_plurals + 2 + >>> Catalog(locale='ga').num_plurals + 3 + + :type: `int`""" + if self._num_plurals is None: + num = 2 + if self.locale: + num = get_plural(self.locale)[0] + self._num_plurals = num + return self._num_plurals + + @property + def plural_expr(self): + """The plural expression used by the catalog or locale. + + >>> Catalog(locale='en').plural_expr + '(n != 1)' + >>> Catalog(locale='ga').plural_expr + '(n==1 ? 0 : n==2 ? 1 : 2)' + + :type: `string_types`""" + if self._plural_expr is None: + expr = '(n != 1)' + if self.locale: + expr = get_plural(self.locale)[1] + self._plural_expr = expr + return self._plural_expr + + @property + def plural_forms(self): + """Return the plural forms declaration for the locale. 
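
[Illustration, not part of the vendored file: the header-date parsing above
splits a trailing '+HHMM'/'-HHMM' offset by hand. The same arithmetic in a
compact standalone form; the sample header value is invented.]

    import re

    value, tzoffset, _ = re.split(r'([+-]\d{4})$', '2013-09-20 10:00+0230', 1)
    sign = 1 if tzoffset[0] == '+' else -1
    offset_mins = sign * (int(tzoffset[1:3]) * 60 + int(tzoffset[3:5]))
    print(value, offset_mins)  # 2013-09-20 10:00 150
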
+ + >>> Catalog(locale='en').plural_forms + 'nplurals=2; plural=(n != 1)' + >>> Catalog(locale='pt_BR').plural_forms + 'nplurals=2; plural=(n > 1)' + + :type: `str`""" + return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr) + + def __contains__(self, id): + """Return whether the catalog has a message with the specified ID.""" + return self._key_for(id) in self._messages + + def __len__(self): + """The number of messages in the catalog. + + This does not include the special ``msgid ""`` entry.""" + return len(self._messages) + + def __iter__(self): + """Iterates through all the entries in the catalog, in the order they + were added, yielding a `Message` object for every entry. + + :rtype: ``iterator``""" + buf = [] + for name, value in self.mime_headers: + buf.append('%s: %s' % (name, value)) + flags = set() + if self.fuzzy: + flags |= set(['fuzzy']) + yield Message(u'', '\n'.join(buf), flags=flags) + for key in self._messages: + yield self._messages[key] + + def __repr__(self): + locale = '' + if self.locale: + locale = ' %s' % self.locale + return '<%s %r%s>' % (type(self).__name__, self.domain, locale) + + def __delitem__(self, id): + """Delete the message with the specified ID.""" + self.delete(id) + + def __getitem__(self, id): + """Return the message with the specified ID. + + :param id: the message ID + """ + return self.get(id) + + def __setitem__(self, id, message): + """Add or update the message with the specified ID. + + >>> catalog = Catalog() + >>> catalog[u'foo'] = Message(u'foo') + >>> catalog[u'foo'] + + + If a message with that ID is already in the catalog, it is updated + to include the locations and flags of the new message. + + >>> catalog = Catalog() + >>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)]) + >>> catalog[u'foo'].locations + [('main.py', 1)] + >>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)]) + >>> catalog[u'foo'].locations + [('main.py', 1), ('utils.py', 5)] + + :param id: the message ID + :param message: the `Message` object + """ + assert isinstance(message, Message), 'expected a Message object' + key = self._key_for(id, message.context) + current = self._messages.get(key) + if current: + if message.pluralizable and not current.pluralizable: + # The new message adds pluralization + current.id = message.id + current.string = message.string + current.locations = list(distinct(current.locations + + message.locations)) + current.auto_comments = list(distinct(current.auto_comments + + message.auto_comments)) + current.user_comments = list(distinct(current.user_comments + + message.user_comments)) + current.flags |= message.flags + message = current + elif id == '': + # special treatment for the header message + self.mime_headers = _parse_header(message.string).items() + self.header_comment = '\n'.join([('# %s' % c).rstrip() for c + in message.user_comments]) + self.fuzzy = message.fuzzy + else: + if isinstance(id, (list, tuple)): + assert isinstance(message.string, (list, tuple)), \ + 'Expected sequence but got %s' % type(message.string) + self._messages[key] = message + + def add(self, id, string=None, locations=(), flags=(), auto_comments=(), + user_comments=(), previous_id=(), lineno=None, context=None): + """Add or update the message with the specified ID. + + >>> catalog = Catalog() + >>> catalog.add(u'foo') + + >>> catalog[u'foo'] + + + This method simply constructs a `Message` object with the given + arguments and invokes `__setitem__` with that object. 
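
[Illustration, not part of the vendored file: the duplicate-ID merge
performed by __setitem__ above, condensed from its doctests.]

    from babel.messages.catalog import Catalog, Message

    catalog = Catalog()
    catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
    catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
    print(catalog[u'foo'].locations)  # [('main.py', 1), ('utils.py', 5)]
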
+ + :param id: the message ID, or a ``(singular, plural)`` tuple for + pluralizable messages + :param string: the translated message string, or a + ``(singular, plural)`` tuple for pluralizable messages + :param locations: a sequence of ``(filenname, lineno)`` tuples + :param flags: a set or sequence of flags + :param auto_comments: a sequence of automatic comments + :param user_comments: a sequence of user comments + :param previous_id: the previous message ID, or a ``(singular, plural)`` + tuple for pluralizable messages + :param lineno: the line number on which the msgid line was found in the + PO file, if any + :param context: the message context + """ + message = Message(id, string, list(locations), flags, auto_comments, + user_comments, previous_id, lineno=lineno, + context=context) + self[id] = message + return message + + def check(self): + """Run various validation checks on the translations in the catalog. + + For every message which fails validation, this method yield a + ``(message, errors)`` tuple, where ``message`` is the `Message` object + and ``errors`` is a sequence of `TranslationError` objects. + + :rtype: ``iterator`` + """ + for message in self._messages.values(): + errors = message.check(catalog=self) + if errors: + yield message, errors + + def get(self, id, context=None): + """Return the message with the specified ID and context. + + :param id: the message ID + :param context: the message context, or ``None`` for no context + """ + return self._messages.get(self._key_for(id, context)) + + def delete(self, id, context=None): + """Delete the message with the specified ID and context. + + :param id: the message ID + :param context: the message context, or ``None`` for no context + """ + key = self._key_for(id, context) + if key in self._messages: + del self._messages[key] + + def update(self, template, no_fuzzy_matching=False): + """Update the catalog based on the given template catalog. + + >>> from babel.messages import Catalog + >>> template = Catalog() + >>> template.add('green', locations=[('main.py', 99)]) + + >>> template.add('blue', locations=[('main.py', 100)]) + + >>> template.add(('salad', 'salads'), locations=[('util.py', 42)]) + + >>> catalog = Catalog(locale='de_DE') + >>> catalog.add('blue', u'blau', locations=[('main.py', 98)]) + + >>> catalog.add('head', u'Kopf', locations=[('util.py', 33)]) + + >>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'), + ... 
locations=[('util.py', 38)]) + + + >>> catalog.update(template) + >>> len(catalog) + 3 + + >>> msg1 = catalog['green'] + >>> msg1.string + >>> msg1.locations + [('main.py', 99)] + + >>> msg2 = catalog['blue'] + >>> msg2.string + u'blau' + >>> msg2.locations + [('main.py', 100)] + + >>> msg3 = catalog['salad'] + >>> msg3.string + (u'Salat', u'Salate') + >>> msg3.locations + [('util.py', 42)] + + Messages that are in the catalog but not in the template are removed + from the main collection, but can still be accessed via the `obsolete` + member: + + >>> 'head' in catalog + False + >>> catalog.obsolete.values() + [] + + :param template: the reference catalog, usually read from a POT file + :param no_fuzzy_matching: whether to use fuzzy matching of message IDs + """ + messages = self._messages + remaining = messages.copy() + self._messages = odict() + + # Prepare for fuzzy matching + fuzzy_candidates = [] + if not no_fuzzy_matching: + fuzzy_candidates = dict([ + (self._key_for(msgid), messages[msgid].context) + for msgid in messages if msgid and messages[msgid].string + ]) + fuzzy_matches = set() + + def _merge(message, oldkey, newkey): + message = message.clone() + fuzzy = False + if oldkey != newkey: + fuzzy = True + fuzzy_matches.add(oldkey) + oldmsg = messages.get(oldkey) + if isinstance(oldmsg.id, string_types): + message.previous_id = [oldmsg.id] + else: + message.previous_id = list(oldmsg.id) + else: + oldmsg = remaining.pop(oldkey, None) + message.string = oldmsg.string + if isinstance(message.id, (list, tuple)): + if not isinstance(message.string, (list, tuple)): + fuzzy = True + message.string = tuple( + [message.string] + ([u''] * (len(message.id) - 1)) + ) + elif len(message.string) != self.num_plurals: + fuzzy = True + message.string = tuple(message.string[:len(oldmsg.string)]) + elif isinstance(message.string, (list, tuple)): + fuzzy = True + message.string = message.string[0] + message.flags |= oldmsg.flags + if fuzzy: + message.flags |= set([u'fuzzy']) + self[message.id] = message + + for message in template: + if message.id: + key = self._key_for(message.id, message.context) + if key in messages: + _merge(message, key, key) + else: + if no_fuzzy_matching is False: + # do some fuzzy matching with difflib + if isinstance(key, tuple): + matchkey = key[0] # just the msgid, no context + else: + matchkey = key + matches = get_close_matches(matchkey.lower().strip(), + fuzzy_candidates.keys(), 1) + if matches: + newkey = matches[0] + newctxt = fuzzy_candidates[newkey] + if newctxt is not None: + newkey = newkey, newctxt + _merge(message, newkey, key) + continue + + self[message.id] = message + + for msgid in remaining: + if no_fuzzy_matching or msgid not in fuzzy_matches: + self.obsolete[msgid] = remaining[msgid] + # Make updated catalog's POT-Creation-Date equal to the template + # used to update the catalog + self.creation_date = template.creation_date + + def _key_for(self, id, context=None): + """The key for a message is just the singular ID even for pluralizable + messages, but is a ``(msgid, msgctxt)`` tuple for context-specific + messages. 
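
[Illustration, not part of the vendored file: the update() workflow above,
condensed from its doctests.]

    from babel.messages import Catalog

    template = Catalog()  # POT-style template
    template.add('blue', locations=[('main.py', 100)])

    catalog = Catalog(locale='de_DE')
    catalog.add('blue', u'blau', locations=[('main.py', 98)])
    catalog.add('head', u'Kopf', locations=[('util.py', 33)])

    catalog.update(template)
    print(catalog['blue'].string)  # u'blau' -- translation kept, location refreshed
    print('head' in catalog)       # False -- moved to catalog.obsolete
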
+ """ + key = id + if isinstance(key, (list, tuple)): + key = id[0] + if context is not None: + key = (key, context) + return key diff --git a/awx/lib/site-packages/babel/messages/checkers.py b/awx/lib/site-packages/babel/messages/checkers.py new file mode 100644 index 0000000000..24ecdcfedb --- /dev/null +++ b/awx/lib/site-packages/babel/messages/checkers.py @@ -0,0 +1,173 @@ +# -*- coding: utf-8 -*- +""" + babel.messages.checkers + ~~~~~~~~~~~~~~~~~~~~~~~ + + Various routines that help with validation of translations. + + :since: version 0.9 + + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. +""" + +from babel.messages.catalog import TranslationError, PYTHON_FORMAT +from babel._compat import string_types, izip + + +#: list of format chars that are compatible to each other +_string_format_compatibilities = [ + set(['i', 'd', 'u']), + set(['x', 'X']), + set(['f', 'F', 'g', 'G']) +] + + +def num_plurals(catalog, message): + """Verify the number of plurals in the translation.""" + if not message.pluralizable: + if not isinstance(message.string, string_types): + raise TranslationError("Found plural forms for non-pluralizable " + "message") + return + + # skip further tests if no catalog is provided. + elif catalog is None: + return + + msgstrs = message.string + if not isinstance(msgstrs, (list, tuple)): + msgstrs = (msgstrs,) + if len(msgstrs) != catalog.num_plurals: + raise TranslationError("Wrong number of plural forms (expected %d)" % + catalog.num_plurals) + + +def python_format(catalog, message): + """Verify the format string placeholders in the translation.""" + if 'python-format' not in message.flags: + return + msgids = message.id + if not isinstance(msgids, (list, tuple)): + msgids = (msgids,) + msgstrs = message.string + if not isinstance(msgstrs, (list, tuple)): + msgstrs = (msgstrs,) + + for msgid, msgstr in izip(msgids, msgstrs): + if msgstr: + _validate_format(msgid, msgstr) + + +def _validate_format(format, alternative): + """Test format string `alternative` against `format`. `format` can be the + msgid of a message and `alternative` one of the `msgstr`\s. The two + arguments are not interchangeable as `alternative` may contain less + placeholders if `format` uses named placeholders. + + The behavior of this function is undefined if the string does not use + string formattings. + + If the string formatting of `alternative` is compatible to `format` the + function returns `None`, otherwise a `TranslationError` is raised. + + Examples for compatible format strings: + + >>> _validate_format('Hello %s!', 'Hallo %s!') + >>> _validate_format('Hello %i!', 'Hallo %d!') + + Example for an incompatible format strings: + + >>> _validate_format('Hello %(name)s!', 'Hallo %s!') + Traceback (most recent call last): + ... + TranslationError: the format strings are of different kinds + + This function is used by the `python_format` checker. 
+ + :param format: The original format string + :param alternative: The alternative format string that should be checked + against format + :raises TranslationError: on formatting errors + """ + + def _parse(string): + result = [] + for match in PYTHON_FORMAT.finditer(string): + name, format, typechar = match.groups() + if typechar == '%' and name is None: + continue + result.append((name, str(typechar))) + return result + + def _compatible(a, b): + if a == b: + return True + for set in _string_format_compatibilities: + if a in set and b in set: + return True + return False + + def _check_positional(results): + positional = None + for name, char in results: + if positional is None: + positional = name is None + else: + if (name is None) != positional: + raise TranslationError('format string mixes positional ' + 'and named placeholders') + return bool(positional) + + a, b = map(_parse, (format, alternative)) + + # now check if both strings are positional or named + a_positional, b_positional = map(_check_positional, (a, b)) + if a_positional and not b_positional and not b: + raise TranslationError('placeholders are incompatible') + elif a_positional != b_positional: + raise TranslationError('the format strings are of different kinds') + + # if we are operating on positional strings both must have the + # same number of format chars and those must be compatible + if a_positional: + if len(a) != len(b): + raise TranslationError('positional format placeholders are ' + 'unbalanced') + for idx, ((_, first), (_, second)) in enumerate(izip(a, b)): + if not _compatible(first, second): + raise TranslationError('incompatible format for placeholder ' + '%d: %r and %r are not compatible' % + (idx + 1, first, second)) + + # otherwise the second string must not have names the first one + # doesn't have and the types of those included must be compatible + else: + type_map = dict(a) + for name, typechar in b: + if name not in type_map: + raise TranslationError('unknown named placeholder %r' % name) + elif not _compatible(typechar, type_map[name]): + raise TranslationError('incompatible format for ' + 'placeholder %r: ' + '%r and %r are not compatible' % + (name, typechar, type_map[name])) + + +def _find_checkers(): + checkers = [] + try: + from pkg_resources import working_set + except ImportError: + pass + else: + for entry_point in working_set.iter_entry_points('babel.checkers'): + checkers.append(entry_point.load()) + if len(checkers) == 0: + # if pkg_resources is not available or no usable egg-info was found + # (see #230), just resort to hard-coded checkers + return [num_plurals, python_format] + return checkers + + +checkers = _find_checkers() diff --git a/awx/lib/site-packages/babel/messages/extract.py b/awx/lib/site-packages/babel/messages/extract.py new file mode 100644 index 0000000000..2f8084af53 --- /dev/null +++ b/awx/lib/site-packages/babel/messages/extract.py @@ -0,0 +1,562 @@ +# -*- coding: utf-8 -*- +""" + babel.messages.extract + ~~~~~~~~~~~~~~~~~~~~~~ + + Basic infrastructure for extracting localizable messages from source files. + + This module defines an extensible system for collecting localizable message + strings from a variety of sources. A native extractor for Python source + files is builtin, extractors for other sources can be added using very + simple plugins. + + The main entry points into the extraction functionality are the functions + `extract_from_dir` and `extract_from_file`. + + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. 
+""" + +import os +import sys +from tokenize import generate_tokens, COMMENT, NAME, OP, STRING + +from babel.util import parse_encoding, pathmatch, relpath +from babel._compat import PY2, text_type +from textwrap import dedent + + +GROUP_NAME = 'babel.extractors' + +DEFAULT_KEYWORDS = { + '_': None, + 'gettext': None, + 'ngettext': (1, 2), + 'ugettext': None, + 'ungettext': (1, 2), + 'dgettext': (2,), + 'dngettext': (2, 3), + 'N_': None, + 'pgettext': ((1, 'c'), 2) +} + +DEFAULT_MAPPING = [('**.py', 'python')] + +empty_msgid_warning = ( +'%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") ' +'returns the header entry with meta information, not the empty string.') + + +def _strip_comment_tags(comments, tags): + """Helper function for `extract` that strips comment tags from strings + in a list of comment lines. This functions operates in-place. + """ + def _strip(line): + for tag in tags: + if line.startswith(tag): + return line[len(tag):].strip() + return line + comments[:] = map(_strip, comments) + + +def extract_from_dir(dirname=None, method_map=DEFAULT_MAPPING, + options_map=None, keywords=DEFAULT_KEYWORDS, + comment_tags=(), callback=None, strip_comment_tags=False): + """Extract messages from any source files found in the given directory. + + This function generates tuples of the form ``(filename, lineno, message, + comments, context)``. + + Which extraction method is used per file is determined by the `method_map` + parameter, which maps extended glob patterns to extraction method names. + For example, the following is the default mapping: + + >>> method_map = [ + ... ('**.py', 'python') + ... ] + + This basically says that files with the filename extension ".py" at any + level inside the directory should be processed by the "python" extraction + method. Files that don't match any of the mapping patterns are ignored. See + the documentation of the `pathmatch` function for details on the pattern + syntax. + + The following extended mapping would also use the "genshi" extraction + method on any file in "templates" subdirectory: + + >>> method_map = [ + ... ('**/templates/**.*', 'genshi'), + ... ('**.py', 'python') + ... ] + + The dictionary provided by the optional `options_map` parameter augments + these mappings. It uses extended glob patterns as keys, and the values are + dictionaries mapping options names to option values (both strings). + + The glob patterns of the `options_map` do not necessarily need to be the + same as those used in the method mapping. For example, while all files in + the ``templates`` folders in an application may be Genshi applications, the + options for those files may differ based on extension: + + >>> options_map = { + ... '**/templates/**.txt': { + ... 'template_class': 'genshi.template:TextTemplate', + ... 'encoding': 'latin-1' + ... }, + ... '**/templates/**.html': { + ... 'include_attrs': '' + ... } + ... } + + :param dirname: the path to the directory to extract messages from. If + not given the current working directory is used. + :param method_map: a list of ``(pattern, method)`` tuples that maps of + extraction method names to extended glob patterns + :param options_map: a dictionary of additional options (optional) + :param keywords: a dictionary mapping keywords (i.e. 
names of functions
+                     that should be recognized as translation functions) to
+                     tuples that specify which of their arguments contain
+                     localizable strings
+    :param comment_tags: a list of tags of translator comments to search for
+                         and include in the results
+    :param callback: a function that is called for every file that messages are
+                     extracted from, just before the extraction itself is
+                     performed; the function is passed the filename, the name
+                     of the extraction method and the options dictionary as
+                     positional arguments, in that order
+    :param strip_comment_tags: a flag that if set to `True` causes all comment
+                               tags to be removed from the collected comments.
+    :see: `pathmatch`
+    """
+    if dirname is None:
+        dirname = os.getcwd()
+    if options_map is None:
+        options_map = {}
+
+    absname = os.path.abspath(dirname)
+    for root, dirnames, filenames in os.walk(absname):
+        # iterate over a copy so that pruning hidden/private subdirectories
+        # does not skip entries while the list is being modified
+        for subdir in list(dirnames):
+            if subdir.startswith('.') or subdir.startswith('_'):
+                dirnames.remove(subdir)
+        dirnames.sort()
+        filenames.sort()
+        for filename in filenames:
+            filename = relpath(
+                os.path.join(root, filename).replace(os.sep, '/'),
+                dirname
+            )
+            for pattern, method in method_map:
+                if pathmatch(pattern, filename):
+                    filepath = os.path.join(absname, filename)
+                    options = {}
+                    for opattern, odict in options_map.items():
+                        if pathmatch(opattern, filename):
+                            options = odict
+                    if callback:
+                        callback(filename, method, options)
+                    for lineno, message, comments, context in \
+                            extract_from_file(method, filepath,
+                                              keywords=keywords,
+                                              comment_tags=comment_tags,
+                                              options=options,
+                                              strip_comment_tags=
+                                              strip_comment_tags):
+                        yield filename, lineno, message, comments, context
+                    break
+
+
+def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS,
+                      comment_tags=(), options=None, strip_comment_tags=False):
+    """Extract messages from a specific file.
+
+    This function returns a list of tuples of the form ``(lineno, message,
+    comments, context)``.
+
+    :param filename: the path to the file to extract messages from
+    :param method: a string specifying the extraction method (e.g. "python")
+    :param keywords: a dictionary mapping keywords (i.e. names of functions
+                     that should be recognized as translation functions) to
+                     tuples that specify which of their arguments contain
+                     localizable strings
+    :param comment_tags: a list of translator tags to search for and include
+                         in the results
+    :param strip_comment_tags: a flag that if set to `True` causes all comment
+                               tags to be removed from the collected comments.
+    :param options: a dictionary of additional options (optional)
+    """
+    fileobj = open(filename, 'rb')
+    try:
+        return list(extract(method, fileobj, keywords, comment_tags, options,
+                            strip_comment_tags))
+    finally:
+        fileobj.close()
+
+
+def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(),
+            options=None, strip_comment_tags=False):
+    """Extract messages from the given file-like object using the specified
+    extraction method.
+
+    This function returns tuples of the form ``(lineno, message, comments,
+    context)``.
+
+    The implementation dispatches the actual extraction to plugins, based on the
+    value of the ``method`` parameter.
+
+    >>> source = '''# foo module
+    ... def run(argv):
+    ...     print _('Hello, world!')
+    ... '''
+
+    >>> from StringIO import StringIO
+    >>> for message in extract('python', StringIO(source)):
+    ...     print message
+    (3, u'Hello, world!', [], None)
+
+    :param method: a string specifying the extraction method (e.g.
"python"); + if this is a simple name, the extraction function will be + looked up by entry point; if it is an explicit reference + to a function (of the form ``package.module:funcname`` or + ``package.module.funcname``), the corresponding function + will be imported and used + :param fileobj: the file-like object the messages should be extracted from + :param keywords: a dictionary mapping keywords (i.e. names of functions + that should be recognized as translation functions) to + tuples that specify which of their arguments contain + localizable strings + :param comment_tags: a list of translator tags to search for and include + in the results + :param options: a dictionary of additional options (optional) + :param strip_comment_tags: a flag that if set to `True` causes all comment + tags to be removed from the collected comments. + :raise ValueError: if the extraction method is not registered + """ + func = None + if ':' in method or '.' in method: + if ':' not in method: + lastdot = method.rfind('.') + module, attrname = method[:lastdot], method[lastdot + 1:] + else: + module, attrname = method.split(':', 1) + func = getattr(__import__(module, {}, {}, [attrname]), attrname) + else: + try: + from pkg_resources import working_set + except ImportError: + pass + else: + for entry_point in working_set.iter_entry_points(GROUP_NAME, + method): + func = entry_point.load(require=True) + break + if func is None: + # if pkg_resources is not available or no usable egg-info was found + # (see #230), we resort to looking up the builtin extractors + # directly + builtin = { + 'ignore': extract_nothing, + 'python': extract_python, + 'javascript': extract_javascript + } + func = builtin.get(method) + if func is None: + raise ValueError('Unknown extraction method %r' % method) + + results = func(fileobj, keywords.keys(), comment_tags, + options=options or {}) + + for lineno, funcname, messages, comments in results: + if funcname: + spec = keywords[funcname] or (1,) + else: + spec = (1,) + if not isinstance(messages, (list, tuple)): + messages = [messages] + if not messages: + continue + + # Validate the messages against the keyword's specification + context = None + msgs = [] + invalid = False + # last_index is 1 based like the keyword spec + last_index = len(messages) + for index in spec: + if isinstance(index, tuple): + context = messages[index[0] - 1] + continue + if last_index < index: + # Not enough arguments + invalid = True + break + message = messages[index - 1] + if message is None: + invalid = True + break + msgs.append(message) + if invalid: + continue + + # keyword spec indexes are 1 based, therefore '-1' + if isinstance(spec[0], tuple): + # context-aware *gettext method + first_msg_index = spec[1] - 1 + else: + first_msg_index = spec[0] - 1 + if not messages[first_msg_index]: + # An empty string msgid isn't valid, emit a warning + where = '%s:%i' % (hasattr(fileobj, 'name') and \ + fileobj.name or '(unknown)', lineno) + sys.stderr.write((empty_msgid_warning % where) + '\n') + continue + + messages = tuple(msgs) + if len(messages) == 1: + messages = messages[0] + + if strip_comment_tags: + _strip_comment_tags(comments, comment_tags) + yield lineno, messages, comments, context + + +def extract_nothing(fileobj, keywords, comment_tags, options): + """Pseudo extractor that does not actually extract anything, but simply + returns an empty list. + """ + return [] + + +def extract_python(fileobj, keywords, comment_tags, options): + """Extract messages from Python source code. 
+
+    It returns an iterator yielding tuples in the following form ``(lineno,
+    funcname, message, comments)``.
+
+    :param fileobj: the seekable, file-like object the messages should be
+                    extracted from
+    :param keywords: a list of keywords (i.e. function names) that should be
+                     recognized as translation functions
+    :param comment_tags: a list of translator tags to search for and include
+                         in the results
+    :param options: a dictionary of additional options (optional)
+    :rtype: ``iterator``
+    """
+    funcname = lineno = message_lineno = None
+    call_stack = -1
+    buf = []
+    messages = []
+    translator_comments = []
+    in_def = in_translator_comments = False
+    comment_tag = None
+
+    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
+
+    if PY2:
+        next_line = fileobj.readline
+    else:
+        next_line = lambda: fileobj.readline().decode(encoding)
+
+    tokens = generate_tokens(next_line)
+    for tok, value, (lineno, _), _, _ in tokens:
+        if call_stack == -1 and tok == NAME and value in ('def', 'class'):
+            in_def = True
+        elif tok == OP and value == '(':
+            if in_def:
+                # Avoid false positives for declarations such as:
+                # def gettext(arg='message'):
+                in_def = False
+                continue
+            if funcname:
+                message_lineno = lineno
+                call_stack += 1
+        elif in_def and tok == OP and value == ':':
+            # End of a class definition without parens
+            in_def = False
+            continue
+        elif call_stack == -1 and tok == COMMENT:
+            # Strip the comment token from the line
+            if PY2:
+                value = value.decode(encoding)
+            value = value[1:].strip()
+            if in_translator_comments and \
+                    translator_comments[-1][0] == lineno - 1:
+                # We're already inside a translator comment, continue appending
+                translator_comments.append((lineno, value))
+                continue
+            # If execution reaches this point, let's see if comment line
+            # starts with one of the comment tags
+            for comment_tag in comment_tags:
+                if value.startswith(comment_tag):
+                    in_translator_comments = True
+                    translator_comments.append((lineno, value))
+                    break
+        elif funcname and call_stack == 0:
+            if tok == OP and value == ')':
+                if buf:
+                    messages.append(''.join(buf))
+                    del buf[:]
+                else:
+                    messages.append(None)
+
+                if len(messages) > 1:
+                    messages = tuple(messages)
+                else:
+                    messages = messages[0]
+                # Comments don't apply unless they immediately precede the
+                # message
+                if translator_comments and \
+                        translator_comments[-1][0] < message_lineno - 1:
+                    translator_comments = []
+
+                yield (message_lineno, funcname, messages,
+                       [comment[1] for comment in translator_comments])
+
+                funcname = lineno = message_lineno = None
+                call_stack = -1
+                messages = []
+                translator_comments = []
+                in_translator_comments = False
+            elif tok == STRING:
+                # Unwrap quotes in a safe manner, maintaining the string's
+                # encoding
+                # https://sourceforge.net/tracker/?func=detail&atid=355470&
+                # aid=617979&group_id=5470
+                value = eval('# coding=%s\n%s' % (str(encoding), value),
+                             {'__builtins__':{}}, {})
+                if PY2 and not isinstance(value, text_type):
+                    value = value.decode(encoding)
+                buf.append(value)
+            elif tok == OP and value == ',':
+                if buf:
+                    messages.append(''.join(buf))
+                    del buf[:]
+                else:
+                    messages.append(None)
+                if translator_comments:
+                    # We have translator comments, and since we're on a
+                    # comma(,) user is allowed to break into a new line
+                    # Let's increase the last comment's lineno in order
+                    # for the comment to still be a valid one
+                    old_lineno, old_comment = translator_comments.pop()
+                    translator_comments.append((old_lineno+1, old_comment))
+        elif call_stack > 0 and tok == OP and value == ')':
+            call_stack -= 1
+        elif funcname and call_stack == -1:
+            funcname = None
+        elif tok == NAME and value in keywords:
+            funcname = value
+
+
+def extract_javascript(fileobj, keywords, comment_tags, options):
+    """Extract messages from JavaScript source code.
+
+    :param fileobj: the seekable, file-like object the messages should be
+                    extracted from
+    :param keywords: a list of keywords (i.e. function names) that should be
+                     recognized as translation functions
+    :param comment_tags: a list of translator tags to search for and include
+                         in the results
+    :param options: a dictionary of additional options (optional)
+    """
+    from babel.messages.jslexer import tokenize, unquote_string
+    funcname = message_lineno = None
+    messages = []
+    last_argument = None
+    translator_comments = []
+    concatenate_next = False
+    encoding = options.get('encoding', 'utf-8')
+    last_token = None
+    call_stack = -1
+
+    for token in tokenize(fileobj.read().decode(encoding)):
+        if token.type == 'operator' and token.value == '(':
+            if funcname:
+                message_lineno = token.lineno
+                call_stack += 1
+
+        elif call_stack == -1 and token.type == 'linecomment':
+            value = token.value[2:].strip()
+            if translator_comments and \
+                    translator_comments[-1][0] == token.lineno - 1:
+                translator_comments.append((token.lineno, value))
+                continue
+
+            for comment_tag in comment_tags:
+                if value.startswith(comment_tag):
+                    translator_comments.append((token.lineno, value.strip()))
+                    break
+
+        elif token.type == 'multilinecomment':
+            # only one multi-line comment may precede a translation
+            translator_comments = []
+            value = token.value[2:-2].strip()
+            for comment_tag in comment_tags:
+                if value.startswith(comment_tag):
+                    lines = value.splitlines()
+                    if lines:
+                        lines[0] = lines[0].strip()
+                        lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
+                        for offset, line in enumerate(lines):
+                            translator_comments.append((token.lineno + offset,
+                                                        line))
+                    break
+
+        elif funcname and call_stack == 0:
+            if token.type == 'operator' and token.value == ')':
+                if last_argument is not None:
+                    messages.append(last_argument)
+                if len(messages) > 1:
+                    messages = tuple(messages)
+                elif messages:
+                    messages = messages[0]
+                else:
+                    messages = None
+
+                # Comments don't apply unless they immediately precede the
+                # message
+                if translator_comments and \
+                        translator_comments[-1][0] < message_lineno - 1:
+                    translator_comments = []
+
+                if messages is not None:
+                    yield (message_lineno, funcname, messages,
+                           [comment[1] for comment in translator_comments])
+
+                funcname = message_lineno = last_argument = None
+                concatenate_next = False
+                translator_comments = []
+                messages = []
+                call_stack = -1
+
+            elif token.type == 'string':
+                new_value = unquote_string(token.value)
+                if concatenate_next:
+                    last_argument = (last_argument or '') + new_value
+                    concatenate_next = False
+                else:
+                    last_argument = new_value
+
+            elif token.type == 'operator':
+                if token.value == ',':
+                    if last_argument is not None:
+                        messages.append(last_argument)
+                        last_argument = None
+                    else:
+                        messages.append(None)
+                    concatenate_next = False
+                elif token.value == '+':
+                    concatenate_next = True
+
+        elif call_stack > 0 and token.type == 'operator' \
+                and token.value == ')':
+            call_stack -= 1
+
+        elif funcname and call_stack == -1:
+            funcname = None
+
+        elif call_stack == -1 and token.type == 'name' and \
+                token.value in keywords and \
+                (last_token is None or last_token.type != 'name' or
+                 last_token.value != 'function'):
+            funcname = token.value
+
+        last_token = token
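A minimal usage sketch of the extraction API defined above, assuming the vendored babel package is importable; the sample source string and variable names are illustrative only, not part of the vendored file:

    from io import BytesIO
    from babel.messages.extract import extract

    # A tiny module with one translatable string.
    source = b"def greet():\n    return _('Hello, world!')\n"

    # extract() yields (lineno, message, comments, context) tuples;
    # 'python' selects the builtin extract_python() extractor.
    for lineno, message, comments, context in extract('python', BytesIO(source)):
        print(lineno, message)  # 2 Hello, world!
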
diff --git a/awx/lib/site-packages/babel/messages/frontend.py b/awx/lib/site-packages/babel/messages/frontend.py
new file mode 100644
index 0000000000..144bc98a10
--- /dev/null
+++ b/awx/lib/site-packages/babel/messages/frontend.py
@@ -0,0 +1,1259 @@
+# -*- coding: utf-8 -*-
+"""
+    babel.messages.frontend
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Frontends for the message extraction functionality.
+
+    :copyright: (c) 2013 by the Babel Team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+try:
+    from ConfigParser import RawConfigParser
+except ImportError:
+    from configparser import RawConfigParser
+from datetime import datetime
+from distutils import log
+from distutils.cmd import Command
+from distutils.errors import DistutilsOptionError, DistutilsSetupError
+from locale import getpreferredencoding
+import logging
+from optparse import OptionParser
+import os
+import re
+import shutil
+import sys
+import tempfile
+
+from babel import __version__ as VERSION
+from babel import Locale, localedata
+from babel.core import UnknownLocaleError
+from babel.messages.catalog import Catalog
+from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \
+                                   DEFAULT_MAPPING
+from babel.messages.mofile import write_mo
+from babel.messages.pofile import read_po, write_po
+from babel.util import odict, LOCALTZ
+from babel._compat import string_types, BytesIO, PY2
+
+
+class compile_catalog(Command):
+    """Catalog compilation command for use in ``setup.py`` scripts.
+
+    If correctly installed, this command is available to Setuptools-using
+    setup scripts automatically. For projects using plain old ``distutils``,
+    the command needs to be registered explicitly in ``setup.py``::
+
+        from babel.messages.frontend import compile_catalog
+
+        setup(
+            ...
+            cmdclass = {'compile_catalog': compile_catalog}
+        )
+
+    .. versionadded:: 0.9
+    """
+
+    description = 'compile message catalogs to binary MO files'
+    user_options = [
+        ('domain=', 'D',
+         "domain of PO file (default 'messages')"),
+        ('directory=', 'd',
+         'path to base directory containing the catalogs'),
+        ('input-file=', 'i',
+         'name of the input file'),
+        ('output-file=', 'o',
+         "name of the output file (default "
+         "'<directory>/<locale>/LC_MESSAGES/<domain>.po')"),
+        ('locale=', 'l',
+         'locale of the catalog to compile'),
+        ('use-fuzzy', 'f',
+         'also include fuzzy translations'),
+        ('statistics', None,
+         'print statistics about translations')
+    ]
+    boolean_options = ['use-fuzzy', 'statistics']
+
+    def initialize_options(self):
+        self.domain = 'messages'
+        self.directory = None
+        self.input_file = None
+        self.output_file = None
+        self.locale = None
+        self.use_fuzzy = False
+        self.statistics = False
+
+    def finalize_options(self):
+        if not self.input_file and not self.directory:
+            raise DistutilsOptionError('you must specify either the input file '
+                                       'or the base directory')
+        if not self.output_file and not self.directory:
+            raise DistutilsOptionError('you must specify either the output file '
+                                       'or the base directory')
+
+    def run(self):
+        po_files = []
+        mo_files = []
+
+        if not self.input_file:
+            if self.locale:
+                po_files.append((self.locale,
+                                 os.path.join(self.directory, self.locale,
+                                              'LC_MESSAGES',
+                                              self.domain + '.po')))
+                mo_files.append(os.path.join(self.directory, self.locale,
+                                             'LC_MESSAGES',
+                                             self.domain + '.mo'))
+            else:
+                for locale in os.listdir(self.directory):
+                    po_file = os.path.join(self.directory, locale,
+                                           'LC_MESSAGES', self.domain + '.po')
+                    if os.path.exists(po_file):
+                        po_files.append((locale, po_file))
+                        mo_files.append(os.path.join(self.directory, locale,
+                                                     'LC_MESSAGES',
+                                                     self.domain + '.mo'))
+        else:
+            po_files.append((self.locale, self.input_file))
+            if self.output_file:
+                mo_files.append(self.output_file)
+            else:
+                mo_files.append(os.path.join(self.directory, self.locale,
+                                             'LC_MESSAGES',
+                                             self.domain + '.mo'))
+
+        if not po_files:
+            raise DistutilsOptionError('no message catalogs found')
+
+        for idx, (locale, po_file) in enumerate(po_files):
+            mo_file = mo_files[idx]
+            infile = open(po_file, 'r')
+            try:
+                catalog = read_po(infile, locale)
+            finally:
+                infile.close()
+
+            if self.statistics:
+                translated = 0
+                for message in list(catalog)[1:]:
+                    if message.string:
+                        translated += 1
+                percentage = 0
+                if len(catalog):
+                    percentage = translated * 100 // len(catalog)
+                log.info('%d of %d messages (%d%%) translated in %r',
+                         translated, len(catalog), percentage, po_file)
+
+            if catalog.fuzzy and not self.use_fuzzy:
+                log.warn('catalog %r is marked as fuzzy, skipping', po_file)
+                continue
+
+            for message, errors in catalog.check():
+                for error in errors:
+                    log.error('error: %s:%d: %s', po_file, message.lineno,
+                              error)
+
+            log.info('compiling catalog %r to %r', po_file, mo_file)
+
+            outfile = open(mo_file, 'wb')
+            try:
+                write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
+            finally:
+                outfile.close()
+
+
+class extract_messages(Command):
+    """Message extraction command for use in ``setup.py`` scripts.
+
+    If correctly installed, this command is available to Setuptools-using
+    setup scripts automatically. For projects using plain old ``distutils``,
+    the command needs to be registered explicitly in ``setup.py``::
+
+        from babel.messages.frontend import extract_messages
+
+        setup(
+            ...
+ cmdclass = {'extract_messages': extract_messages} + ) + """ + + description = 'extract localizable strings from the project code' + user_options = [ + ('charset=', None, + 'charset to use in the output file'), + ('keywords=', 'k', + 'space-separated list of keywords to look for in addition to the ' + 'defaults'), + ('no-default-keywords', None, + 'do not include the default keywords'), + ('mapping-file=', 'F', + 'path to the mapping configuration file'), + ('no-location', None, + 'do not include location comments with filename and line number'), + ('omit-header', None, + 'do not include msgid "" entry in header'), + ('output-file=', 'o', + 'name of the output file'), + ('width=', 'w', + 'set output line width (default 76)'), + ('no-wrap', None, + 'do not break long message lines, longer than the output line width, ' + 'into several lines'), + ('sort-output', None, + 'generate sorted output (default False)'), + ('sort-by-file', None, + 'sort output by file location (default False)'), + ('msgid-bugs-address=', None, + 'set report address for msgid'), + ('copyright-holder=', None, + 'set copyright holder in output'), + ('add-comments=', 'c', + 'place comment block with TAG (or those preceding keyword lines) in ' + 'output file. Separate multiple TAGs with commas(,)'), + ('strip-comments', None, + 'strip the comment TAGs from the comments.'), + ('input-dirs=', None, + 'directories that should be scanned for messages. Separate multiple ' + 'directories with commas(,)'), + ] + boolean_options = [ + 'no-default-keywords', 'no-location', 'omit-header', 'no-wrap', + 'sort-output', 'sort-by-file', 'strip-comments' + ] + + def initialize_options(self): + self.charset = 'utf-8' + self.keywords = '' + self._keywords = DEFAULT_KEYWORDS.copy() + self.no_default_keywords = False + self.mapping_file = None + self.no_location = False + self.omit_header = False + self.output_file = None + self.input_dirs = None + self.width = None + self.no_wrap = False + self.sort_output = False + self.sort_by_file = False + self.msgid_bugs_address = None + self.copyright_holder = None + self.add_comments = None + self._add_comments = [] + self.strip_comments = False + + def finalize_options(self): + if self.no_default_keywords and not self.keywords: + raise DistutilsOptionError('you must specify new keywords if you ' + 'disable the default ones') + if self.no_default_keywords: + self._keywords = {} + if self.keywords: + self._keywords.update(parse_keywords(self.keywords.split())) + + if not self.output_file: + raise DistutilsOptionError('no output file specified') + if self.no_wrap and self.width: + raise DistutilsOptionError("'--no-wrap' and '--width' are mutually " + "exclusive") + if not self.no_wrap and not self.width: + self.width = 76 + elif self.width is not None: + self.width = int(self.width) + + if self.sort_output and self.sort_by_file: + raise DistutilsOptionError("'--sort-output' and '--sort-by-file' " + "are mutually exclusive") + + if self.input_dirs: + self.input_dirs = re.split(',\s*', self.input_dirs) + else: + self.input_dirs = dict.fromkeys([k.split('.',1)[0] + for k in self.distribution.packages + ]).keys() + + if self.add_comments: + self._add_comments = self.add_comments.split(',') + + def run(self): + mappings = self._get_mappings() + outfile = open(self.output_file, 'wb') + try: + catalog = Catalog(project=self.distribution.get_name(), + version=self.distribution.get_version(), + msgid_bugs_address=self.msgid_bugs_address, + copyright_holder=self.copyright_holder, + charset=self.charset) + + for 
dirname, (method_map, options_map) in mappings.items(): + def callback(filename, method, options): + if method == 'ignore': + return + filepath = os.path.normpath(os.path.join(dirname, filename)) + optstr = '' + if options: + optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for + k, v in options.items()]) + log.info('extracting messages from %s%s', filepath, optstr) + + extracted = extract_from_dir(dirname, method_map, options_map, + keywords=self._keywords, + comment_tags=self._add_comments, + callback=callback, + strip_comment_tags= + self.strip_comments) + for filename, lineno, message, comments, context in extracted: + filepath = os.path.normpath(os.path.join(dirname, filename)) + catalog.add(message, None, [(filepath, lineno)], + auto_comments=comments, context=context) + + log.info('writing PO template file to %s' % self.output_file) + write_po(outfile, catalog, width=self.width, + no_location=self.no_location, + omit_header=self.omit_header, + sort_output=self.sort_output, + sort_by_file=self.sort_by_file) + finally: + outfile.close() + + def _get_mappings(self): + mappings = {} + + if self.mapping_file: + fileobj = open(self.mapping_file, 'U') + try: + method_map, options_map = parse_mapping(fileobj) + for dirname in self.input_dirs: + mappings[dirname] = method_map, options_map + finally: + fileobj.close() + + elif getattr(self.distribution, 'message_extractors', None): + message_extractors = self.distribution.message_extractors + for dirname, mapping in message_extractors.items(): + if isinstance(mapping, string_types): + method_map, options_map = parse_mapping(BytesIO(mapping)) + else: + method_map, options_map = [], {} + for pattern, method, options in mapping: + method_map.append((pattern, method)) + options_map[pattern] = options or {} + mappings[dirname] = method_map, options_map + + else: + for dirname in self.input_dirs: + mappings[dirname] = DEFAULT_MAPPING, {} + + return mappings + + +def check_message_extractors(dist, name, value): + """Validate the ``message_extractors`` keyword argument to ``setup()``. + + :param dist: the distutils/setuptools ``Distribution`` object + :param name: the name of the keyword argument (should always be + "message_extractors") + :param value: the value of the keyword argument + :raise `DistutilsSetupError`: if the value is not valid + """ + assert name == 'message_extractors' + if not isinstance(value, dict): + raise DistutilsSetupError('the value of the "message_extractors" ' + 'parameter must be a dictionary') + + +class init_catalog(Command): + """New catalog initialization command for use in ``setup.py`` scripts. + + If correctly installed, this command is available to Setuptools-using + setup scripts automatically. For projects using plain old ``distutils``, + the command needs to be registered explicitly in ``setup.py``:: + + from babel.messages.frontend import init_catalog + + setup( + ... 
+ cmdclass = {'init_catalog': init_catalog} + ) + """ + + description = 'create a new catalog based on a POT file' + user_options = [ + ('domain=', 'D', + "domain of PO file (default 'messages')"), + ('input-file=', 'i', + 'name of the input file'), + ('output-dir=', 'd', + 'path to output directory'), + ('output-file=', 'o', + "name of the output file (default " + "'//LC_MESSAGES/.po')"), + ('locale=', 'l', + 'locale for the new localized catalog'), + ('width=', 'w', + 'set output line width (default 76)'), + ('no-wrap', None, + 'do not break long message lines, longer than the output line width, ' + 'into several lines'), + ] + boolean_options = ['no-wrap'] + + def initialize_options(self): + self.output_dir = None + self.output_file = None + self.input_file = None + self.locale = None + self.domain = 'messages' + self.no_wrap = False + self.width = None + + def finalize_options(self): + if not self.input_file: + raise DistutilsOptionError('you must specify the input file') + + if not self.locale: + raise DistutilsOptionError('you must provide a locale for the ' + 'new catalog') + try: + self._locale = Locale.parse(self.locale) + except UnknownLocaleError as e: + raise DistutilsOptionError(e) + + if not self.output_file and not self.output_dir: + raise DistutilsOptionError('you must specify the output directory') + if not self.output_file: + self.output_file = os.path.join(self.output_dir, self.locale, + 'LC_MESSAGES', self.domain + '.po') + + if not os.path.exists(os.path.dirname(self.output_file)): + os.makedirs(os.path.dirname(self.output_file)) + if self.no_wrap and self.width: + raise DistutilsOptionError("'--no-wrap' and '--width' are mutually " + "exclusive") + if not self.no_wrap and not self.width: + self.width = 76 + elif self.width is not None: + self.width = int(self.width) + + def run(self): + log.info('creating catalog %r based on %r', self.output_file, + self.input_file) + + infile = open(self.input_file, 'r') + try: + # Although reading from the catalog template, read_po must be fed + # the locale in order to correctly calculate plurals + catalog = read_po(infile, locale=self.locale) + finally: + infile.close() + + catalog.locale = self._locale + catalog.revision_date = datetime.now(LOCALTZ) + catalog.fuzzy = False + + outfile = open(self.output_file, 'wb') + try: + write_po(outfile, catalog, width=self.width) + finally: + outfile.close() + + +class update_catalog(Command): + """Catalog merging command for use in ``setup.py`` scripts. + + If correctly installed, this command is available to Setuptools-using + setup scripts automatically. For projects using plain old ``distutils``, + the command needs to be registered explicitly in ``setup.py``:: + + from babel.messages.frontend import update_catalog + + setup( + ... + cmdclass = {'update_catalog': update_catalog} + ) + + .. 
versionadded:: 0.9 + """ + + description = 'update message catalogs from a POT file' + user_options = [ + ('domain=', 'D', + "domain of PO file (default 'messages')"), + ('input-file=', 'i', + 'name of the input file'), + ('output-dir=', 'd', + 'path to base directory containing the catalogs'), + ('output-file=', 'o', + "name of the output file (default " + "'//LC_MESSAGES/.po')"), + ('locale=', 'l', + 'locale of the catalog to compile'), + ('width=', 'w', + 'set output line width (default 76)'), + ('no-wrap', None, + 'do not break long message lines, longer than the output line width, ' + 'into several lines'), + ('ignore-obsolete=', None, + 'whether to omit obsolete messages from the output'), + ('no-fuzzy-matching', 'N', + 'do not use fuzzy matching'), + ('previous', None, + 'keep previous msgids of translated messages') + ] + boolean_options = ['ignore_obsolete', 'no_fuzzy_matching', 'previous'] + + def initialize_options(self): + self.domain = 'messages' + self.input_file = None + self.output_dir = None + self.output_file = None + self.locale = None + self.width = None + self.no_wrap = False + self.ignore_obsolete = False + self.no_fuzzy_matching = False + self.previous = False + + def finalize_options(self): + if not self.input_file: + raise DistutilsOptionError('you must specify the input file') + if not self.output_file and not self.output_dir: + raise DistutilsOptionError('you must specify the output file or ' + 'directory') + if self.output_file and not self.locale: + raise DistutilsOptionError('you must specify the locale') + if self.no_wrap and self.width: + raise DistutilsOptionError("'--no-wrap' and '--width' are mutually " + "exclusive") + if not self.no_wrap and not self.width: + self.width = 76 + elif self.width is not None: + self.width = int(self.width) + if self.no_fuzzy_matching and self.previous: + self.previous = False + + def run(self): + po_files = [] + if not self.output_file: + if self.locale: + po_files.append((self.locale, + os.path.join(self.output_dir, self.locale, + 'LC_MESSAGES', + self.domain + '.po'))) + else: + for locale in os.listdir(self.output_dir): + po_file = os.path.join(self.output_dir, locale, + 'LC_MESSAGES', + self.domain + '.po') + if os.path.exists(po_file): + po_files.append((locale, po_file)) + else: + po_files.append((self.locale, self.output_file)) + + domain = self.domain + if not domain: + domain = os.path.splitext(os.path.basename(self.input_file))[0] + + infile = open(self.input_file, 'U') + try: + template = read_po(infile) + finally: + infile.close() + + if not po_files: + raise DistutilsOptionError('no message catalogs found') + + for locale, filename in po_files: + log.info('updating catalog %r based on %r', filename, + self.input_file) + infile = open(filename, 'U') + try: + catalog = read_po(infile, locale=locale, domain=domain) + finally: + infile.close() + + catalog.update(template, self.no_fuzzy_matching) + + tmpname = os.path.join(os.path.dirname(filename), + tempfile.gettempprefix() + + os.path.basename(filename)) + tmpfile = open(tmpname, 'w') + try: + try: + write_po(tmpfile, catalog, + ignore_obsolete=self.ignore_obsolete, + include_previous=self.previous, width=self.width) + finally: + tmpfile.close() + except: + os.remove(tmpname) + raise + + try: + os.rename(tmpname, filename) + except OSError: + # We're probably on Windows, which doesn't support atomic + # renames, at least not through Python + # If the error is in fact due to a permissions problem, that + # same error is going to be raised from one of the following 
+            # operations
+            os.remove(filename)
+            shutil.copy(tmpname, filename)
+            os.remove(tmpname)
+
+
+class CommandLineInterface(object):
+    """Command-line interface.
+
+    This class provides a simple command-line interface to the message
+    extraction and PO file generation functionality.
+    """
+
+    usage = '%%prog %s [options] %s'
+    version = '%%prog %s' % VERSION
+    commands = {
+        'compile': 'compile message catalogs to MO files',
+        'extract': 'extract messages from source files and generate a POT file',
+        'init': 'create new message catalogs from a POT file',
+        'update': 'update existing message catalogs from a POT file'
+    }
+
+    def run(self, argv=sys.argv):
+        """Main entry point of the command-line interface.
+
+        :param argv: list of arguments passed on the command-line
+        """
+        self.parser = OptionParser(usage=self.usage % ('command', '[args]'),
+                                   version=self.version)
+        self.parser.disable_interspersed_args()
+        self.parser.print_help = self._help
+        self.parser.add_option('--list-locales', dest='list_locales',
+                               action='store_true',
+                               help="print all known locales and exit")
+        self.parser.add_option('-v', '--verbose', action='store_const',
+                               dest='loglevel', const=logging.DEBUG,
+                               help='print as much as possible')
+        self.parser.add_option('-q', '--quiet', action='store_const',
+                               dest='loglevel', const=logging.ERROR,
+                               help='print as little as possible')
+        self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
+
+        options, args = self.parser.parse_args(argv[1:])
+
+        self._configure_logging(options.loglevel)
+        if options.list_locales:
+            identifiers = localedata.locale_identifiers()
+            longest = max([len(identifier) for identifier in identifiers])
+            identifiers.sort()
+            format = u'%%-%ds %%s' % (longest + 1)
+            for identifier in identifiers:
+                locale = Locale.parse(identifier)
+                output = format % (identifier, locale.english_name)
+                print(output.encode(sys.stdout.encoding or
+                                    getpreferredencoding() or
+                                    'ascii', 'replace'))
+            return 0
+
+        if not args:
+            self.parser.error('no valid command or option passed. '
+                              'Try the -h/--help option for more information.')
+
+        cmdname = args[0]
+        if cmdname not in self.commands:
+            self.parser.error('unknown command "%s"' % cmdname)
+
+        return getattr(self, cmdname)(args[1:])
+
+    def _configure_logging(self, loglevel):
+        self.log = logging.getLogger('babel')
+        self.log.setLevel(loglevel)
+        # Don't add a new handler for every instance initialization (#227),
+        # this would cause duplicated output when the CommandLineInterface
+        # is used as a normal Python class.
+        if self.log.handlers:
+            handler = self.log.handlers[0]
+        else:
+            handler = logging.StreamHandler()
+            self.log.addHandler(handler)
+        handler.setLevel(loglevel)
+        formatter = logging.Formatter('%(message)s')
+        handler.setFormatter(formatter)
+
+    def _help(self):
+        print(self.parser.format_help())
+        print("commands:")
+        longest = max([len(command) for command in self.commands])
+        format = "  %%-%ds %%s" % max(8, longest + 1)
+        commands = sorted(self.commands.items())
+        for name, description in commands:
+            print(format % (name, description))
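The interface above can be driven programmatically as well as through the `pybabel` console script; a minimal sketch of a compile run, using the subcommands defined below (the `locale` directory and `messages` domain are hypothetical):

    import sys
    from babel.messages.frontend import CommandLineInterface

    # argv[0] is the program name, exactly as with sys.argv.
    sys.exit(CommandLineInterface().run(
        ['pybabel', 'compile', '--directory', 'locale',
         '--domain', 'messages', '--statistics']))
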
+    def compile(self, argv):
+        """Subcommand for compiling a message catalog to a MO file.
+
+        :param argv: the command arguments
+        :since: version 0.9
+        """
+        parser = OptionParser(usage=self.usage % ('compile', ''),
+                              description=self.commands['compile'])
+        parser.add_option('--domain', '-D', dest='domain',
+                          help="domain of MO and PO files (default '%default')")
+        parser.add_option('--directory', '-d', dest='directory',
+                          metavar='DIR', help='base directory of catalog files')
+        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
+                          help='locale of the catalog')
+        parser.add_option('--input-file', '-i', dest='input_file',
+                          metavar='FILE', help='name of the input file')
+        parser.add_option('--output-file', '-o', dest='output_file',
+                          metavar='FILE',
+                          help="name of the output file (default "
+                               "'<directory>/<locale>/LC_MESSAGES/"
+                               "<domain>.mo')")
+        parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy',
+                          action='store_true',
+                          help='also include fuzzy translations (default '
+                               '%default)')
+        parser.add_option('--statistics', dest='statistics',
+                          action='store_true',
+                          help='print statistics about translations')
+
+        parser.set_defaults(domain='messages', use_fuzzy=False,
+                            compile_all=False, statistics=False)
+        options, args = parser.parse_args(argv)
+
+        po_files = []
+        mo_files = []
+        if not options.input_file:
+            if not options.directory:
+                parser.error('you must specify either the input file or the '
+                             'base directory')
+            if options.locale:
+                po_files.append((options.locale,
+                                 os.path.join(options.directory,
+                                              options.locale, 'LC_MESSAGES',
+                                              options.domain + '.po')))
+                mo_files.append(os.path.join(options.directory, options.locale,
+                                             'LC_MESSAGES',
+                                             options.domain + '.mo'))
+            else:
+                for locale in os.listdir(options.directory):
+                    po_file = os.path.join(options.directory, locale,
+                                           'LC_MESSAGES', options.domain + '.po')
+                    if os.path.exists(po_file):
+                        po_files.append((locale, po_file))
+                        mo_files.append(os.path.join(options.directory, locale,
+                                                     'LC_MESSAGES',
+                                                     options.domain + '.mo'))
+        else:
+            po_files.append((options.locale, options.input_file))
+            if options.output_file:
+                mo_files.append(options.output_file)
+            else:
+                if not options.directory:
+                    parser.error('you must specify either the input file or '
+                                 'the base directory')
+                mo_files.append(os.path.join(options.directory, options.locale,
+                                             'LC_MESSAGES',
+                                             options.domain + '.mo'))
+        if not po_files:
+            parser.error('no message catalogs found')
+
+        for idx, (locale, po_file) in enumerate(po_files):
+            mo_file = mo_files[idx]
+            infile = open(po_file, 'r')
+            try:
+                catalog = read_po(infile, locale)
+            finally:
+                infile.close()
+
+            if options.statistics:
+                translated = 0
+                for message in list(catalog)[1:]:
+                    if message.string:
+                        translated += 1
+                percentage = 0
+                if len(catalog):
+                    percentage = translated * 100 // len(catalog)
+                self.log.info("%d of %d messages (%d%%) translated in %r",
+                              translated, len(catalog), percentage, po_file)
+
+            if catalog.fuzzy and not options.use_fuzzy:
+                self.log.warning('catalog %r is marked as fuzzy, skipping',
+                                 po_file)
+                continue
+
+            for message, errors in catalog.check():
+                for error in errors:
+                    self.log.error('error: %s:%d: %s', po_file, message.lineno,
+                                   error)
+
+            self.log.info('compiling catalog %r to %r', po_file, mo_file)
+
+            outfile = open(mo_file, 'wb')
+            try:
+                write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy)
+            finally:
+                outfile.close()
+
+    def extract(self, argv):
+        """Subcommand for extracting messages from source files and generating
+        a POT file.
+ + :param argv: the command arguments + """ + parser = OptionParser(usage=self.usage % ('extract', 'dir1 ...'), + description=self.commands['extract']) + parser.add_option('--charset', dest='charset', + help='charset to use in the output (default ' + '"%default")') + parser.add_option('-k', '--keyword', dest='keywords', action='append', + help='keywords to look for in addition to the ' + 'defaults. You can specify multiple -k flags on ' + 'the command line.') + parser.add_option('--no-default-keywords', dest='no_default_keywords', + action='store_true', + help="do not include the default keywords") + parser.add_option('--mapping', '-F', dest='mapping_file', + help='path to the extraction mapping file') + parser.add_option('--no-location', dest='no_location', + action='store_true', + help='do not include location comments with filename ' + 'and line number') + parser.add_option('--omit-header', dest='omit_header', + action='store_true', + help='do not include msgid "" entry in header') + parser.add_option('-o', '--output', dest='output', + help='path to the output POT file') + parser.add_option('-w', '--width', dest='width', type='int', + help="set output line width (default 76)") + parser.add_option('--no-wrap', dest='no_wrap', action='store_true', + help='do not break long message lines, longer than ' + 'the output line width, into several lines') + parser.add_option('--sort-output', dest='sort_output', + action='store_true', + help='generate sorted output (default False)') + parser.add_option('--sort-by-file', dest='sort_by_file', + action='store_true', + help='sort output by file location (default False)') + parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address', + metavar='EMAIL@ADDRESS', + help='set report address for msgid') + parser.add_option('--copyright-holder', dest='copyright_holder', + help='set copyright holder in output') + parser.add_option('--project', dest='project', + help='set project name in output') + parser.add_option('--version', dest='version', + help='set project version in output') + parser.add_option('--add-comments', '-c', dest='comment_tags', + metavar='TAG', action='append', + help='place comment block with TAG (or those ' + 'preceding keyword lines) in output file. 
One ' + 'TAG per argument call') + parser.add_option('--strip-comment-tags', '-s', + dest='strip_comment_tags', action='store_true', + help='Strip the comment tags from the comments.') + + parser.set_defaults(charset='utf-8', keywords=[], + no_default_keywords=False, no_location=False, + omit_header = False, width=None, no_wrap=False, + sort_output=False, sort_by_file=False, + comment_tags=[], strip_comment_tags=False) + options, args = parser.parse_args(argv) + if not args: + parser.error('incorrect number of arguments') + + keywords = DEFAULT_KEYWORDS.copy() + if options.no_default_keywords: + if not options.keywords: + parser.error('you must specify new keywords if you disable the ' + 'default ones') + keywords = {} + if options.keywords: + keywords.update(parse_keywords(options.keywords)) + + if options.mapping_file: + fileobj = open(options.mapping_file, 'U') + try: + method_map, options_map = parse_mapping(fileobj) + finally: + fileobj.close() + else: + method_map = DEFAULT_MAPPING + options_map = {} + + if options.width and options.no_wrap: + parser.error("'--no-wrap' and '--width' are mutually exclusive.") + elif not options.width and not options.no_wrap: + options.width = 76 + + if options.sort_output and options.sort_by_file: + parser.error("'--sort-output' and '--sort-by-file' are mutually " + "exclusive") + + catalog = Catalog(project=options.project, + version=options.version, + msgid_bugs_address=options.msgid_bugs_address, + copyright_holder=options.copyright_holder, + charset=options.charset) + + for dirname in args: + if not os.path.isdir(dirname): + parser.error('%r is not a directory' % dirname) + + def callback(filename, method, options): + if method == 'ignore': + return + filepath = os.path.normpath(os.path.join(dirname, filename)) + optstr = '' + if options: + optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for + k, v in options.items()]) + self.log.info('extracting messages from %s%s', filepath, + optstr) + + extracted = extract_from_dir(dirname, method_map, options_map, + keywords, options.comment_tags, + callback=callback, + strip_comment_tags= + options.strip_comment_tags) + for filename, lineno, message, comments, context in extracted: + filepath = os.path.normpath(os.path.join(dirname, filename)) + catalog.add(message, None, [(filepath, lineno)], + auto_comments=comments, context=context) + + catalog_charset = catalog.charset + if options.output not in (None, '-'): + self.log.info('writing PO template file to %s' % options.output) + outfile = open(options.output, 'wb') + close_output = True + else: + outfile = sys.stdout + + # This is a bit of a hack on Python 3. stdout is a text stream so + # we need to find the underlying file when we write the PO. In + # later versions of Babel we want the write_po function to accept + # text or binary streams and automatically adjust the encoding. + if not PY2 and hasattr(outfile, 'buffer'): + catalog.charset = outfile.encoding + outfile = outfile.buffer.raw + + close_output = False + + try: + write_po(outfile, catalog, width=options.width, + no_location=options.no_location, + omit_header=options.omit_header, + sort_output=options.sort_output, + sort_by_file=options.sort_by_file) + finally: + if close_output: + outfile.close() + catalog.charset = catalog_charset + + def init(self, argv): + """Subcommand for creating new message catalogs from a template. 
+
+        :param argv: the command arguments
+        """
+        parser = OptionParser(usage=self.usage % ('init', ''),
+                              description=self.commands['init'])
+        parser.add_option('--domain', '-D', dest='domain',
+                          help="domain of PO file (default '%default')")
+        parser.add_option('--input-file', '-i', dest='input_file',
+                          metavar='FILE', help='name of the input file')
+        parser.add_option('--output-dir', '-d', dest='output_dir',
+                          metavar='DIR', help='path to output directory')
+        parser.add_option('--output-file', '-o', dest='output_file',
+                          metavar='FILE',
+                          help="name of the output file (default "
+                               "'<output_dir>/<locale>/LC_MESSAGES/"
+                               "<domain>.po')")
+        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
+                          help='locale for the new localized catalog')
+        parser.add_option('-w', '--width', dest='width', type='int',
+                          help="set output line width (default 76)")
+        parser.add_option('--no-wrap', dest='no_wrap', action='store_true',
+                          help='do not break long message lines, longer than '
+                               'the output line width, into several lines')
+
+        parser.set_defaults(domain='messages')
+        options, args = parser.parse_args(argv)
+
+        if not options.locale:
+            parser.error('you must provide a locale for the new catalog')
+        try:
+            locale = Locale.parse(options.locale)
+        except UnknownLocaleError as e:
+            parser.error(e)
+
+        if not options.input_file:
+            parser.error('you must specify the input file')
+
+        if not options.output_file and not options.output_dir:
+            parser.error('you must specify the output file or directory')
+
+        if not options.output_file:
+            options.output_file = os.path.join(options.output_dir,
+                                               options.locale, 'LC_MESSAGES',
+                                               options.domain + '.po')
+        if not os.path.exists(os.path.dirname(options.output_file)):
+            os.makedirs(os.path.dirname(options.output_file))
+        if options.width and options.no_wrap:
+            parser.error("'--no-wrap' and '--width' are mutually exclusive.")
+        elif not options.width and not options.no_wrap:
+            options.width = 76
+
+        infile = open(options.input_file, 'r')
+        try:
+            # Although reading from the catalog template, read_po must be fed
+            # the locale in order to correctly calculate plurals
+            catalog = read_po(infile, locale=options.locale)
+        finally:
+            infile.close()
+
+        catalog.locale = locale
+        catalog.revision_date = datetime.now(LOCALTZ)
+
+        self.log.info('creating catalog %r based on %r', options.output_file,
+                      options.input_file)
+
+        outfile = open(options.output_file, 'wb')
+        try:
+            write_po(outfile, catalog, width=options.width)
+        finally:
+            outfile.close()
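A typical catalog lifecycle pairs `init` with the `update` subcommand defined next; a minimal sketch, with a hypothetical `messages.pot` template, `locale` directory, and `de` locale:

    from babel.messages.frontend import CommandLineInterface

    cli = CommandLineInterface()
    # Create a German catalog from a POT template, then re-merge the
    # catalog after the template has been regenerated.
    cli.run(['pybabel', 'init', '-i', 'messages.pot', '-d', 'locale', '-l', 'de'])
    cli.run(['pybabel', 'update', '-i', 'messages.pot', '-d', 'locale', '-l', 'de'])
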
+    def update(self, argv):
+        """Subcommand for updating existing message catalogs from a template.
+
+        :param argv: the command arguments
+        :since: version 0.9
+        """
+        parser = OptionParser(usage=self.usage % ('update', ''),
+                              description=self.commands['update'])
+        parser.add_option('--domain', '-D', dest='domain',
+                          help="domain of PO file (default '%default')")
+        parser.add_option('--input-file', '-i', dest='input_file',
+                          metavar='FILE', help='name of the input file')
+        parser.add_option('--output-dir', '-d', dest='output_dir',
+                          metavar='DIR', help='path to output directory')
+        parser.add_option('--output-file', '-o', dest='output_file',
+                          metavar='FILE',
+                          help="name of the output file (default "
+                               "'<output_dir>/<locale>/LC_MESSAGES/"
+                               "<domain>.po')")
+        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
+                          help='locale of the translations catalog')
+        parser.add_option('-w', '--width', dest='width', type='int',
+                          help="set output line width (default 76)")
+        parser.add_option('--no-wrap', dest='no_wrap', action='store_true',
+                          help='do not break long message lines, longer than '
+                               'the output line width, into several lines')
+        parser.add_option('--ignore-obsolete', dest='ignore_obsolete',
+                          action='store_true',
+                          help='do not include obsolete messages in the output '
+                               '(default %default)')
+        parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching',
+                          action='store_true',
+                          help='do not use fuzzy matching (default %default)')
+        parser.add_option('--previous', dest='previous', action='store_true',
+                          help='keep previous msgids of translated messages '
+                               '(default %default)')
+
+        parser.set_defaults(domain='messages', ignore_obsolete=False,
+                            no_fuzzy_matching=False, previous=False)
+        options, args = parser.parse_args(argv)
+
+        if not options.input_file:
+            parser.error('you must specify the input file')
+        if not options.output_file and not options.output_dir:
+            parser.error('you must specify the output file or directory')
+        if options.output_file and not options.locale:
+            parser.error('you must specify the locale')
+        if options.no_fuzzy_matching and options.previous:
+            options.previous = False
+
+        po_files = []
+        if not options.output_file:
+            if options.locale:
+                po_files.append((options.locale,
+                                 os.path.join(options.output_dir,
+                                              options.locale, 'LC_MESSAGES',
+                                              options.domain + '.po')))
+            else:
+                for locale in os.listdir(options.output_dir):
+                    po_file = os.path.join(options.output_dir, locale,
+                                           'LC_MESSAGES',
+                                           options.domain + '.po')
+                    if os.path.exists(po_file):
+                        po_files.append((locale, po_file))
+        else:
+            po_files.append((options.locale, options.output_file))
+
+        domain = options.domain
+        if not domain:
+            domain = os.path.splitext(os.path.basename(options.input_file))[0]
+
+        infile = open(options.input_file, 'U')
+        try:
+            template = read_po(infile)
+        finally:
+            infile.close()
+
+        if not po_files:
+            parser.error('no message catalogs found')
+
+        if options.width and options.no_wrap:
+            parser.error("'--no-wrap' and '--width' are mutually exclusive.")
+        elif not options.width and not options.no_wrap:
+            options.width = 76
+        for locale, filename in po_files:
+            self.log.info('updating catalog %r based on %r', filename,
+                          options.input_file)
+            infile = open(filename, 'U')
+            try:
+                catalog = read_po(infile, locale=locale, domain=domain)
+            finally:
+                infile.close()
+
+            catalog.update(template, options.no_fuzzy_matching)
+
+            tmpname = os.path.join(os.path.dirname(filename),
+                                   tempfile.gettempprefix() +
+                                   os.path.basename(filename))
+            tmpfile = open(tmpname, 'w')
+            try:
+                try:
+                    write_po(tmpfile, catalog,
+                             ignore_obsolete=options.ignore_obsolete,
include_previous=options.previous, + width=options.width) + finally: + tmpfile.close() + except: + os.remove(tmpname) + raise + + try: + os.rename(tmpname, filename) + except OSError: + # We're probably on Windows, which doesn't support atomic + # renames, at least not through Python + # If the error is in fact due to a permissions problem, that + # same error is going to be raised from one of the following + # operations + os.remove(filename) + shutil.copy(tmpname, filename) + os.remove(tmpname) + + +def main(): + return CommandLineInterface().run(sys.argv) + + +def parse_mapping(fileobj, filename=None): + """Parse an extraction method mapping from a file-like object. + + >>> buf = BytesIO(b''' + ... [extractors] + ... custom = mypackage.module:myfunc + ... + ... # Python source files + ... [python: **.py] + ... + ... # Genshi templates + ... [genshi: **/templates/**.html] + ... include_attrs = + ... [genshi: **/templates/**.txt] + ... template_class = genshi.template:TextTemplate + ... encoding = latin-1 + ... + ... # Some custom extractor + ... [custom: **/custom/*.*] + ... ''') + + >>> method_map, options_map = parse_mapping(buf) + >>> len(method_map) + 4 + + >>> method_map[0] + ('**.py', 'python') + >>> options_map['**.py'] + {} + >>> method_map[1] + ('**/templates/**.html', 'genshi') + >>> options_map['**/templates/**.html']['include_attrs'] + '' + >>> method_map[2] + ('**/templates/**.txt', 'genshi') + >>> options_map['**/templates/**.txt']['template_class'] + 'genshi.template:TextTemplate' + >>> options_map['**/templates/**.txt']['encoding'] + 'latin-1' + + >>> method_map[3] + ('**/custom/*.*', 'mypackage.module:myfunc') + >>> options_map['**/custom/*.*'] + {} + + :param fileobj: a readable file-like object containing the configuration + text to parse + :see: `extract_from_directory` + """ + extractors = {} + method_map = [] + options_map = {} + + parser = RawConfigParser() + parser._sections = odict(parser._sections) # We need ordered sections + parser.readfp(fileobj, filename) + for section in parser.sections(): + if section == 'extractors': + extractors = dict(parser.items(section)) + else: + method, pattern = [part.strip() for part in section.split(':', 1)] + method_map.append((pattern, method)) + options_map[pattern] = dict(parser.items(section)) + + if extractors: + for idx, (pattern, method) in enumerate(method_map): + if method in extractors: + method = extractors[method] + method_map[idx] = (pattern, method) + + return (method_map, options_map) + + +def parse_keywords(strings=[]): + """Parse keywords specifications from the given list of strings. + + >>> kw = parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items() + >>> kw.sort() + >>> for keyword, indices in kw: + ... 
print (keyword, indices) + ('_', None) + ('dgettext', (2,)) + ('dngettext', (2, 3)) + ('pgettext', ((1, 'c'), 2)) + """ + keywords = {} + for string in strings: + if ':' in string: + funcname, indices = string.split(':') + else: + funcname, indices = string, None + if funcname not in keywords: + if indices: + inds = [] + for x in indices.split(','): + if x[-1] == 'c': + inds.append((int(x[:-1]), 'c')) + else: + inds.append(int(x)) + indices = tuple(inds) + keywords[funcname] = indices + return keywords + + +if __name__ == '__main__': + main() diff --git a/awx/lib/site-packages/babel/messages/jslexer.py b/awx/lib/site-packages/babel/messages/jslexer.py new file mode 100644 index 0000000000..22c6e1f9ce --- /dev/null +++ b/awx/lib/site-packages/babel/messages/jslexer.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- +""" + babel.messages.jslexer + ~~~~~~~~~~~~~~~~~~~~~~ + + A simple JavaScript 1.5 lexer which is used for the JavaScript + extractor. + + :copyright: (c) 2013 by the Babel Team. + :license: BSD, see LICENSE for more details. +""" + +from operator import itemgetter +import re +from babel._compat import unichr + +operators = [ + '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=', + '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=', + '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')', + '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':' +] +operators.sort(key=lambda a: -len(a)) + +escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'} + +rules = [ + (None, re.compile(r'\s+(?u)')), + (None, re.compile(r' Creating New Volume' +volume = Volume.create() +print volume + +print '--> Creating New Server' +server_list = Server.create() +server = server_list[0] +print server + +print '----> Waiting for Server to start up' +while server.status != 'running': + print '*' + time.sleep(10) +print '----> Server is running' + +print '--> Run "df -k" on Server' +status = server.run('df -k') +print status[1] + +print '--> Now run volume.make_ready to make the volume ready to use on server' +volume.make_ready(server) + +print '--> Run "df -k" on Server' +status = server.run('df -k') +print status[1] + +print '--> Do an "ls -al" on the new filesystem' +status = server.run('ls -al %s' % volume.mount_point) +print status[1] + diff --git a/awx/lib/site-packages/boto/manage/volume.py b/awx/lib/site-packages/boto/manage/volume.py new file mode 100644 index 0000000000..49237d47ee --- /dev/null +++ b/awx/lib/site-packages/boto/manage/volume.py @@ -0,0 +1,420 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from __future__ import with_statement +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty, ReferenceProperty, CalculatedProperty +from boto.manage.server import Server +from boto.manage import propget +import boto.utils +import boto.ec2 +import time +import traceback +from contextlib import closing +import datetime + + +class CommandLineGetter(object): + + def get_region(self, params): + if not params.get('region', None): + prop = self.cls.find_property('region_name') + params['region'] = propget.get(prop, choices=boto.ec2.regions) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get_name(self, params): + if not params.get('name', None): + prop = self.cls.find_property('name') + params['name'] = propget.get(prop) + + def get_size(self, params): + if not params.get('size', None): + prop = IntegerProperty(name='size', verbose_name='Size (GB)') + params['size'] = propget.get(prop) + + def get_mount_point(self, params): + if not params.get('mount_point', None): + prop = self.cls.find_property('mount_point') + params['mount_point'] = propget.get(prop) + + def get_device(self, params): + if not params.get('device', None): + prop = self.cls.find_property('device') + params['device'] = propget.get(prop) + + def get(self, cls, params): + self.cls = cls + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_zone(params) + self.get_name(params) + self.get_size(params) + self.get_mount_point(params) + self.get_device(params) + +class Volume(Model): + + name = StringProperty(required=True, unique=True, verbose_name='Name') + region_name = StringProperty(required=True, verbose_name='EC2 Region') + zone_name = StringProperty(required=True, verbose_name='EC2 Zone') + mount_point = StringProperty(verbose_name='Mount Point') + device = StringProperty(verbose_name="Device Name", default='/dev/sdp') + volume_id = StringProperty(required=True) + past_volume_ids = ListProperty(item_type=str) + server = ReferenceProperty(Server, collection_name='volumes', + verbose_name='Server Attached To') + volume_state = CalculatedProperty(verbose_name="Volume State", + calculated_type=str, use_method=True) + attachment_state = CalculatedProperty(verbose_name="Attachment State", + calculated_type=str, use_method=True) + size = CalculatedProperty(verbose_name="Size (GB)", + calculated_type=int, use_method=True) + + @classmethod + def create(cls, **params): + getter = CommandLineGetter() + getter.get(cls, params) + region = params.get('region') + ec2 = region.connect() + zone = params.get('zone') + size = params.get('size') + ebs_volume = ec2.create_volume(size, zone.name) + v = cls() + v.ec2 = ec2 + v.volume_id = ebs_volume.id + v.name = params.get('name') + v.mount_point = params.get('mount_point') + v.device = params.get('device') + v.region_name = region.name + v.zone_name = zone.name + v.put() + return v + + @classmethod + def create_from_volume_id(cls, region_name, volume_id, name): + vol = None + ec2 = boto.ec2.connect_to_region(region_name) + rs = ec2.get_all_volumes([volume_id]) + if len(rs) == 1: + v = rs[0] + vol = 
cls() + vol.volume_id = v.id + vol.name = name + vol.region_name = v.region.name + vol.zone_name = v.zone + vol.put() + return vol + + def create_from_latest_snapshot(self, name, size=None): + snapshot = self.get_snapshots()[-1] + return self.create_from_snapshot(name, snapshot, size) + + def create_from_snapshot(self, name, snapshot, size=None): + if size < self.size: + size = self.size + ec2 = self.get_ec2_connection() + if self.zone_name == None or self.zone_name == '': + # deal with the migration case where the zone is not set in the logical volume: + current_volume = ec2.get_all_volumes([self.volume_id])[0] + self.zone_name = current_volume.zone + ebs_volume = ec2.create_volume(size, self.zone_name, snapshot) + v = Volume() + v.ec2 = self.ec2 + v.volume_id = ebs_volume.id + v.name = name + v.mount_point = self.mount_point + v.device = self.device + v.region_name = self.region_name + v.zone_name = self.zone_name + v.put() + return v + + def get_ec2_connection(self): + if self.server: + return self.server.ec2 + if not hasattr(self, 'ec2') or self.ec2 == None: + self.ec2 = boto.ec2.connect_to_region(self.region_name) + return self.ec2 + + def _volume_state(self): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + return rs[0].volume_state() + + def _attachment_state(self): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + return rs[0].attachment_state() + + def _size(self): + if not hasattr(self, '__size'): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + self.__size = rs[0].size + return self.__size + + def install_xfs(self): + if self.server: + self.server.install('xfsprogs xfsdump') + + def get_snapshots(self): + """ + Returns a list of all completed snapshots for this volume ID. 
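+        Snapshots come back sorted oldest-first; each one carries a parsed
+        date attribute and keep=True, which trim_snapshots relies on later.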
+ """ + ec2 = self.get_ec2_connection() + rs = ec2.get_all_snapshots() + all_vols = [self.volume_id] + self.past_volume_ids + snaps = [] + for snapshot in rs: + if snapshot.volume_id in all_vols: + if snapshot.progress == '100%': + snapshot.date = boto.utils.parse_ts(snapshot.start_time) + snapshot.keep = True + snaps.append(snapshot) + snaps.sort(cmp=lambda x, y: cmp(x.date, y.date)) + return snaps + + def attach(self, server=None): + if self.attachment_state == 'attached': + print 'already attached' + return None + if server: + self.server = server + self.put() + ec2 = self.get_ec2_connection() + ec2.attach_volume(self.volume_id, self.server.instance_id, self.device) + + def detach(self, force=False): + state = self.attachment_state + if state == 'available' or state == None or state == 'detaching': + print 'already detached' + return None + ec2 = self.get_ec2_connection() + ec2.detach_volume(self.volume_id, self.server.instance_id, self.device, force) + self.server = None + self.put() + + def checkfs(self, use_cmd=None): + if self.server == None: + raise ValueError('server attribute must be set to run this command') + # detemine state of file system on volume, only works if attached + if use_cmd: + cmd = use_cmd + else: + cmd = self.server.get_cmdshell() + status = cmd.run('xfs_check %s' % self.device) + if not use_cmd: + cmd.close() + if status[1].startswith('bad superblock magic number 0'): + return False + return True + + def wait(self): + if self.server == None: + raise ValueError('server attribute must be set to run this command') + with closing(self.server.get_cmdshell()) as cmd: + # wait for the volume device to appear + cmd = self.server.get_cmdshell() + while not cmd.exists(self.device): + boto.log.info('%s still does not exist, waiting 10 seconds' % self.device) + time.sleep(10) + + def format(self): + if self.server == None: + raise ValueError('server attribute must be set to run this command') + status = None + with closing(self.server.get_cmdshell()) as cmd: + if not self.checkfs(cmd): + boto.log.info('make_fs...') + status = cmd.run('mkfs -t xfs %s' % self.device) + return status + + def mount(self): + if self.server == None: + raise ValueError('server attribute must be set to run this command') + boto.log.info('handle_mount_point') + with closing(self.server.get_cmdshell()) as cmd: + cmd = self.server.get_cmdshell() + if not cmd.isdir(self.mount_point): + boto.log.info('making directory') + # mount directory doesn't exist so create it + cmd.run("mkdir %s" % self.mount_point) + else: + boto.log.info('directory exists already') + status = cmd.run('mount -l') + lines = status[1].split('\n') + for line in lines: + t = line.split() + if t and t[2] == self.mount_point: + # something is already mounted at the mount point + # unmount that and mount it as /tmp + if t[0] != self.device: + cmd.run('umount %s' % self.mount_point) + cmd.run('mount %s /tmp' % t[0]) + cmd.run('chmod 777 /tmp') + break + # Mount up our new EBS volume onto mount_point + cmd.run("mount %s %s" % (self.device, self.mount_point)) + cmd.run('xfs_growfs %s' % self.mount_point) + + def make_ready(self, server): + self.server = server + self.put() + self.install_xfs() + self.attach() + self.wait() + self.format() + self.mount() + + def freeze(self): + if self.server: + return self.server.run("/usr/sbin/xfs_freeze -f %s" % self.mount_point) + + def unfreeze(self): + if self.server: + return self.server.run("/usr/sbin/xfs_freeze -u %s" % self.mount_point) + + def snapshot(self): + # if this volume is attached to a 
server + # we need to freeze the XFS file system + try: + self.freeze() + if self.server == None: + snapshot = self.get_ec2_connection().create_snapshot(self.volume_id) + else: + snapshot = self.server.ec2.create_snapshot(self.volume_id) + boto.log.info('Snapshot of Volume %s created: %s' % (self.name, snapshot)) + except Exception: + boto.log.info('Snapshot error') + boto.log.info(traceback.format_exc()) + finally: + status = self.unfreeze() + return status + + def get_snapshot_range(self, snaps, start_date=None, end_date=None): + l = [] + for snap in snaps: + if start_date and end_date: + if snap.date >= start_date and snap.date <= end_date: + l.append(snap) + elif start_date: + if snap.date >= start_date: + l.append(snap) + elif end_date: + if snap.date <= end_date: + l.append(snap) + else: + l.append(snap) + return l + + def trim_snapshots(self, delete=False): + """ + Trim the number of snapshots for this volume. This method always + keeps the oldest snapshot. It then uses the parameters passed in + to determine how many others should be kept. + + The algorithm is to keep all snapshots from the current day. Then + it will keep the first snapshot of the day for the previous seven days. + Then, it will keep the first snapshot of the week for the previous + four weeks. After than, it will keep the first snapshot of the month + for as many months as there are. + + """ + snaps = self.get_snapshots() + # Always keep the oldest and the newest + if len(snaps) <= 2: + return snaps + snaps = snaps[1:-1] + now = datetime.datetime.now(snaps[0].date.tzinfo) + midnight = datetime.datetime(year=now.year, month=now.month, + day=now.day, tzinfo=now.tzinfo) + # Keep the first snapshot from each day of the previous week + one_week = datetime.timedelta(days=7, seconds=60*60) + print midnight-one_week, midnight + previous_week = self.get_snapshot_range(snaps, midnight-one_week, midnight) + print previous_week + if not previous_week: + return snaps + current_day = None + for snap in previous_week: + if current_day and current_day == snap.date.day: + snap.keep = False + else: + current_day = snap.date.day + # Get ourselves onto the next full week boundary + if previous_week: + week_boundary = previous_week[0].date + if week_boundary.weekday() != 0: + delta = datetime.timedelta(days=week_boundary.weekday()) + week_boundary = week_boundary - delta + # Keep one within this partial week + partial_week = self.get_snapshot_range(snaps, week_boundary, previous_week[0].date) + if len(partial_week) > 1: + for snap in partial_week[1:]: + snap.keep = False + # Keep the first snapshot of each week for the previous 4 weeks + for i in range(0, 4): + weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary) + if len(weeks_worth) > 1: + for snap in weeks_worth[1:]: + snap.keep = False + week_boundary = week_boundary - one_week + # Now look through all remaining snaps and keep one per month + remainder = self.get_snapshot_range(snaps, end_date=week_boundary) + current_month = None + for snap in remainder: + if current_month and current_month == snap.date.month: + snap.keep = False + else: + current_month = snap.date.month + if delete: + for snap in snaps: + if not snap.keep: + boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name)) + snap.delete() + return snaps + + def grow(self, size): + pass + + def copy(self, snapshot): + pass + + def get_snapshot_from_date(self, date): + pass + + def delete(self, delete_ebs_volume=False): + if delete_ebs_volume: + self.detach() + ec2 = 
self.get_ec2_connection() + ec2.delete_volume(self.volume_id) + Model.delete(self) + + def archive(self): + # snapshot volume, trim snaps, delete volume-id + pass + + diff --git a/awx/lib/site-packages/boto/mashups/__init__.py b/awx/lib/site-packages/boto/mashups/__init__.py new file mode 100644 index 0000000000..449bd162a8 --- /dev/null +++ b/awx/lib/site-packages/boto/mashups/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/awx/lib/site-packages/boto/mashups/interactive.py b/awx/lib/site-packages/boto/mashups/interactive.py new file mode 100644 index 0000000000..b80e661e5f --- /dev/null +++ b/awx/lib/site-packages/boto/mashups/interactive.py @@ -0,0 +1,97 @@ +# Copyright (C) 2003-2007 Robey Pointer +# +# This file is part of paramiko. +# +# Paramiko is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Paramiko; if not, write to the Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + + +import socket +import sys + +# windows does not have termios... 
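+# (platforms without it fall back to the line-buffered windows_shell below)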
+try: + import termios + import tty + has_termios = True +except ImportError: + has_termios = False + + +def interactive_shell(chan): + if has_termios: + posix_shell(chan) + else: + windows_shell(chan) + + +def posix_shell(chan): + import select + + oldtty = termios.tcgetattr(sys.stdin) + try: + tty.setraw(sys.stdin.fileno()) + tty.setcbreak(sys.stdin.fileno()) + chan.settimeout(0.0) + + while True: + r, w, e = select.select([chan, sys.stdin], [], []) + if chan in r: + try: + x = chan.recv(1024) + if len(x) == 0: + print '\r\n*** EOF\r\n', + break + sys.stdout.write(x) + sys.stdout.flush() + except socket.timeout: + pass + if sys.stdin in r: + x = sys.stdin.read(1) + if len(x) == 0: + break + chan.send(x) + + finally: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty) + + +# thanks to Mike Looijmans for this code +def windows_shell(chan): + import threading + + sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n") + + def writeall(sock): + while True: + data = sock.recv(256) + if not data: + sys.stdout.write('\r\n*** EOF ***\r\n\r\n') + sys.stdout.flush() + break + sys.stdout.write(data) + sys.stdout.flush() + + writer = threading.Thread(target=writeall, args=(chan,)) + writer.start() + + try: + while True: + d = sys.stdin.read(1) + if not d: + break + chan.send(d) + except EOFError: + # user hit ^Z or F6 + pass diff --git a/awx/lib/site-packages/boto/mashups/iobject.py b/awx/lib/site-packages/boto/mashups/iobject.py new file mode 100644 index 0000000000..de74287bc5 --- /dev/null +++ b/awx/lib/site-packages/boto/mashups/iobject.py @@ -0,0 +1,115 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
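The interactive helpers above expect a live paramiko channel; posix_shell then proxies bytes between the channel and a raw-mode terminal via select(). A hedged usage sketch, where the host, username, and key path are assumptions for illustration:

import paramiko

from boto.mashups.interactive import interactive_shell

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('ec2-host.example.com', username='root',
               key_filename='/path/to/key.pem')
chan = client.invoke_shell()   # PTY-backed channel for the proxy loop
interactive_shell(chan)        # dispatches to posix_shell or windows_shell
client.close()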
+ +import os + +def int_val_fn(v): + try: + int(v) + return True + except: + return False + +class IObject(object): + + def choose_from_list(self, item_list, search_str='', + prompt='Enter Selection'): + if not item_list: + print 'No Choices Available' + return + choice = None + while not choice: + n = 1 + choices = [] + for item in item_list: + if isinstance(item, basestring): + print '[%d] %s' % (n, item) + choices.append(item) + n += 1 + else: + obj, id, desc = item + if desc: + if desc.find(search_str) >= 0: + print '[%d] %s - %s' % (n, id, desc) + choices.append(obj) + n += 1 + else: + if id.find(search_str) >= 0: + print '[%d] %s' % (n, id) + choices.append(obj) + n += 1 + if choices: + val = raw_input('%s[1-%d]: ' % (prompt, len(choices))) + if val.startswith('/'): + search_str = val[1:] + else: + try: + int_val = int(val) + if int_val == 0: + return None + choice = choices[int_val-1] + except ValueError: + print '%s is not a valid choice' % val + except IndexError: + print '%s is not within the range[1-%d]' % (val, + len(choices)) + else: + print "No objects matched your pattern" + search_str = '' + return choice + + def get_string(self, prompt, validation_fn=None): + okay = False + while not okay: + val = raw_input('%s: ' % prompt) + if validation_fn: + okay = validation_fn(val) + if not okay: + print 'Invalid value: %s' % val + else: + okay = True + return val + + def get_filename(self, prompt): + okay = False + val = '' + while not okay: + val = raw_input('%s: %s' % (prompt, val)) + val = os.path.expanduser(val) + if os.path.isfile(val): + okay = True + elif os.path.isdir(val): + path = val + val = self.choose_from_list(os.listdir(path)) + if val: + val = os.path.join(path, val) + okay = True + else: + val = '' + else: + print 'Invalid value: %s' % val + val = '' + return val + + def get_int(self, prompt): + s = self.get_string(prompt, int_val_fn) + return int(s) + diff --git a/awx/lib/site-packages/boto/mashups/order.py b/awx/lib/site-packages/boto/mashups/order.py new file mode 100644 index 0000000000..6efdc3ecab --- /dev/null +++ b/awx/lib/site-packages/boto/mashups/order.py @@ -0,0 +1,211 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
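IObject above is a small console-prompt toolkit, and the Item/Order classes that follow lean on it for every interactive field. A sketch of how its pieces compose, with the prompt strings and choice list as illustrative assumptions:

from boto.mashups.iobject import IObject

io = IObject()
name = io.get_string('Server name')
count = io.get_int('How many instances')
# choose_from_list accepts plain strings or (obj, id, description) tuples;
# typing "/pattern" at the prompt filters the list, and entering 0 cancels
size = io.choose_from_list(['m1.small', 'm1.large', 'm1.xlarge'],
                           prompt='Instance type')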
+ +""" +High-level abstraction of an EC2 order for servers +""" + +import boto +import boto.ec2 +from boto.mashups.server import Server, ServerSet +from boto.mashups.iobject import IObject +from boto.pyami.config import Config +from boto.sdb.persist import get_domain, set_domain +import time, StringIO + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge'] + +class Item(IObject): + + def __init__(self): + self.region = None + self.name = None + self.instance_type = None + self.quantity = 0 + self.zone = None + self.ami = None + self.groups = [] + self.key = None + self.ec2 = None + self.config = None + + def set_userdata(self, key, value): + self.userdata[key] = value + + def get_userdata(self, key): + return self.userdata[key] + + def set_region(self, region=None): + if region: + self.region = region + else: + l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()] + self.region = self.choose_from_list(l, prompt='Choose Region') + + def set_name(self, name=None): + if name: + self.name = name + else: + self.name = self.get_string('Name') + + def set_instance_type(self, instance_type=None): + if instance_type: + self.instance_type = instance_type + else: + self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type') + + def set_quantity(self, n=0): + if n > 0: + self.quantity = n + else: + self.quantity = self.get_int('Quantity') + + def set_zone(self, zone=None): + if zone: + self.zone = zone + else: + l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()] + self.zone = self.choose_from_list(l, prompt='Choose Availability Zone') + + def set_ami(self, ami=None): + if ami: + self.ami = ami + else: + l = [(a, a.id, a.location) for a in self.ec2.get_all_images()] + self.ami = self.choose_from_list(l, prompt='Choose AMI') + + def add_group(self, group=None): + if group: + self.groups.append(group) + else: + l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()] + self.groups.append(self.choose_from_list(l, prompt='Choose Security Group')) + + def set_key(self, key=None): + if key: + self.key = key + else: + l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()] + self.key = self.choose_from_list(l, prompt='Choose Keypair') + + def update_config(self): + if not self.config.has_section('Credentials'): + self.config.add_section('Credentials') + self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id) + self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key) + if not self.config.has_section('Pyami'): + self.config.add_section('Pyami') + sdb_domain = get_domain() + if sdb_domain: + self.config.set('Pyami', 'server_sdb_domain', sdb_domain) + self.config.set('Pyami', 'server_sdb_name', self.name) + + def set_config(self, config_path=None): + if not config_path: + config_path = self.get_filename('Specify Config file') + self.config = Config(path=config_path) + + def get_userdata_string(self): + s = StringIO.StringIO() + self.config.write(s) + return s.getvalue() + + def enter(self, **params): + self.region = params.get('region', self.region) + if not self.region: + self.set_region() + self.ec2 = self.region.connect() + self.name = params.get('name', self.name) + if not self.name: + self.set_name() + self.instance_type = params.get('instance_type', self.instance_type) + if not self.instance_type: + self.set_instance_type() + self.zone = params.get('zone', self.zone) + if not self.zone: + self.set_zone() + self.quantity = params.get('quantity', self.quantity) + if not 
self.quantity: + self.set_quantity() + self.ami = params.get('ami', self.ami) + if not self.ami: + self.set_ami() + self.groups = params.get('groups', self.groups) + if not self.groups: + self.add_group() + self.key = params.get('key', self.key) + if not self.key: + self.set_key() + self.config = params.get('config', self.config) + if not self.config: + self.set_config() + self.update_config() + +class Order(IObject): + + def __init__(self): + self.items = [] + self.reservation = None + + def add_item(self, **params): + item = Item() + item.enter(**params) + self.items.append(item) + + def display(self): + print 'This Order consists of the following items' + print + print 'QTY\tNAME\tTYPE\nAMI\t\tGroups\t\t\tKeyPair' + for item in self.items: + print '%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type, + item.ami.id, item.groups, item.key.name) + + def place(self, block=True): + if get_domain() == None: + print 'SDB Persistence Domain not set' + domain_name = self.get_string('Specify SDB Domain') + set_domain(domain_name) + s = ServerSet() + for item in self.items: + r = item.ami.run(min_count=1, max_count=item.quantity, + key_name=item.key.name, user_data=item.get_userdata_string(), + security_groups=item.groups, instance_type=item.instance_type, + placement=item.zone.name) + if block: + states = [i.state for i in r.instances] + if states.count('running') != len(states): + print states + time.sleep(15) + states = [i.update() for i in r.instances] + for i in r.instances: + server = Server() + server.name = item.name + server.instance_id = i.id + server.reservation = r + server.save() + s.append(server) + if len(s) == 1: + return s[0] + else: + return s + + + diff --git a/awx/lib/site-packages/boto/mashups/server.py b/awx/lib/site-packages/boto/mashups/server.py new file mode 100644 index 0000000000..aa564471c0 --- /dev/null +++ b/awx/lib/site-packages/boto/mashups/server.py @@ -0,0 +1,395 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
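Putting Item and Order together: every keyword passed to add_item pre-fills the corresponding field, and anything omitted is prompted for on the console via IObject. A hedged sketch of the ordering flow (the item values are assumptions; region, zone, AMI, key, and config would be asked for interactively here):

from boto.mashups.order import Order

order = Order()
order.add_item(name='web1', quantity=2, instance_type='m1.small')
order.display()
# place() persists each started instance to SDB; with block=True it polls
# until all instances report 'running', returning a Server or a ServerSet
servers = order.place(block=True)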
+ +""" +High-level abstraction of an EC2 server +""" +import boto +import boto.utils +from boto.mashups.iobject import IObject +from boto.pyami.config import Config, BotoConfigPath +from boto.mashups.interactive import interactive_shell +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty +import os +import StringIO + + +class ServerSet(list): + + def __getattr__(self, name): + results = [] + is_callable = False + for server in self: + try: + val = getattr(server, name) + if callable(val): + is_callable = True + results.append(val) + except: + results.append(None) + if is_callable: + self.map_list = results + return self.map + return results + + def map(self, *args): + results = [] + for fn in self.map_list: + results.append(fn(*args)) + return results + +class Server(Model): + + @property + def ec2(self): + if self._ec2 is None: + self._ec2 = boto.connect_ec2() + return self._ec2 + + @classmethod + def Inventory(cls): + """ + Returns a list of Server instances, one for each Server object + persisted in the db + """ + l = ServerSet() + rs = cls.find() + for server in rs: + l.append(server) + return l + + @classmethod + def Register(cls, name, instance_id, description=''): + s = cls() + s.name = name + s.instance_id = instance_id + s.description = description + s.save() + return s + + def __init__(self, id=None, **kw): + Model.__init__(self, id, **kw) + self._reservation = None + self._instance = None + self._ssh_client = None + self._pkey = None + self._config = None + self._ec2 = None + + name = StringProperty(unique=True, verbose_name="Name") + instance_id = StringProperty(verbose_name="Instance ID") + config_uri = StringProperty() + ami_id = StringProperty(verbose_name="AMI ID") + zone = StringProperty(verbose_name="Availability Zone") + security_group = StringProperty(verbose_name="Security Group", default="default") + key_name = StringProperty(verbose_name="Key Name") + elastic_ip = StringProperty(verbose_name="Elastic IP") + instance_type = StringProperty(verbose_name="Instance Type") + description = StringProperty(verbose_name="Description") + log = StringProperty() + + def setReadOnly(self, value): + raise AttributeError + + def getInstance(self): + if not self._instance: + if self.instance_id: + try: + rs = self.ec2.get_all_reservations([self.instance_id]) + except: + return None + if len(rs) > 0: + self._reservation = rs[0] + self._instance = self._reservation.instances[0] + return self._instance + + instance = property(getInstance, setReadOnly, None, 'The Instance for the server') + + def getAMI(self): + if self.instance: + return self.instance.image_id + + ami = property(getAMI, setReadOnly, None, 'The AMI for the server') + + def getStatus(self): + if self.instance: + self.instance.update() + return self.instance.state + + status = property(getStatus, setReadOnly, None, + 'The status of the server') + + def getHostname(self): + if self.instance: + return self.instance.public_dns_name + + hostname = property(getHostname, setReadOnly, None, + 'The public DNS name of the server') + + def getPrivateHostname(self): + if self.instance: + return self.instance.private_dns_name + + private_hostname = property(getPrivateHostname, setReadOnly, None, + 'The private DNS name of the server') + + def getLaunchTime(self): + if self.instance: + return self.instance.launch_time + + launch_time = property(getLaunchTime, setReadOnly, None, + 'The time the Server was started') + + def getConsoleOutput(self): + if self.instance: + return 
self.instance.get_console_output() + + console_output = property(getConsoleOutput, setReadOnly, None, + 'Retrieve the console output for server') + + def getGroups(self): + if self._reservation: + return self._reservation.groups + else: + return None + + groups = property(getGroups, setReadOnly, None, + 'The Security Groups controlling access to this server') + + def getConfig(self): + if not self._config: + remote_file = BotoConfigPath + local_file = '%s.ini' % self.instance.id + self.get_file(remote_file, local_file) + self._config = Config(local_file) + return self._config + + def setConfig(self, config): + local_file = '%s.ini' % self.instance.id + fp = open(local_file) + config.write(fp) + fp.close() + self.put_file(local_file, BotoConfigPath) + self._config = config + + config = property(getConfig, setConfig, None, + 'The instance data for this server') + + def set_config(self, config): + """ + Set SDB based config + """ + self._config = config + self._config.dump_to_sdb("botoConfigs", self.id) + + def load_config(self): + self._config = Config(do_load=False) + self._config.load_from_sdb("botoConfigs", self.id) + + def stop(self): + if self.instance: + self.instance.stop() + + def start(self): + self.stop() + ec2 = boto.connect_ec2() + ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0] + groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)]) + if not self._config: + self.load_config() + if not self._config.has_section("Credentials"): + self._config.add_section("Credentials") + self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id) + self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key) + + if not self._config.has_section("Pyami"): + self._config.add_section("Pyami") + + if self._manager.domain: + self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name) + self._config.set("Pyami", 'server_sdb_name', self.name) + + cfg = StringIO.StringIO() + self._config.write(cfg) + cfg = cfg.getvalue() + r = ami.run(min_count=1, + max_count=1, + key_name=self.key_name, + security_groups = groups, + instance_type = self.instance_type, + placement = self.zone, + user_data = cfg) + i = r.instances[0] + self.instance_id = i.id + self.put() + if self.elastic_ip: + ec2.associate_address(self.instance_id, self.elastic_ip) + + def reboot(self): + if self.instance: + self.instance.reboot() + + def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts', + uname='root'): + import paramiko + if not self.instance: + print 'No instance yet!' 
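+            # bail out early: the connect() call below needs the running
+            # instance's public DNS name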
+ return + if not self._ssh_client: + if not key_file: + iobject = IObject() + key_file = iobject.get_filename('Path to OpenSSH Key file') + self._pkey = paramiko.RSAKey.from_private_key_file(key_file) + self._ssh_client = paramiko.SSHClient() + self._ssh_client.load_system_host_keys() + self._ssh_client.load_host_keys(os.path.expanduser(host_key_file)) + self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self._ssh_client.connect(self.instance.public_dns_name, + username=uname, pkey=self._pkey) + return self._ssh_client + + def get_file(self, remotepath, localpath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + sftp_client.get(remotepath, localpath) + + def put_file(self, localpath, remotepath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + sftp_client.put(localpath, remotepath) + + def listdir(self, remotepath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + return sftp_client.listdir(remotepath) + + def shell(self, key_file=None): + ssh_client = self.get_ssh_client(key_file) + channel = ssh_client.invoke_shell() + interactive_shell(channel) + + def bundle_image(self, prefix, key_file, cert_file, size): + print 'bundling image...' + print '\tcopying cert and pk over to /mnt directory on server' + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + path, name = os.path.split(key_file) + remote_key_file = '/mnt/%s' % name + self.put_file(key_file, remote_key_file) + path, name = os.path.split(cert_file) + remote_cert_file = '/mnt/%s' % name + self.put_file(cert_file, remote_cert_file) + print '\tdeleting %s' % BotoConfigPath + # delete the metadata.ini file if it exists + try: + sftp_client.remove(BotoConfigPath) + except: + pass + command = 'sudo ec2-bundle-vol ' + command += '-c %s -k %s ' % (remote_cert_file, remote_key_file) + command += '-u %s ' % self._reservation.owner_id + command += '-p %s ' % prefix + command += '-s %d ' % size + command += '-d /mnt ' + if self.instance.instance_type == 'm1.small' or self.instance_type == 'c1.medium': + command += '-r i386' + else: + command += '-r x86_64' + print '\t%s' % command + t = ssh_client.exec_command(command) + response = t[1].read() + print '\t%s' % response + print '\t%s' % t[2].read() + print '...complete!' + + def upload_bundle(self, bucket, prefix): + print 'uploading bundle...' + command = 'ec2-upload-bundle ' + command += '-m /mnt/%s.manifest.xml ' % prefix + command += '-b %s ' % bucket + command += '-a %s ' % self.ec2.aws_access_key_id + command += '-s %s ' % self.ec2.aws_secret_access_key + print '\t%s' % command + ssh_client = self.get_ssh_client() + t = ssh_client.exec_command(command) + response = t[1].read() + print '\t%s' % response + print '\t%s' % t[2].read() + print '...complete!' + + def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None): + iobject = IObject() + if not bucket: + bucket = iobject.get_string('Name of S3 bucket') + if not prefix: + prefix = iobject.get_string('Prefix for AMI file') + if not key_file: + key_file = iobject.get_filename('Path to RSA private key file') + if not cert_file: + cert_file = iobject.get_filename('Path to RSA public cert file') + if not size: + size = iobject.get_int('Size (in MB) of bundled image') + self.bundle_image(prefix, key_file, cert_file, size) + self.upload_bundle(bucket, prefix) + print 'registering image...' 
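+        # point the new AMI at the manifest that upload_bundle just
+        # pushed to the S3 bucket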
+ self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix)) + return self.image_id + + def attach_volume(self, volume, device="/dev/sdp"): + """ + Attach an EBS volume to this server + + :param volume: EBS Volume to attach + :type volume: boto.ec2.volume.Volume + + :param device: Device to attach to (default to /dev/sdp) + :type device: string + """ + if hasattr(volume, "id"): + volume_id = volume.id + else: + volume_id = volume + return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device) + + def detach_volume(self, volume): + """ + Detach an EBS volume from this server + + :param volume: EBS Volume to detach + :type volume: boto.ec2.volume.Volume + """ + if hasattr(volume, "id"): + volume_id = volume.id + else: + volume_id = volume + return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id) + + def install_package(self, package_name): + print 'installing %s...' % package_name + command = 'yum -y install %s' % package_name + print '\t%s' % command + ssh_client = self.get_ssh_client() + t = ssh_client.exec_command(command) + response = t[1].read() + print '\t%s' % response + print '\t%s' % t[2].read() + print '...complete!' diff --git a/awx/lib/site-packages/boto/mturk/__init__.py b/awx/lib/site-packages/boto/mturk/__init__.py new file mode 100644 index 0000000000..449bd162a8 --- /dev/null +++ b/awx/lib/site-packages/boto/mturk/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/awx/lib/site-packages/boto/mturk/connection.py b/awx/lib/site-packages/boto/mturk/connection.py new file mode 100644 index 0000000000..ad6678497c --- /dev/null +++ b/awx/lib/site-packages/boto/mturk/connection.py @@ -0,0 +1,1027 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import xml.sax +import datetime +import itertools + +from boto import handler +from boto import config +from boto.mturk.price import Price +import boto.mturk.notification +from boto.connection import AWSQueryConnection +from boto.exception import EC2ResponseError +from boto.resultset import ResultSet +from boto.mturk.question import QuestionForm, ExternalQuestion, HTMLQuestion + + +class MTurkRequestError(EC2ResponseError): + "Error for MTurk Requests" + # todo: subclass from an abstract parent of EC2ResponseError + + +class MTurkConnection(AWSQueryConnection): + + APIVersion = '2012-03-25' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, + host=None, debug=0, + https_connection_factory=None): + if not host: + if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True': + host = 'mechanicalturk.sandbox.amazonaws.com' + else: + host = 'mechanicalturk.amazonaws.com' + self.debug = debug + + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, host, debug, + https_connection_factory) + + def _required_auth_capability(self): + return ['mturk'] + + def get_account_balance(self): + """ + """ + params = {} + return self._process_request('GetAccountBalance', params, + [('AvailableBalance', Price), + ('OnHoldBalance', Price)]) + + def register_hit_type(self, title, description, reward, duration, + keywords=None, approval_delay=None, qual_req=None): + """ + Register a new HIT Type + title, description are strings + reward is a Price object + duration can be a timedelta, or an object castable to an int + """ + params = dict( + Title=title, + Description=description, + AssignmentDurationInSeconds=self.duration_as_seconds(duration), + ) + params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward')) + + if keywords: + params['Keywords'] = self.get_keywords_as_string(keywords) + + if approval_delay is not None: + d = self.duration_as_seconds(approval_delay) + params['AutoApprovalDelayInSeconds'] = d + + if qual_req is not None: + params.update(qual_req.get_as_params()) + + return self._process_request('RegisterHITType', params, + [('HITTypeId', HITTypeId)]) + + def set_email_notification(self, hit_type, email, event_types=None): + """ + Performs a SetHITTypeNotification operation to set email + notification for a specified HIT type + """ + return self._set_notification(hit_type, 'Email', email, + 'SetHITTypeNotification', event_types) + + def set_rest_notification(self, hit_type, url, event_types=None): + """ + Performs a SetHITTypeNotification operation to set REST notification + for a specified HIT type + """ + return self._set_notification(hit_type, 'REST', url, + 'SetHITTypeNotification', event_types) + + def set_sqs_notification(self, hit_type, queue_url, event_types=None): + """ + Performs a SetHITTypeNotification operation so set SQS notification + for a specified HIT type. 
Queue URL is of form: + https://queue.amazonaws.com// and can be + found when looking at the details for a Queue in the AWS Console + """ + return self._set_notification(hit_type, "SQS", queue_url, + 'SetHITTypeNotification', event_types) + + def send_test_event_notification(self, hit_type, url, + event_types=None, + test_event_type='Ping'): + """ + Performs a SendTestEventNotification operation with REST notification + for a specified HIT type + """ + return self._set_notification(hit_type, 'REST', url, + 'SendTestEventNotification', + event_types, test_event_type) + + def _set_notification(self, hit_type, transport, + destination, request_type, + event_types=None, test_event_type=None): + """ + Common operation to set notification or send a test event + notification for a specified HIT type + """ + params = {'HITTypeId': hit_type} + + # from the Developer Guide: + # The 'Active' parameter is optional. If omitted, the active status of + # the HIT type's notification specification is unchanged. All HIT types + # begin with their notification specifications in the "inactive" status. + notification_params = {'Destination': destination, + 'Transport': transport, + 'Version': boto.mturk.notification.NotificationMessage.NOTIFICATION_VERSION, + 'Active': True, + } + + # add specific event types if required + if event_types: + self.build_list_params(notification_params, event_types, + 'EventType') + + # Set up dict of 'Notification.1.Transport' etc. values + notification_rest_params = {} + num = 1 + for key in notification_params: + notification_rest_params['Notification.%d.%s' % (num, key)] = notification_params[key] + + # Update main params dict + params.update(notification_rest_params) + + # If test notification, specify the notification type to be tested + if test_event_type: + params.update({'TestEventType': test_event_type}) + + # Execute operation + return self._process_request(request_type, params) + + def create_hit(self, hit_type=None, question=None, hit_layout=None, + lifetime=datetime.timedelta(days=7), + max_assignments=1, + title=None, description=None, keywords=None, + reward=None, duration=datetime.timedelta(days=7), + approval_delay=None, annotation=None, + questions=None, qualifications=None, + layout_params=None, response_groups=None): + """ + Creates a new HIT. 
+ Returns a ResultSet + See: http://docs.amazonwebservices.com/AWSMechTurk/2012-03-25/AWSMturkAPI/ApiReference_CreateHITOperation.html + """ + + # Handle basic required arguments and set up params dict + params = {'LifetimeInSeconds': + self.duration_as_seconds(lifetime), + 'MaxAssignments': max_assignments, + } + + # handle single or multiple questions or layouts + neither = question is None and questions is None + if hit_layout is None: + both = question is not None and questions is not None + if neither or both: + raise ValueError("Must specify question (single Question instance) or questions (list or QuestionForm instance), but not both") + if question: + questions = [question] + question_param = QuestionForm(questions) + if isinstance(question, QuestionForm): + question_param = question + elif isinstance(question, ExternalQuestion): + question_param = question + elif isinstance(question, HTMLQuestion): + question_param = question + params['Question'] = question_param.get_as_xml() + else: + if not neither: + raise ValueError("Must not specify question (single Question instance) or questions (list or QuestionForm instance) when specifying hit_layout") + params['HITLayoutId'] = hit_layout + if layout_params: + params.update(layout_params.get_as_params()) + + # if hit type specified then add it + # else add the additional required parameters + if hit_type: + params['HITTypeId'] = hit_type + else: + # Handle keywords + final_keywords = MTurkConnection.get_keywords_as_string(keywords) + + # Handle price argument + final_price = MTurkConnection.get_price_as_price(reward) + + final_duration = self.duration_as_seconds(duration) + + additional_params = dict( + Title=title, + Description=description, + Keywords=final_keywords, + AssignmentDurationInSeconds=final_duration, + ) + additional_params.update(final_price.get_as_params('Reward')) + + if approval_delay is not None: + d = self.duration_as_seconds(approval_delay) + additional_params['AutoApprovalDelayInSeconds'] = d + + # add these params to the others + params.update(additional_params) + + # add the annotation if specified + if annotation is not None: + params['RequesterAnnotation'] = annotation + + # Add the Qualifications if specified + if qualifications is not None: + params.update(qualifications.get_as_params()) + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + # Submit + return self._process_request('CreateHIT', params, [('HIT', HIT)]) + + def change_hit_type_of_hit(self, hit_id, hit_type): + """ + Change the HIT type of an existing HIT. Note that the reward associated + with the new HIT type must match the reward of the current HIT type in + order for the operation to be valid. + + :type hit_id: str + :type hit_type: str + """ + params = {'HITId': hit_id, + 'HITTypeId': hit_type} + + return self._process_request('ChangeHITTypeOfHIT', params) + + def get_reviewable_hits(self, hit_type=None, status='Reviewable', + sort_by='Expiration', sort_direction='Ascending', + page_size=10, page_number=1): + """ + Retrieve the HITs that have a status of Reviewable, or HITs that + have a status of Reviewing, and that belong to the Requester + calling the operation. 
+ """ + params = {'Status': status, + 'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + + # Handle optional hit_type argument + if hit_type is not None: + params.update({'HITTypeId': hit_type}) + + return self._process_request('GetReviewableHITs', params, + [('HIT', HIT)]) + + @staticmethod + def _get_pages(page_size, total_records): + """ + Given a page size (records per page) and a total number of + records, return the page numbers to be retrieved. + """ + pages = total_records / page_size + bool(total_records % page_size) + return range(1, pages + 1) + + def get_all_hits(self): + """ + Return all of a Requester's HITs + + Despite what search_hits says, it does not return all hits, but + instead returns a page of hits. This method will pull the hits + from the server 100 at a time, but will yield the results + iteratively, so subsequent requests are made on demand. + """ + page_size = 100 + search_rs = self.search_hits(page_size=page_size) + total_records = int(search_rs.TotalNumResults) + get_page_hits = lambda page: self.search_hits(page_size=page_size, page_number=page) + page_nums = self._get_pages(page_size, total_records) + hit_sets = itertools.imap(get_page_hits, page_nums) + return itertools.chain.from_iterable(hit_sets) + + def search_hits(self, sort_by='CreationTime', sort_direction='Ascending', + page_size=10, page_number=1, response_groups=None): + """ + Return a page of a Requester's HITs, on behalf of the Requester. + The operation returns HITs of any status, except for HITs that + have been disposed with the DisposeHIT operation. + Note: + The SearchHITs operation does not accept any search parameters + that filter the results. + """ + params = {'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('SearchHITs', params, [('HIT', HIT)]) + + def get_assignment(self, assignment_id, response_groups=None): + """ + Retrieves an assignment using the assignment's ID. Requesters can only + retrieve their own assignments, and only assignments whose related HIT + has not been disposed. + + The returned ResultSet will have the following attributes: + + Request + This element is present only if the Request ResponseGroup + is specified. + Assignment + The assignment. The response includes one Assignment object. + HIT + The HIT associated with this assignment. The response + includes one HIT object. + + """ + + params = {'AssignmentId': assignment_id} + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetAssignment', params, + [('Assignment', Assignment), + ('HIT', HIT)]) + + def get_assignments(self, hit_id, status=None, + sort_by='SubmitTime', sort_direction='Ascending', + page_size=10, page_number=1, response_groups=None): + """ + Retrieves completed assignments for a HIT. + Use this operation to retrieve the results for a HIT. + + The returned ResultSet will have the following attributes: + + NumResults + The number of assignments on the page in the filtered results + list, equivalent to the number of assignments being returned + by this call. + A non-negative integer + PageNumber + The number of the page in the filtered results list being + returned. 
+ A positive integer + TotalNumResults + The total number of assignments in the filtered results list + based on this call. + A non-negative integer + + The ResultSet will contain zero or more Assignment objects + + """ + params = {'HITId': hit_id, + 'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + + if status is not None: + params['AssignmentStatus'] = status + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetAssignmentsForHIT', params, + [('Assignment', Assignment)]) + + def approve_assignment(self, assignment_id, feedback=None): + """ + Approve a Worker's submitted assignment, optionally with feedback. + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('ApproveAssignment', params) + + def reject_assignment(self, assignment_id, feedback=None): + """ + Reject a Worker's submitted assignment, optionally with feedback. + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('RejectAssignment', params) + + def approve_rejected_assignment(self, assignment_id, feedback=None): + """ + Approve an assignment that was previously rejected. + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('ApproveRejectedAssignment', params) + + def get_hit(self, hit_id, response_groups=None): + """ + Retrieve the details of an existing HIT by its ID. + """ + params = {'HITId': hit_id} + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetHIT', params, [('HIT', HIT)]) + + def set_reviewing(self, hit_id, revert=None): + """ + Update a HIT with a status of Reviewable to have a status of Reviewing, + or revert a Reviewing HIT back to the Reviewable status. + + Only HITs with a status of Reviewable can be updated with a status of + Reviewing. Similarly, only Reviewing HITs can be reverted back to a + status of Reviewable. + """ + params = {'HITId': hit_id} + if revert: + params['Revert'] = revert + return self._process_request('SetHITAsReviewing', params) + + def disable_hit(self, hit_id, response_groups=None): + """ + Remove a HIT from the Mechanical Turk marketplace, approve all + submitted assignments that have not already been approved or rejected, + and dispose of the HIT and all assignment data. + + Assignments for the HIT that have already been submitted, but not yet + approved or rejected, will be automatically approved. Assignments in + progress at the time of the call to DisableHIT will be approved once + the assignments are submitted. You will be charged for approval of + these assignments. DisableHIT completely disposes of the HIT and + all submitted assignment data. Assignment results data cannot be + retrieved for a HIT that has been disposed. + + It is not possible to re-enable a HIT once it has been disabled. + To make the work from a disabled HIT available again, create a new HIT. + """ + params = {'HITId': hit_id} + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('DisableHIT', params) + + def dispose_hit(self, hit_id): + """ + Dispose of a HIT that is no longer needed. + + Only HITs in the "reviewable" state, with all submitted + assignments approved or rejected, can be disposed.
A Requester + can call GetReviewableHITs to determine which HITs are + reviewable, then call GetAssignmentsForHIT to retrieve the + assignments. Disposing of a HIT removes the HIT from the + results of a call to GetReviewableHITs. + """ + params = {'HITId': hit_id} + return self._process_request('DisposeHIT', params) + + def expire_hit(self, hit_id): + """ + Expire a HIT that is no longer needed. + + The effect is identical to the HIT expiring on its own. The + HIT no longer appears on the Mechanical Turk web site, and no + new Workers are allowed to accept the HIT. Workers who have + accepted the HIT prior to expiration are allowed to complete + it or return it, or allow the assignment duration to elapse + (abandon the HIT). Once all remaining assignments have been + submitted, the expired HIT becomes "reviewable", and will be + returned by a call to GetReviewableHITs. + """ + params = {'HITId': hit_id} + return self._process_request('ForceExpireHIT', params) + + def extend_hit(self, hit_id, assignments_increment=None, + expiration_increment=None): + """ + Increase the maximum number of assignments, or extend the + expiration date, of an existing HIT. + + NOTE: If a HIT has a status of Reviewable and the HIT is + extended to make it Available, the HIT will not be returned by + GetReviewableHITs, and its submitted assignments will not be + returned by GetAssignmentsForHIT, until the HIT is Reviewable + again. Assignment auto-approval will still happen on its + original schedule, even if the HIT has been extended. Be sure + to retrieve and approve (or reject) submitted assignments + before extending the HIT, if so desired. + """ + # must provide assignment *or* expiration increment + if (assignments_increment is None and expiration_increment is None) or \ + (assignments_increment is not None and expiration_increment is not None): + raise ValueError("Must specify either assignments_increment or expiration_increment, but not both") + + params = {'HITId': hit_id} + if assignments_increment: + params['MaxAssignmentsIncrement'] = assignments_increment + if expiration_increment: + params['ExpirationIncrementInSeconds'] = expiration_increment + + return self._process_request('ExtendHIT', params) + + def get_help(self, about, help_type='Operation'): + """ + Return information about the Mechanical Turk Service + operations and response groups. NOTE: this is basically useless, + as it just returns the URL of the documentation. + + help_type: either 'Operation' or 'ResponseGroup' + """ + params = {'About': about, 'HelpType': help_type} + return self._process_request('Help', params) + + def grant_bonus(self, worker_id, assignment_id, bonus_price, reason): + """ + Issues a payment of money from your account to a Worker. To + be eligible for a bonus, the Worker must have submitted + results for one of your HITs, and have had those results + approved or rejected. This payment happens separately from the + reward you pay to the Worker when you approve the Worker's + assignment. The Bonus must be passed in as an instance of the + Price object. + """ + params = bonus_price.get_as_params('BonusAmount', 1) + params['WorkerId'] = worker_id + params['AssignmentId'] = assignment_id + params['Reason'] = reason + + return self._process_request('GrantBonus', params) + + def block_worker(self, worker_id, reason): + """ + Block a worker from working on your tasks.
+ """ + params = {'WorkerId': worker_id, 'Reason': reason} + + return self._process_request('BlockWorker', params) + + def unblock_worker(self, worker_id, reason): + """ + Unblock a worker from working on my tasks. + """ + params = {'WorkerId': worker_id, 'Reason': reason} + + return self._process_request('UnblockWorker', params) + + def notify_workers(self, worker_ids, subject, message_text): + """ + Send a text message to workers. + """ + params = {'Subject': subject, + 'MessageText': message_text} + self.build_list_params(params, worker_ids, 'WorkerId') + + return self._process_request('NotifyWorkers', params) + + def create_qualification_type(self, + name, + description, + status, + keywords=None, + retry_delay=None, + test=None, + answer_key=None, + answer_key_xml=None, + test_duration=None, + auto_granted=False, + auto_granted_value=1): + """ + Create a new Qualification Type. + + name: This will be visible to workers and must be unique for a + given requester. + + description: description shown to workers. Max 2000 characters. + + status: 'Active' or 'Inactive' + + keywords: list of keyword strings or comma separated string. + Max length of 1000 characters when concatenated with commas. + + retry_delay: number of seconds after requesting a + qualification the worker must wait before they can ask again. + If not specified, workers can only request this qualification + once. + + test: a QuestionForm + + answer_key: an XML string of your answer key, for automatically + scored qualification tests. + (Consider implementing an AnswerKey class for this to support.) + + test_duration: the number of seconds a worker has to complete the test. + + auto_granted: if True, requests for the Qualification are granted + immediately. Can't coexist with a test. + + auto_granted_value: auto_granted qualifications are given this value. + + """ + + params = {'Name': name, + 'Description': description, + 'QualificationTypeStatus': status, + } + if retry_delay is not None: + params['RetryDelayInSeconds'] = retry_delay + + if test is not None: + assert(isinstance(test, QuestionForm)) + assert(test_duration is not None) + params['Test'] = test.get_as_xml() + + if test_duration is not None: + params['TestDurationInSeconds'] = test_duration + + if answer_key is not None: + if isinstance(answer_key, basestring): + params['AnswerKey'] = answer_key # xml + else: + raise TypeError + # Eventually someone will write an AnswerKey class. 
+ + if auto_granted: + assert(test is None) + params['AutoGranted'] = True + params['AutoGrantedValue'] = auto_granted_value + + if keywords: + params['Keywords'] = self.get_keywords_as_string(keywords) + + return self._process_request('CreateQualificationType', params, + [('QualificationType', + QualificationType)]) + + def get_qualification_type(self, qualification_type_id): + """Retrieve information about an existing Qualification Type.""" + params = {'QualificationTypeId': qualification_type_id} + return self._process_request('GetQualificationType', params, + [('QualificationType', QualificationType)]) + + def get_all_qualifications_for_qual_type(self, qualification_type_id): + """ + Return all Qualifications granted for a Qualification Type, + fetching pages of 100 from the server on demand. + """ + page_size = 100 + search_qual = self.get_qualifications_for_qualification_type(qualification_type_id) + total_records = int(search_qual.TotalNumResults) + get_page_quals = lambda page: self.get_qualifications_for_qualification_type(qualification_type_id=qualification_type_id, page_size=page_size, page_number=page) + page_nums = self._get_pages(page_size, total_records) + qual_sets = itertools.imap(get_page_quals, page_nums) + return itertools.chain.from_iterable(qual_sets) + + def get_qualifications_for_qualification_type(self, qualification_type_id, page_size=100, page_number=1): + """Return one page of Qualifications granted for a Qualification Type.""" + params = {'QualificationTypeId': qualification_type_id, + 'PageSize': page_size, + 'PageNumber': page_number} + return self._process_request('GetQualificationsForQualificationType', params, + [('Qualification', Qualification)]) + + def update_qualification_type(self, qualification_type_id, + description=None, + status=None, + retry_delay=None, + test=None, + answer_key=None, + test_duration=None, + auto_granted=None, + auto_granted_value=None): + + params = {'QualificationTypeId': qualification_type_id} + + if description is not None: + params['Description'] = description + + if status is not None: + params['QualificationTypeStatus'] = status + + if retry_delay is not None: + params['RetryDelayInSeconds'] = retry_delay + + if test is not None: + assert(isinstance(test, QuestionForm)) + params['Test'] = test.get_as_xml() + + if test_duration is not None: + params['TestDurationInSeconds'] = test_duration + + if answer_key is not None: + if isinstance(answer_key, basestring): + params['AnswerKey'] = answer_key # xml + else: + raise TypeError + # Eventually someone will write an AnswerKey class.
+ + if auto_granted is not None: + params['AutoGranted'] = auto_granted + + if auto_granted_value is not None: + params['AutoGrantedValue'] = auto_granted_value + + return self._process_request('UpdateQualificationType', params, + [('QualificationType', QualificationType)]) + + def dispose_qualification_type(self, qualification_type_id): + """TODO: Document.""" + params = {'QualificationTypeId': qualification_type_id} + return self._process_request('DisposeQualificationType', params) + + def search_qualification_types(self, query=None, sort_by='Name', + sort_direction='Ascending', page_size=10, + page_number=1, must_be_requestable=True, + must_be_owned_by_caller=True): + """TODO: Document.""" + params = {'Query': query, + 'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number, + 'MustBeRequestable': must_be_requestable, + 'MustBeOwnedByCaller': must_be_owned_by_caller} + return self._process_request('SearchQualificationTypes', params, + [('QualificationType', QualificationType)]) + + def get_qualification_requests(self, qualification_type_id, + sort_by='Expiration', + sort_direction='Ascending', page_size=10, + page_number=1): + """TODO: Document.""" + params = {'QualificationTypeId': qualification_type_id, + 'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + return self._process_request('GetQualificationRequests', params, + [('QualificationRequest', QualificationRequest)]) + + def grant_qualification(self, qualification_request_id, integer_value=1): + """TODO: Document.""" + params = {'QualificationRequestId': qualification_request_id, + 'IntegerValue': integer_value} + return self._process_request('GrantQualification', params) + + def revoke_qualification(self, subject_id, qualification_type_id, + reason=None): + """TODO: Document.""" + params = {'SubjectId': subject_id, + 'QualificationTypeId': qualification_type_id, + 'Reason': reason} + return self._process_request('RevokeQualification', params) + + def assign_qualification(self, qualification_type_id, worker_id, + value=1, send_notification=True): + params = {'QualificationTypeId': qualification_type_id, + 'WorkerId': worker_id, + 'IntegerValue': value, + 'SendNotification': send_notification} + return self._process_request('AssignQualification', params) + + def get_qualification_score(self, qualification_type_id, worker_id): + """TODO: Document.""" + params = {'QualificationTypeId': qualification_type_id, + 'SubjectId': worker_id} + return self._process_request('GetQualificationScore', params, + [('Qualification', Qualification)]) + + def update_qualification_score(self, qualification_type_id, worker_id, + value): + """TODO: Document.""" + params = {'QualificationTypeId': qualification_type_id, + 'SubjectId': worker_id, + 'IntegerValue': value} + return self._process_request('UpdateQualificationScore', params) + + def _process_request(self, request_type, params, marker_elems=None): + """ + Helper to process the xml response from AWS + """ + params['Operation'] = request_type + response = self.make_request(None, params, verb='POST') + return self._process_response(response, marker_elems) + + def _process_response(self, response, marker_elems=None): + """ + Helper to process the xml response from AWS + """ + body = response.read() + if self.debug == 2: + print body + if '<Errors>' not in body: + rs = ResultSet(marker_elems) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + else: + 
raise MTurkRequestError(response.status, response.reason, body) + + @staticmethod + def get_keywords_as_string(keywords): + """ + Returns a comma+space-separated string of keywords from either + a list or a string + """ + if isinstance(keywords, list): + keywords = ', '.join(keywords) + if isinstance(keywords, str): + final_keywords = keywords + elif isinstance(keywords, unicode): + final_keywords = keywords.encode('utf-8') + elif keywords is None: + final_keywords = "" + else: + raise TypeError("keywords argument must be a string or a list of strings; got a %s" % type(keywords)) + return final_keywords + + @staticmethod + def get_price_as_price(reward): + """ + Returns a Price data structure from either a float or a Price + """ + if isinstance(reward, Price): + final_price = reward + else: + final_price = Price(reward) + return final_price + + @staticmethod + def duration_as_seconds(duration): + if isinstance(duration, datetime.timedelta): + duration = duration.days * 86400 + duration.seconds + try: + duration = int(duration) + except TypeError: + raise TypeError("Duration must be a timedelta or int-castable, got %s" % type(duration)) + return duration + + +class BaseAutoResultElement: + """ + Base class to automatically add attributes when parsing XML + """ + def __init__(self, connection): + pass + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class HIT(BaseAutoResultElement): + """ + Class to extract a HIT structure from a response (used in ResultSet) + + Will have attributes named as per the Developer Guide, + e.g. HITId, HITTypeId, CreationTime + """ + + # property helper to determine if HIT has expired + def _has_expired(self): + """ Has this HIT expired yet? """ + expired = False + if hasattr(self, 'Expiration'): + now = datetime.datetime.utcnow() + expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ') + expired = (now >= expiration) + else: + raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!") + return expired + + # are we there yet? + expired = property(_has_expired) + + +class HITTypeId(BaseAutoResultElement): + """ + Class to extract an HITTypeId structure from a response + """ + + pass + + +class Qualification(BaseAutoResultElement): + """ + Class to extract a Qualification structure from a response (used in + ResultSet) + + Will have attributes named as per the Developer Guide such as + QualificationTypeId, IntegerValue. Does not seem to contain GrantTime. + """ + + pass + + +class QualificationType(BaseAutoResultElement): + """ + Class to extract a QualificationType structure from a response (used in + ResultSet) + + Will have attributes named as per the Developer Guide, + e.g. QualificationTypeId, CreationTime, Name, etc + """ + + pass + + +class QualificationRequest(BaseAutoResultElement): + """ + Class to extract a QualificationRequest structure from a response (used in + ResultSet) + + Will have attributes named as per the Developer Guide, + e.g. 
QualificationRequestId, QualificationTypeId, SubjectId, etc + """ + + def __init__(self, connection): + BaseAutoResultElement.__init__(self, connection) + self.answers = [] + + def endElement(self, name, value, connection): + # the answer consists of embedded XML, so it needs to be parsed independently + if name == 'Answer': + answer_rs = ResultSet([('Answer', QuestionFormAnswer)]) + h = handler.XmlHandler(answer_rs, connection) + value = connection.get_utf8_value(value) + xml.sax.parseString(value, h) + self.answers.append(answer_rs) + else: + BaseAutoResultElement.endElement(self, name, value, connection) + + +class Assignment(BaseAutoResultElement): + """ + Class to extract an Assignment structure from a response (used in + ResultSet) + + Will have attributes named as per the Developer Guide, + e.g. AssignmentId, WorkerId, HITId, Answer, etc + """ + + def __init__(self, connection): + BaseAutoResultElement.__init__(self, connection) + self.answers = [] + + def endElement(self, name, value, connection): + # the answer consists of embedded XML, so it needs to be parsed independently + if name == 'Answer': + answer_rs = ResultSet([('Answer', QuestionFormAnswer)]) + h = handler.XmlHandler(answer_rs, connection) + value = connection.get_utf8_value(value) + xml.sax.parseString(value, h) + self.answers.append(answer_rs) + else: + BaseAutoResultElement.endElement(self, name, value, connection) + + +class QuestionFormAnswer(BaseAutoResultElement): + """ + Class to extract Answers from inside the embedded XML + QuestionFormAnswers element inside the Answer element which is + part of the Assignment and QualificationRequest structures + + A QuestionFormAnswers element contains an Answer element for each + question in the HIT or Qualification test for which the Worker + provided an answer. Each Answer contains a QuestionIdentifier + element whose value corresponds to the QuestionIdentifier of a + Question in the QuestionForm. See the QuestionForm data structure + for more information about questions and answer specifications. + + If the question expects a free-text answer, the Answer element + contains a FreeText element. This element contains the Worker's + answer. + + *NOTE* - currently really only supports free-text and selection answers + """ + + def __init__(self, connection): + BaseAutoResultElement.__init__(self, connection) + self.fields = [] + self.qid = None + + def endElement(self, name, value, connection): + if name == 'QuestionIdentifier': + self.qid = value + elif name in ['FreeText', 'SelectionIdentifier', 'OtherSelectionText'] and self.qid: + self.fields.append(value) diff --git a/awx/lib/site-packages/boto/mturk/layoutparam.py b/awx/lib/site-packages/boto/mturk/layoutparam.py new file mode 100644 index 0000000000..16e5932896 --- /dev/null +++ b/awx/lib/site-packages/boto/mturk/layoutparam.py @@ -0,0 +1,55 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class LayoutParameters: + + def __init__(self, layoutParameters=None): + if layoutParameters == None: + layoutParameters = [] + self.layoutParameters = layoutParameters + + def add(self, req): + self.layoutParameters.append(req) + + def get_as_params(self): + params = {} + assert(len(self.layoutParameters) <= 25) + for n, layoutParameter in enumerate(self.layoutParameters): + kv = layoutParameter.get_as_params() + for key in kv: + params['HITLayoutParameter.%s.%s' % ((n+1), key) ] = kv[key] + return params + +class LayoutParameter(object): + """ + Representation of a single HIT layout parameter + """ + + def __init__(self, name, value): + self.name = name + self.value = value + + def get_as_params(self): + params = { + "Name": self.name, + "Value": self.value, + } + return params diff --git a/awx/lib/site-packages/boto/mturk/notification.py b/awx/lib/site-packages/boto/mturk/notification.py new file mode 100644 index 0000000000..02c93aab14 --- /dev/null +++ b/awx/lib/site-packages/boto/mturk/notification.py @@ -0,0 +1,103 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Provides NotificationMessage and Event classes, with utility methods, for +implementations of the Mechanical Turk Notification API. 
+""" + +import hmac +try: + from hashlib import sha1 as sha +except ImportError: + import sha +import base64 +import re + +class NotificationMessage: + + NOTIFICATION_WSDL = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurk/2006-05-05/AWSMechanicalTurkRequesterNotification.wsdl" + NOTIFICATION_VERSION = '2006-05-05' + + SERVICE_NAME = "AWSMechanicalTurkRequesterNotification" + OPERATION_NAME = "Notify" + + EVENT_PATTERN = r"Event\.(?P\d+)\.(?P\w+)" + EVENT_RE = re.compile(EVENT_PATTERN) + + def __init__(self, d): + """ + Constructor; expects parameter d to be a dict of string parameters from a REST transport notification message + """ + self.signature = d['Signature'] # vH6ZbE0NhkF/hfNyxz2OgmzXYKs= + self.timestamp = d['Timestamp'] # 2006-05-23T23:22:30Z + self.version = d['Version'] # 2006-05-05 + assert d['method'] == NotificationMessage.OPERATION_NAME, "Method should be '%s'" % NotificationMessage.OPERATION_NAME + + # Build Events + self.events = [] + events_dict = {} + if 'Event' in d: + # TurboGears surprised me by 'doing the right thing' and making { 'Event': { '1': { 'EventType': ... } } } etc. + events_dict = d['Event'] + else: + for k in d: + v = d[k] + if k.startswith('Event.'): + ed = NotificationMessage.EVENT_RE.search(k).groupdict() + n = int(ed['n']) + param = str(ed['param']) + if n not in events_dict: + events_dict[n] = {} + events_dict[n][param] = v + for n in events_dict: + self.events.append(Event(events_dict[n])) + + def verify(self, secret_key): + """ + Verifies the authenticity of a notification message. + + TODO: This is doing a form of authentication and + this functionality should really be merged + with the pluggable authentication mechanism + at some point. + """ + verification_input = NotificationMessage.SERVICE_NAME + verification_input += NotificationMessage.OPERATION_NAME + verification_input += self.timestamp + h = hmac.new(key=secret_key, digestmod=sha) + h.update(verification_input) + signature_calc = base64.b64encode(h.digest()) + return self.signature == signature_calc + +class Event: + def __init__(self, d): + self.event_type = d['EventType'] + self.event_time_str = d['EventTime'] + self.hit_type = d['HITTypeId'] + self.hit_id = d['HITId'] + if 'AssignmentId' in d: # Not present in all event types + self.assignment_id = d['AssignmentId'] + + #TODO: build self.event_time datetime from string self.event_time_str + + def __repr__(self): + return "" % (self.event_type, self.hit_id) diff --git a/awx/lib/site-packages/boto/mturk/price.py b/awx/lib/site-packages/boto/mturk/price.py new file mode 100644 index 0000000000..3c88a96549 --- /dev/null +++ b/awx/lib/site-packages/boto/mturk/price.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Price: + + def __init__(self, amount=0.0, currency_code='USD'): + self.amount = amount + self.currency_code = currency_code + self.formatted_price = '' + + def __repr__(self): + if self.formatted_price: + return self.formatted_price + else: + return str(self.amount) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Amount': + self.amount = float(value) + elif name == 'CurrencyCode': + self.currency_code = value + elif name == 'FormattedPrice': + self.formatted_price = value + + def get_as_params(self, label, ord=1): + return {'%s.%d.Amount'%(label, ord) : str(self.amount), + '%s.%d.CurrencyCode'%(label, ord) : self.currency_code} diff --git a/awx/lib/site-packages/boto/mturk/qualification.py b/awx/lib/site-packages/boto/mturk/qualification.py new file mode 100644 index 0000000000..8272d6d1b9 --- /dev/null +++ b/awx/lib/site-packages/boto/mturk/qualification.py @@ -0,0 +1,137 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
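+# Usage sketch (editorial comment, not upstream boto code): the classes
+# below build the QualificationRequirement.N.* request parameters consumed
+# by MTurkConnection.create_hit(qualifications=...). Assuming valid
+# comparator and value choices, a container is composed like so:
+#
+#   quals = Qualifications()
+#   quals.add(PercentAssignmentsApprovedRequirement('GreaterThan', 95))
+#   quals.add(LocaleRequirement('EqualTo', 'US', required_to_preview=True))
+#   quals.get_as_params()
+#   # -> {'QualificationRequirement.1.QualificationTypeId': '000000000000000000L0',
+#   #     'QualificationRequirement.1.Comparator': 'GreaterThan', ...}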
+ +class Qualifications: + + def __init__(self, requirements=None): + if requirements == None: + requirements = [] + self.requirements = requirements + + def add(self, req): + self.requirements.append(req) + + def get_as_params(self): + params = {} + assert(len(self.requirements) <= 10) + for n, req in enumerate(self.requirements): + reqparams = req.get_as_params() + for rp in reqparams: + params['QualificationRequirement.%s.%s' % ((n+1), rp) ] = reqparams[rp] + return params + + +class Requirement(object): + """ + Representation of a single requirement + """ + + def __init__(self, qualification_type_id, comparator, integer_value=None, required_to_preview=False): + self.qualification_type_id = qualification_type_id + self.comparator = comparator + self.integer_value = integer_value + self.required_to_preview = required_to_preview + + def get_as_params(self): + params = { + "QualificationTypeId": self.qualification_type_id, + "Comparator": self.comparator, + } + if self.comparator != 'Exists' and self.integer_value is not None: + params['IntegerValue'] = self.integer_value + if self.required_to_preview: + params['RequiredToPreview'] = "true" + return params + +class PercentAssignmentsSubmittedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted, over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsAbandonedRequirement(Requirement): + """ + The percentage of assignments the Worker has abandoned (allowed the deadline to elapse), over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsReturnedRequirement(Requirement): + """ + The percentage of assignments the Worker has returned, over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsApprovedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted that were subsequently approved by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsRejectedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted that were subsequently rejected by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. 
+ """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class NumberHitsApprovedRequirement(Requirement): + """ + Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class LocaleRequirement(Requirement): + """ + A Qualification requirement based on the Worker's location. The Worker's location is specified by the Worker to Mechanical Turk when the Worker creates his account. + """ + + def __init__(self, comparator, locale, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview) + self.locale = locale + + def get_as_params(self): + params = { + "QualificationTypeId": self.qualification_type_id, + "Comparator": self.comparator, + 'LocaleValue.Country': self.locale, + } + if self.required_to_preview: + params['RequiredToPreview'] = "true" + return params + +class AdultRequirement(Requirement): + """ + Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default). + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) diff --git a/awx/lib/site-packages/boto/mturk/question.py b/awx/lib/site-packages/boto/mturk/question.py new file mode 100644 index 0000000000..90ab00dbd6 --- /dev/null +++ b/awx/lib/site-packages/boto/mturk/question.py @@ -0,0 +1,455 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +import xml.sax.saxutils + +class Question(object): + template = "<Question>%(items)s</Question>" + + def __init__(self, identifier, content, answer_spec, + is_required=False, display_name=None): + # copy all of the parameters into object attributes + self.__dict__.update(vars()) + del self.self + + def get_as_params(self, label='Question'): + return {label: self.get_as_xml()} + + def get_as_xml(self): + items = [ + SimpleField('QuestionIdentifier', self.identifier), + SimpleField('IsRequired', str(self.is_required).lower()), + self.content, + self.answer_spec, + ] + if self.display_name is not None: + items.insert(1, SimpleField('DisplayName', self.display_name)) + items = ''.join(item.get_as_xml() for item in items) + return self.template % vars() + +try: + from lxml import etree + + class ValidatingXML(object): + + def validate(self): + import urllib2 + schema_src_file = urllib2.urlopen(self.schema_url) + schema_doc = etree.parse(schema_src_file) + schema = etree.XMLSchema(schema_doc) + doc = etree.fromstring(self.get_as_xml()) + schema.assertValid(doc) +except ImportError: + class ValidatingXML(object): + + def validate(self): + pass + + +class ExternalQuestion(ValidatingXML): + """ + An object for constructing an External Question. + """ + schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd" + template = '<ExternalQuestion xmlns="%(schema_url)s"><ExternalURL>%%(external_url)s</ExternalURL><FrameHeight>%%(frame_height)s</FrameHeight></ExternalQuestion>' % vars() + + def __init__(self, external_url, frame_height): + self.external_url = xml.sax.saxutils.escape(external_url) + self.frame_height = frame_height + + def get_as_params(self, label='ExternalQuestion'): + return {label: self.get_as_xml()} + + def get_as_xml(self): + return self.template % vars(self) + + +class XMLTemplate: + def get_as_xml(self): + return self.template % vars(self) + + +class SimpleField(object, XMLTemplate): + """ + A Simple name/value pair that can be easily rendered as XML. 
+ + >>> SimpleField('Text', 'A text string').get_as_xml() + '<Text>A text string</Text>' + """ + template = '<%(field)s>%(value)s</%(field)s>' + + def __init__(self, field, value): + self.field = field + self.value = value + + +class Binary(object, XMLTemplate): + template = """<Binary><MimeType><Type>%(type)s</Type><SubType>%(subtype)s</SubType></MimeType><DataURL>%(url)s</DataURL><AltText>%(alt_text)s</AltText></Binary>""" + + def __init__(self, type, subtype, url, alt_text): + self.__dict__.update(vars()) + del self.self + + +class List(list): + """A bulleted list suitable for OrderedContent or Overview content""" + def get_as_xml(self): + items = ''.join('<ListItem>%s</ListItem>' % item for item in self) + return '<List>%s</List>' % items + + +class Application(object): + template = "<%(class_)s>%(content)s</%(class_)s>" + parameter_template = "<Name>%(name)s</Name><Value>%(value)s</Value>" + + def __init__(self, width, height, **parameters): + self.width = width + self.height = height + self.parameters = parameters + + def get_inner_content(self, content): + content.append_field('Width', self.width) + content.append_field('Height', self.height) + for name, value in self.parameters.items(): + value = self.parameter_template % vars() + content.append_field('ApplicationParameter', value) + + def get_as_xml(self): + content = OrderedContent() + self.get_inner_content(content) + content = content.get_as_xml() + class_ = self.__class__.__name__ + return self.template % vars() + + +class HTMLQuestion(ValidatingXML): + schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd' + template = '<HTMLQuestion xmlns="%(schema_url)s"><HTMLContent><![CDATA[<!DOCTYPE html>%%(html_form)s]]></HTMLContent><FrameHeight>%%(frame_height)s</FrameHeight></HTMLQuestion>' % vars() + + def __init__(self, html_form, frame_height): + self.html_form = html_form + self.frame_height = frame_height + + def get_as_params(self, label="HTMLQuestion"): + return {label: self.get_as_xml()} + + def get_as_xml(self): + return self.template % vars(self) + + +class JavaApplet(Application): + def __init__(self, path, filename, *args, **kwargs): + self.path = path + self.filename = filename + super(JavaApplet, self).__init__(*args, **kwargs) + + def get_inner_content(self, content): + content = OrderedContent() + content.append_field('AppletPath', self.path) + content.append_field('AppletFilename', self.filename) + super(JavaApplet, self).get_inner_content(content) + + +class Flash(Application): + def __init__(self, url, *args, **kwargs): + self.url = url + super(Flash, self).__init__(*args, **kwargs) + + def get_inner_content(self, content): + content = OrderedContent() + content.append_field('FlashMovieURL', self.url) + super(Flash, self).get_inner_content(content) + + +class FormattedContent(object, XMLTemplate): + schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/FormattedContentXHTMLSubset.xsd' + template = '<FormattedContent><![CDATA[%(content)s]]></FormattedContent>' + + def __init__(self, content): + self.content = content + + +class OrderedContent(list): + + def append_field(self, field, value): + self.append(SimpleField(field, value)) + + def get_as_xml(self): + return ''.join(item.get_as_xml() for item in self) + + +class Overview(OrderedContent): + template = '<Overview>%(content)s</Overview>' + + def get_as_params(self, label='Overview'): + return {label: self.get_as_xml()} + + def get_as_xml(self): + content = super(Overview, self).get_as_xml() + return self.template % vars() + + +class QuestionForm(ValidatingXML, list): + """ + From the AMT API docs: + + The top-most element of the QuestionForm data structure is a + QuestionForm element. This element contains optional Overview + elements and one or more Question elements. There can be any + number of these two element types listed in any order. 
The + following example structure has an Overview element and a + Question element followed by a second Overview element and + Question element--all within the same QuestionForm. + + :: + + <QuestionForm> + <Overview> + [...] + </Overview> + <Question> + [...] + </Question> + <Overview> + [...] + </Overview> + <Question> + [...] + </Question> + [...] + </QuestionForm> + + QuestionForm is implemented as a list, so to construct a + QuestionForm, simply append Questions and Overviews (with at least + one Question). + """ + schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd" + xml_template = """<QuestionForm xmlns="%(schema_url)s">%%(items)s</QuestionForm>""" % vars() + + def is_valid(self): + return ( + any(isinstance(item, Question) for item in self) + and + all(isinstance(item, (Question, Overview)) for item in self) + ) + + def get_as_xml(self): + assert self.is_valid(), "QuestionForm contains invalid elements" + items = ''.join(item.get_as_xml() for item in self) + return self.xml_template % vars() + + +class QuestionContent(OrderedContent): + template = '<QuestionContent>%(content)s</QuestionContent>' + + def get_as_xml(self): + content = super(QuestionContent, self).get_as_xml() + return self.template % vars() + + +class AnswerSpecification(object): + template = '<AnswerSpecification>%(spec)s</AnswerSpecification>' + + def __init__(self, spec): + self.spec = spec + + def get_as_xml(self): + spec = self.spec.get_as_xml() + return self.template % vars() + + +class Constraints(OrderedContent): + template = '<Constraints>%(content)s</Constraints>' + + def get_as_xml(self): + content = super(Constraints, self).get_as_xml() + return self.template % vars() + + +class Constraint(object): + def get_attributes(self): + pairs = zip(self.attribute_names, self.attribute_values) + attrs = ' '.join( + '%s="%d"' % (name, value) + for (name, value) in pairs + if value is not None + ) + return attrs + + def get_as_xml(self): + attrs = self.get_attributes() + return self.template % vars() + + +class NumericConstraint(Constraint): + attribute_names = 'minValue', 'maxValue' + template = '<IsNumeric %(attrs)s />' + + def __init__(self, min_value=None, max_value=None): + self.attribute_values = min_value, max_value + + +class LengthConstraint(Constraint): + attribute_names = 'minLength', 'maxLength' + template = '<Length %(attrs)s />' + + def __init__(self, min_length=None, max_length=None): + self.attribute_values = min_length, max_length + + +class RegExConstraint(Constraint): + attribute_names = 'regex', 'errorText', 'flags' + template = '<AnswerFormatRegex %(attrs)s />' + + def __init__(self, pattern, error_text=None, flags=None): + self.attribute_values = pattern, error_text, flags + + def get_attributes(self): + pairs = zip(self.attribute_names, self.attribute_values) + attrs = ' '.join( + '%s="%s"' % (name, value) + for (name, value) in pairs + if value is not None + ) + return attrs + + +class NumberOfLinesSuggestion(object): + template = '<NumberOfLinesSuggestion>%(num_lines)s</NumberOfLinesSuggestion>' + + def __init__(self, num_lines=1): + self.num_lines = num_lines + + def get_as_xml(self): + num_lines = self.num_lines + return self.template % vars() + + +class FreeTextAnswer(object): + template = '<FreeTextAnswer>%(items)s</FreeTextAnswer>' + + def __init__(self, default=None, constraints=None, num_lines=None): + self.default = default + if constraints is None: + self.constraints = Constraints() + else: + self.constraints = Constraints(constraints) + self.num_lines = num_lines + + def get_as_xml(self): + items = [self.constraints] + if self.default: + items.append(SimpleField('DefaultText', self.default)) + if self.num_lines: + items.append(NumberOfLinesSuggestion(self.num_lines)) + items = ''.join(item.get_as_xml() for item in items) + return self.template % vars() + + +class FileUploadAnswer(object): + template = """<FileUploadAnswer><MaxFileSizeInBytes>%(max_bytes)d</MaxFileSizeInBytes><MinFileSizeInBytes>%(min_bytes)d</MinFileSizeInBytes></FileUploadAnswer>""" + + def __init__(self, 
min_bytes, max_bytes): + assert 0 <= min_bytes <= max_bytes <= 2 * 10 ** 9 + self.min_bytes = min_bytes + self.max_bytes = max_bytes + + def get_as_xml(self): + return self.template % vars(self) + + +class SelectionAnswer(object): + """ + A class to generate SelectionAnswer XML data structures. + Does not yet implement Binary selection options. + """ + SELECTIONANSWER_XML_TEMPLATE = """<SelectionAnswer>%s%s<Selections>%s</Selections></SelectionAnswer>""" # % (count_xml, style_xml, selections_xml) + SELECTION_XML_TEMPLATE = """<Selection><SelectionIdentifier>%s</SelectionIdentifier>%s</Selection>""" # (identifier, value_xml) + SELECTION_VALUE_XML_TEMPLATE = """<%s>%s</%s>""" # (type, value, type) + STYLE_XML_TEMPLATE = """<StyleSuggestion>%s</StyleSuggestion>""" # (style) + MIN_SELECTION_COUNT_XML_TEMPLATE = """<MinSelectionCount>%s</MinSelectionCount>""" # count + MAX_SELECTION_COUNT_XML_TEMPLATE = """<MaxSelectionCount>%s</MaxSelectionCount>""" # count + ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser'] + OTHER_SELECTION_ELEMENT_NAME = 'OtherSelection' + + def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False): + + if style is not None: + if style in SelectionAnswer.ACCEPTED_STYLES: + self.style_suggestion = style + else: + raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES))) + else: + self.style_suggestion = None + + if selections is None: + raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of (content, identifier) tuples") + else: + self.selections = selections + + self.min_selections = min + self.max_selections = max + + assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections + #assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections + + self.type = type + + self.other = other + + def get_as_xml(self): + if self.type == 'text': + TYPE_TAG = "Text" + elif self.type == 'binary': + TYPE_TAG = "Binary" + else: + raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type)) + + # build list of <Selection> elements + selections_xml = "" + for tpl in self.selections: + value_xml = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (TYPE_TAG, tpl[0], TYPE_TAG) + selection_xml = SelectionAnswer.SELECTION_XML_TEMPLATE % (tpl[1], value_xml) + selections_xml += selection_xml + + if self.other: + # add OtherSelection element as xml if available + if hasattr(self.other, 'get_as_xml'): + assert isinstance(self.other, FreeTextAnswer), 'OtherSelection can only be a FreeTextAnswer' + selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection') + else: + selections_xml += "<OtherSelection />" + + if self.style_suggestion is not None: + style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion + else: + style_xml = "" + + if self.style_suggestion != 'radiobutton': + count_xml = SelectionAnswer.MIN_SELECTION_COUNT_XML_TEMPLATE % self.min_selections + count_xml += SelectionAnswer.MAX_SELECTION_COUNT_XML_TEMPLATE % self.max_selections + else: + count_xml = "" + + ret = SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (count_xml, style_xml, selections_xml) + + # return XML + return ret diff --git a/awx/lib/site-packages/boto/mws/__init__.py b/awx/lib/site-packages/boto/mws/__init__.py new file mode 100644 index 0000000000..d69b7f08a4 --- /dev/null +++ b/awx/lib/site-packages/boto/mws/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008, Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the 
Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/awx/lib/site-packages/boto/mws/connection.py b/awx/lib/site-packages/boto/mws/connection.py new file mode 100644 index 0000000000..db58e6d744 --- /dev/null +++ b/awx/lib/site-packages/boto/mws/connection.py @@ -0,0 +1,813 @@ +# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
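+# Usage sketch (editorial comment, not upstream boto code): the decorators
+# defined below wrap each API method with argument validation and request
+# building. Assuming valid MWS credentials and a seller id, a typical call
+# plus NextToken continuation looks like:
+#
+#   conn = MWSConnection(aws_access_key_id='...', aws_secret_access_key='...',
+#                        Merchant='YOUR_SELLER_ID')
+#   for page in conn.iter_call('GetFeedSubmissionList'):
+#       print page._result
+#
+# iter_call resolves the CamelCase action to get_feed_submission_list() and
+# keeps calling *ByNextToken while the response reports HasNext == 'true'.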
+import xml.sax +import hashlib +import base64 +import string +from boto.connection import AWSQueryConnection +from boto.mws.exception import ResponseErrorFactory +from boto.mws.response import ResponseFactory, ResponseElement +from boto.handler import XmlHandler +import boto.mws.response + +__all__ = ['MWSConnection'] + +api_version_path = { + 'Feeds': ('2009-01-01', 'Merchant', '/'), + 'Reports': ('2009-01-01', 'Merchant', '/'), + 'Orders': ('2011-01-01', 'SellerId', '/Orders/2011-01-01'), + 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), + 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), + 'Inbound': ('2010-10-01', 'SellerId', + '/FulfillmentInboundShipment/2010-10-01'), + 'Outbound': ('2010-10-01', 'SellerId', + '/FulfillmentOutboundShipment/2010-10-01'), + 'Inventory': ('2010-10-01', 'SellerId', + '/FulfillmentInventory/2010-10-01'), +} +content_md5 = lambda c: base64.encodestring(hashlib.md5(c).digest()).strip() +decorated_attrs = ('action', 'response', 'section', + 'quota', 'restore', 'version') + + +def add_attrs_from(func, to): + for attr in decorated_attrs: + setattr(to, attr, getattr(func, attr, None)) + return to + + +def structured_lists(*fields): + + def decorator(func): + + def wrapper(self, *args, **kw): + for key, acc in [f.split('.') for f in fields]: + if key in kw: + newkey = key + '.' + acc + (acc and '.' or '') + for i in range(len(kw[key])): + kw[newkey + str(i + 1)] = kw[key][i] + kw.pop(key) + return func(self, *args, **kw) + wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def http_body(field): + + def decorator(func): + + def wrapper(*args, **kw): + if filter(lambda x: not x in kw, (field, 'content_type')): + message = "{0} requires {1} and content_type arguments for " \ + "building HTTP body".format(func.action, field) + raise KeyError(message) + kw['body'] = kw.pop(field) + kw['headers'] = { + 'Content-Type': kw.pop('content_type'), + 'Content-MD5': content_md5(kw['body']), + } + return func(*args, **kw) + wrapper.__doc__ = "{0}\nRequired HTTP Body: " \ + "{1}".format(func.__doc__, field) + return add_attrs_from(func, to=wrapper) + return decorator + + +def destructure_object(value, into={}, prefix=''): + if isinstance(value, ResponseElement): + for name, attr in value.__dict__.items(): + if name.startswith('_'): + continue + destructure_object(attr, into=into, prefix=prefix + '.' + name) + elif filter(lambda x: isinstance(value, x), (list, set, tuple)): + for index, element in [(prefix + '.' 
+ str(i + 1), value[i]) + for i in range(len(value))]: + destructure_object(element, into=into, prefix=index) + elif isinstance(value, bool): + into[prefix] = str(value).lower() + else: + into[prefix] = value + + +def structured_objects(*fields): + + def decorator(func): + + def wrapper(*args, **kw): + for field in filter(kw.has_key, fields): + destructure_object(kw.pop(field), into=kw, prefix=field) + return func(*args, **kw) + wrapper.__doc__ = "{0}\nObjects: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def requires(*groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda x: len(x) == len(filter(kw.has_key, x)) + if 1 != len(filter(hasgroup, groups)): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} requires {1} argument(s)" \ + "".format(func.action, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\nRequired: {1}".format(func.__doc__, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def exclusive(*groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda x: len(x) == len(filter(kw.has_key, x)) + if len(filter(hasgroup, groups)) not in (0, 1): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} requires either {1}" \ + "".format(func.action, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def dependent(field, *groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda x: len(x) == len(filter(kw.has_key, x)) + if field in kw and 1 > len(filter(hasgroup, groups)): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} argument {1} requires {2}" \ + "".format(func.action, field, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__, + field, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def requires_some_of(*fields): + + def decorator(func): + + def wrapper(*args, **kw): + if not filter(kw.has_key, fields): + message = "{0} requires at least one of {1} argument(s)" \ + "".format(func.action, ', '.join(fields)) + raise KeyError(message) + return func(*args, **kw) + wrapper.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def boolean_arguments(*fields): + + def decorator(func): + + def wrapper(*args, **kw): + for field in filter(lambda x: isinstance(kw.get(x), bool), fields): + kw[field] = str(kw[field]).lower() + return func(*args, **kw) + wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def api_action(section, quota, restore, *api): + + def decorator(func, quota=int(quota), restore=float(restore)): + version, accesskey, path = api_version_path[section] + action = ''.join(api or map(str.capitalize, func.func_name.split('_'))) + if hasattr(boto.mws.response, action + 'Response'): + response = getattr(boto.mws.response, action + 'Response') + else: + response = ResponseFactory(action) + response._action = action + + def 
wrapper(self, *args, **kw): + kw.setdefault(accesskey, getattr(self, accesskey, None)) + if kw[accesskey] is None: + message = "{0} requires {1} argument. Set the " \ + "MWSConnection.{2} attribute?" \ + "".format(action, accesskey, accesskey) + raise KeyError(message) + kw['Action'] = action + kw['Version'] = version + return func(self, path, response, *args, **kw) + for attr in decorated_attrs: + setattr(wrapper, attr, locals().get(attr)) + wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \ + "{4}".format(action, version, quota, restore, + func.__doc__) + return wrapper + return decorator + + +class MWSConnection(AWSQueryConnection): + + ResponseError = ResponseErrorFactory + + def __init__(self, *args, **kw): + kw.setdefault('host', 'mws.amazonservices.com') + self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId') + self.SellerId = kw.pop('SellerId', None) or self.Merchant + AWSQueryConnection.__init__(self, *args, **kw) + + def _required_auth_capability(self): + return ['mws'] + + def post_request(self, path, params, cls, body='', headers={}, isXML=True): + """Make a POST request, optionally with a content body, + and return the response, optionally as raw text. + Modelled off of the inherited get_object/make_request flow. + """ + request = self.build_base_http_request('POST', path, None, data=body, + params=params, headers=headers, host=self.server_name()) + response = self._mexe(request, override_num_retries=None) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self.ResponseError(response.status, response.reason, body) + if response.status != 200: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + if not isXML: + digest = response.getheader('Content-MD5') + assert content_md5(body) == digest + return body + obj = cls(self) + h = XmlHandler(obj, self) + xml.sax.parseString(body, h) + return obj + + def method_for(self, name): + """Return the MWS API method referred to in the argument. + The named method can be in CamelCase or underlined_lower_case. + This is the complement to MWSConnection.any_call.action + """ + # this looks ridiculous but it should be better than regex + action = '_' in name and string.capwords(name, '_') or name + attribs = [getattr(self, m) for m in dir(self)] + ismethod = lambda m: type(m) is type(self.method_for) + ismatch = lambda m: getattr(m, 'action', None) == action + method = filter(ismatch, filter(ismethod, attribs)) + return method and method[0] or None + + def iter_call(self, call, *args, **kw): + """Pass a call name as the first argument and a generator + is returned for the initial response and any continuation + call responses made using the NextToken. + """ + method = self.method_for(call) + assert method, 'No call named "{0}"'.format(call) + return self.iter_response(method(*args, **kw)) + + def iter_response(self, response): + """Pass a call's response as the initial argument and a + generator is returned for the initial response and any + continuation call responses made using the NextToken. 
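+
+        A paging sketch, assuming a configured MWSConnection named
+        conn; ReportId is one field of the MWS report listing, shown
+        here for illustration only:
+
+            for page in conn.iter_response(conn.get_report_list()):
+                for info in page._result.ReportInfo:
+                    print info.ReportId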
+ """ + yield response + more = self.method_for(response._action + 'ByNextToken') + while more and response._result.HasNext == 'true': + response = more(NextToken=response._result.NextToken) + yield response + + @boolean_arguments('PurgeAndReplace') + @http_body('FeedContent') + @structured_lists('MarketplaceIdList.Id') + @requires(['FeedType']) + @api_action('Feeds', 15, 120) + def submit_feed(self, path, response, headers={}, body='', **kw): + """Uploads a feed for processing by Amazon MWS. + """ + return self.post_request(path, kw, response, body=body, + headers=headers) + + @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type', + 'FeedProcessingStatusList.Status') + @api_action('Feeds', 10, 45) + def get_feed_submission_list(self, path, response, **kw): + """Returns a list of all feed submissions submitted in the + previous 90 days. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Feeds', 0, 0) + def get_feed_submission_list_by_next_token(self, path, response, **kw): + """Returns a list of feed submissions using the NextToken parameter. + """ + return self.post_request(path, kw, response) + + @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status') + @api_action('Feeds', 10, 45) + def get_feed_submission_count(self, path, response, **kw): + """Returns a count of the feeds submitted in the previous 90 days. + """ + return self.post_request(path, kw, response) + + @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type') + @api_action('Feeds', 10, 45) + def cancel_feed_submissions(self, path, response, **kw): + """Cancels one or more feed submissions and returns a + count of the feed submissions that were canceled. + """ + return self.post_request(path, kw, response) + + @requires(['FeedSubmissionId']) + @api_action('Feeds', 15, 60) + def get_feed_submission_result(self, path, response, **kw): + """Returns the feed processing report. + """ + return self.post_request(path, kw, response, isXML=False) + + def get_service_status(self, **kw): + """Instruct the user on how to get service status. + """ + message = "Use {0}.get_(section)_service_status(), " \ + "where (section) is one of the following: " \ + "{1}".format(self.__class__.__name__, + ', '.join(map(str.lower, api_version_path.keys()))) + raise AttributeError(message) + + @structured_lists('MarketplaceIdList.Id') + @boolean_arguments('ReportOptions=ShowSalesChannel') + @requires(['ReportType']) + @api_action('Reports', 15, 60) + def request_report(self, path, response, **kw): + """Creates a report request and submits the request to Amazon MWS. + """ + return self.post_request(path, kw, response) + + @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type', + 'ReportProcessingStatusList.Status') + @api_action('Reports', 10, 45) + def get_report_request_list(self, path, response, **kw): + """Returns a list of report requests that you can use to get the + ReportRequestId for a report. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_request_list_by_next_token(self, path, response, **kw): + """Returns a list of report requests using the NextToken, + which was supplied by a previous request to either + GetReportRequestListByNextToken or GetReportRequestList, where + the value of HasNext was true in that previous request. 
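+
+        For example, assuming a configured connection conn and a
+        token saved from a prior get_report_request_list response:
+
+            more = conn.get_report_request_list_by_next_token(
+                NextToken=token)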
+ """ + return self.post_request(path, kw, response) + + @structured_lists('ReportTypeList.Type', + 'ReportProcessingStatusList.Status') + @api_action('Reports', 10, 45) + def get_report_request_count(self, path, response, **kw): + """Returns a count of report requests that have been submitted + to Amazon MWS for processing. + """ + return self.post_request(path, kw, response) + + @api_action('Reports', 10, 45) + def cancel_report_requests(self, path, response, **kw): + """Cancel one or more report requests, returning the count of the + canceled report requests and the report request information. + """ + return self.post_request(path, kw, response) + + @boolean_arguments('Acknowledged') + @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type') + @api_action('Reports', 10, 60) + def get_report_list(self, path, response, **kw): + """Returns a list of reports that were created in the previous + 90 days that match the query parameters. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_list_by_next_token(self, path, response, **kw): + """Returns a list of reports using the NextToken, which + was supplied by a previous request to either + GetReportListByNextToken or GetReportList, where the + value of HasNext was true in the previous call. + """ + return self.post_request(path, kw, response) + + @boolean_arguments('Acknowledged') + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_count(self, path, response, **kw): + """Returns a count of the reports, created in the previous 90 days, + with a status of _DONE_ and that are available for download. + """ + return self.post_request(path, kw, response) + + @requires(['ReportId']) + @api_action('Reports', 15, 60) + def get_report(self, path, response, **kw): + """Returns the contents of a report. + """ + return self.post_request(path, kw, response, isXML=False) + + @requires(['ReportType', 'Schedule']) + @api_action('Reports', 10, 45) + def manage_report_schedule(self, path, response, **kw): + """Creates, updates, or deletes a report request schedule for + a specified report type. + """ + return self.post_request(path, kw, response) + + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_schedule_list(self, path, response, **kw): + """Returns a list of order report requests that are scheduled + to be submitted to Amazon MWS for processing. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_schedule_list_by_next_token(self, path, response, **kw): + """Returns a list of report requests using the NextToken, + which was supplied by a previous request to either + GetReportScheduleListByNextToken or GetReportScheduleList, + where the value of HasNext was true in that previous request. + """ + return self.post_request(path, kw, response) + + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_schedule_count(self, path, response, **kw): + """Returns a count of order report requests that are scheduled + to be submitted to Amazon MWS. + """ + return self.post_request(path, kw, response) + + @boolean_arguments('Acknowledged') + @requires(['ReportIdList']) + @structured_lists('ReportIdList.Id') + @api_action('Reports', 10, 45) + def update_report_acknowledgements(self, path, response, **kw): + """Updates the acknowledged status of one or more reports. 
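+
+        A sketch with a hypothetical report ID, assuming a configured
+        connection conn:
+
+            conn.update_report_acknowledgements(
+                ReportIdList=['1234567890'], Acknowledged=True)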
+ """ + return self.post_request(path, kw, response) + + @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems']) + @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems') + @api_action('Inbound', 30, 0.5) + def create_inbound_shipment_plan(self, path, response, **kw): + """Returns the information required to create an inbound shipment. + """ + return self.post_request(path, kw, response) + + @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems']) + @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') + @api_action('Inbound', 30, 0.5) + def create_inbound_shipment(self, path, response, **kw): + """Creates an inbound shipment. + """ + return self.post_request(path, kw, response) + + @requires(['ShipmentId']) + @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') + @api_action('Inbound', 30, 0.5) + def update_inbound_shipment(self, path, response, **kw): + """Updates an existing inbound shipment. Amazon documentation + is ambiguous as to whether the InboundShipmentHeader and + InboundShipmentItems arguments are required. + """ + return self.post_request(path, kw, response) + + @requires_some_of('ShipmentIdList', 'ShipmentStatusList') + @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status') + @api_action('Inbound', 30, 0.5) + def list_inbound_shipments(self, path, response, **kw): + """Returns a list of inbound shipments based on criteria that + you specify. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipments_by_next_token(self, path, response, **kw): + """Returns the next page of inbound shipments using the NextToken + parameter. + """ + return self.post_request(path, kw, response) + + @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipment_items(self, path, response, **kw): + """Returns a list of items in a specified inbound shipment, or a + list of items that were updated within a specified time frame. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipment_items_by_next_token(self, path, response, **kw): + """Returns the next page of inbound shipment items using the + NextToken parameter. + """ + return self.post_request(path, kw, response) + + @api_action('Inbound', 2, 300, 'GetServiceStatus') + def get_inbound_service_status(self, path, response, **kw): + """Returns the operational status of the Fulfillment Inbound + Shipment API section. + """ + return self.post_request(path, kw, response) + + @requires(['SellerSkus'], ['QueryStartDateTime']) + @structured_lists('SellerSkus.member') + @api_action('Inventory', 30, 0.5) + def list_inventory_supply(self, path, response, **kw): + """Returns information about the availability of a seller's + inventory. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Inventory', 30, 0.5) + def list_inventory_supply_by_next_token(self, path, response, **kw): + """Returns the next page of information about the availability + of a seller's inventory using the NextToken parameter. + """ + return self.post_request(path, kw, response) + + @api_action('Inventory', 2, 300, 'GetServiceStatus') + def get_inventory_service_status(self, path, response, **kw): + """Returns the operational status of the Fulfillment Inventory + API section. 
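+
+        For example, assuming a configured connection conn:
+
+            status = conn.get_inventory_service_status()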
+        """
+        return self.post_request(path, kw, response)
+
+    @structured_objects('Address', 'Items')
+    @requires(['Address', 'Items'])
+    @api_action('Outbound', 30, 0.5)
+    def get_fulfillment_preview(self, path, response, **kw):
+        """Returns a list of fulfillment order previews based on items
+        and shipping speed categories that you specify.
+        """
+        return self.post_request(path, kw, response)
+
+    @structured_objects('DestinationAddress', 'Items')
+    @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId',
+               'ShippingSpeedCategory', 'DisplayableOrderDateTime',
+               'DestinationAddress', 'DisplayableOrderComment',
+               'Items'])
+    @api_action('Outbound', 30, 0.5)
+    def create_fulfillment_order(self, path, response, **kw):
+        """Requests that Amazon ship items from the seller's inventory
+        to a destination address.
+        """
+        return self.post_request(path, kw, response)
+
+    @requires(['SellerFulfillmentOrderId'])
+    @api_action('Outbound', 30, 0.5)
+    def get_fulfillment_order(self, path, response, **kw):
+        """Returns a fulfillment order based on a specified
+        SellerFulfillmentOrderId.
+        """
+        return self.post_request(path, kw, response)
+
+    @api_action('Outbound', 30, 0.5)
+    def list_all_fulfillment_orders(self, path, response, **kw):
+        """Returns a list of fulfillment orders fulfilled after (or
+        at) a specified date or by fulfillment method.
+        """
+        return self.post_request(path, kw, response)
+
+    @requires(['NextToken'])
+    @api_action('Outbound', 30, 0.5)
+    def list_all_fulfillment_orders_by_next_token(self, path, response, **kw):
+        """Returns the next page of fulfillment orders using the
+        NextToken parameter.
+        """
+        return self.post_request(path, kw, response)
+
+    @requires(['SellerFulfillmentOrderId'])
+    @api_action('Outbound', 30, 0.5)
+    def cancel_fulfillment_order(self, path, response, **kw):
+        """Requests that Amazon stop attempting to fulfill an existing
+        fulfillment order.
+        """
+        return self.post_request(path, kw, response)
+
+    @api_action('Outbound', 2, 300, 'GetServiceStatus')
+    def get_outbound_service_status(self, path, response, **kw):
+        """Returns the operational status of the Fulfillment Outbound
+        API section.
+        """
+        return self.post_request(path, kw, response)
+
+    @requires(['CreatedAfter'], ['LastUpdatedAfter'])
+    @exclusive(['CreatedAfter'], ['LastUpdatedAfter'])
+    @dependent('CreatedBefore', ['CreatedAfter'])
+    @exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId'])
+    @dependent('LastUpdatedBefore', ['LastUpdatedAfter'])
+    @exclusive(['CreatedAfter'], ['LastUpdatedBefore'])
+    @requires(['MarketplaceId'])
+    @structured_objects('OrderTotal', 'ShippingAddress',
+                        'PaymentExecutionDetail')
+    @structured_lists('MarketplaceId.Id', 'OrderStatus.Status',
+                      'FulfillmentChannel.Channel', 'PaymentMethod.')
+    @api_action('Orders', 6, 60)
+    def list_orders(self, path, response, **kw):
+        """Returns a list of orders created or updated during a time
+        frame that you specify.
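+
+        A sketch with placeholder values; the marketplace ID and
+        timestamp below are hypothetical, and conn is assumed to be
+        a configured MWSConnection:
+
+            response = conn.list_orders(
+                MarketplaceId=['ATVPDKIKX0DER'],
+                CreatedAfter='2013-09-01T00:00:00Z')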
+ """ + toggle = set(('FulfillmentChannel.Channel.1', + 'OrderStatus.Status.1', 'PaymentMethod.1', + 'LastUpdatedAfter', 'LastUpdatedBefore')) + for do, dont in { + 'BuyerEmail': toggle.union(['SellerOrderId']), + 'SellerOrderId': toggle.union(['BuyerEmail']), + }.items(): + if do in kw and filter(kw.has_key, dont): + message = "Don't include {0} when specifying " \ + "{1}".format(' or '.join(dont), do) + raise AssertionError(message) + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Orders', 6, 60) + def list_orders_by_next_token(self, path, response, **kw): + """Returns the next page of orders using the NextToken value + that was returned by your previous request to either + ListOrders or ListOrdersByNextToken. + """ + return self.post_request(path, kw, response) + + @requires(['AmazonOrderId']) + @structured_lists('AmazonOrderId.Id') + @api_action('Orders', 6, 60) + def get_order(self, path, response, **kw): + """Returns an order for each AmazonOrderId that you specify. + """ + return self.post_request(path, kw, response) + + @requires(['AmazonOrderId']) + @api_action('Orders', 30, 2) + def list_order_items(self, path, response, **kw): + """Returns order item information for an AmazonOrderId that + you specify. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Orders', 30, 2) + def list_order_items_by_next_token(self, path, response, **kw): + """Returns the next page of order items using the NextToken + value that was returned by your previous request to either + ListOrderItems or ListOrderItemsByNextToken. + """ + return self.post_request(path, kw, response) + + @api_action('Orders', 2, 300, 'GetServiceStatus') + def get_orders_service_status(self, path, response, **kw): + """Returns the operational status of the Orders API section. + """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'Query']) + @api_action('Products', 20, 20) + def list_matching_products(self, path, response, **kw): + """Returns a list of products and their attributes, ordered + by relevancy, based on a search query that you specify. + """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 20) + def get_matching_product(self, path, response, **kw): + """Returns a list of products and their attributes, based on + a list of ASIN values that you specify. + """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'IdType', 'IdList']) + @structured_lists('IdList.Id') + @api_action('Products', 20, 20) + def get_matching_product_for_id(self, path, response, **kw): + """Returns a list of products and their attributes, based on + a list of Product IDs that you specify. + """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 10, 'GetCompetitivePricingForSKU') + def get_competitive_pricing_for_sku(self, path, response, **kw): + """Returns the current competitive pricing of a product, + based on the SellerSKUs and MarketplaceId that you specify. 
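+
+        For example, with placeholder identifiers and an assumed
+        configured connection conn:
+
+            response = conn.get_competitive_pricing_for_sku(
+                MarketplaceId='ATVPDKIKX0DER',
+                SellerSKUList=['example-sku-1', 'example-sku-2'])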
+ """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 10, 'GetCompetitivePricingForASIN') + def get_competitive_pricing_for_asin(self, path, response, **kw): + """Returns the current competitive pricing of a product, + based on the ASINs and MarketplaceId that you specify. + """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU') + def get_lowest_offer_listings_for_sku(self, path, response, **kw): + """Returns the lowest price offer listings for a specific + product by item condition and SellerSKUs. + """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN') + def get_lowest_offer_listings_for_asin(self, path, response, **kw): + """Returns the lowest price offer listings for a specific + product by item condition and ASINs. + """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'SellerSKU']) + @api_action('Products', 20, 20, 'GetProductCategoriesForSKU') + def get_product_categories_for_sku(self, path, response, **kw): + """Returns the product categories that a SellerSKU belongs to. + """ + return self.post_request(path, kw, response) + + @requires(['MarketplaceId', 'ASIN']) + @api_action('Products', 20, 20, 'GetProductCategoriesForASIN') + def get_product_categories_for_asin(self, path, response, **kw): + """Returns the product categories that an ASIN belongs to. + """ + return self.post_request(path, kw, response) + + @api_action('Products', 2, 300, 'GetServiceStatus') + def get_products_service_status(self, path, response, **kw): + """Returns the operational status of the Products API section. + """ + return self.post_request(path, kw, response) + + @api_action('Sellers', 15, 60) + def list_marketplace_participations(self, path, response, **kw): + """Returns a list of marketplaces that the seller submitting + the request can sell in, and a list of participations that + include seller-specific information in that marketplace. + """ + return self.post_request(path, kw, response) + + @requires(['NextToken']) + @api_action('Sellers', 15, 60) + def list_marketplace_participations_by_next_token(self, path, response, + **kw): + """Returns the next page of marketplaces and participations + using the NextToken value that was returned by your + previous request to either ListMarketplaceParticipations + or ListMarketplaceParticipationsByNextToken. 
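+
+        The iter_call helper can drive this continuation
+        automatically; for example, assuming a configured
+        connection conn:
+
+            for page in conn.iter_call('ListMarketplaceParticipations'):
+                participations = page._result.ListParticipations.Participation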
+ """ + return self.post_request(path, kw, response) diff --git a/awx/lib/site-packages/boto/mws/exception.py b/awx/lib/site-packages/boto/mws/exception.py new file mode 100644 index 0000000000..d84df4a853 --- /dev/null +++ b/awx/lib/site-packages/boto/mws/exception.py @@ -0,0 +1,75 @@ +# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from boto.exception import BotoServerError + + +class ResponseErrorFactory(BotoServerError): + + def __new__(cls, *args, **kw): + error = BotoServerError(*args, **kw) + try: + newclass = globals()[error.error_code] + except KeyError: + newclass = ResponseError + obj = newclass.__new__(newclass, *args, **kw) + obj.__dict__.update(error.__dict__) + return obj + + +class ResponseError(BotoServerError): + """ + Undefined response error. + """ + retry = False + + def __repr__(self): + return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__, + self.status, self.reason, + self.error_message) + + def __str__(self): + return 'MWS Response Error: {0.status} {0.__class__.__name__} {1}\n' \ + '{2}\n' \ + '{0.error_message}'.format(self, + self.retry and '(Retriable)' or '', + self.__doc__.strip()) + + +class RetriableResponseError(ResponseError): + retry = True + + +class InvalidParameterValue(ResponseError): + """ + One or more parameter values in the request is invalid. + """ + + +class InvalidParameter(ResponseError): + """ + One or more parameters in the request is invalid. + """ + + +class InvalidAddress(ResponseError): + """ + Invalid address. + """ diff --git a/awx/lib/site-packages/boto/mws/response.py b/awx/lib/site-packages/boto/mws/response.py new file mode 100644 index 0000000000..06740b56df --- /dev/null +++ b/awx/lib/site-packages/boto/mws/response.py @@ -0,0 +1,655 @@ +# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from decimal import Decimal + + +class ComplexType(dict): + _value = 'Value' + + def __repr__(self): + return '{0}{1}'.format(getattr(self, self._value, None), self.copy()) + + def __str__(self): + return str(getattr(self, self._value, '')) + + +class DeclarativeType(object): + def __init__(self, _hint=None, **kw): + if _hint is not None: + self._hint = _hint + else: + class JITResponse(ResponseElement): + pass + self._hint = JITResponse + for name, value in kw.items(): + setattr(self._hint, name, value) + self._value = None + + def setup(self, parent, name, *args, **kw): + self._parent = parent + self._name = name + self._clone = self.__class__(self._hint) + self._clone._parent = parent + self._clone._name = name + setattr(self._parent, self._name, self._clone) + + def start(self, *args, **kw): + raise NotImplemented + + def end(self, *args, **kw): + raise NotImplemented + + def teardown(self, *args, **kw): + if self._value is None: + delattr(self._parent, self._name) + else: + setattr(self._parent, self._name, self._value) + + +class Element(DeclarativeType): + def start(self, *args, **kw): + self._value = self._hint(parent=self._parent, **kw) + return self._value + + def end(self, *args, **kw): + pass + + +class SimpleList(DeclarativeType): + def __init__(self, *args, **kw): + DeclarativeType.__init__(self, *args, **kw) + self._value = [] + + def teardown(self, *args, **kw): + if self._value == []: + self._value = None + DeclarativeType.teardown(self, *args, **kw) + + def start(self, *args, **kw): + return None + + def end(self, name, value, *args, **kw): + self._value.append(value) + + +class ElementList(SimpleList): + def start(self, *args, **kw): + value = self._hint(parent=self._parent, **kw) + self._value += [value] + return self._value[-1] + + def end(self, *args, **kw): + pass + + +class MemberList(ElementList): + def __init__(self, *args, **kw): + self._this = kw.get('this') + ElementList.__init__(self, *args, **kw) + + def start(self, attrs={}, **kw): + Class = self._this or self._parent._type_for(self._name, attrs) + if issubclass(self._hint, ResponseElement): + ListClass = ElementList + else: + ListClass = SimpleList + setattr(Class, Class._member, ListClass(self._hint)) + self._value = Class(attrs=attrs, parent=self._parent, **kw) + return self._value + + def end(self, *args, **kw): + self._value = getattr(self._value, self._value._member) + ElementList.end(self, *args, **kw) + + +def ResponseFactory(action): + result = globals().get(action + 'Result', ResponseElement) + + class MWSResponse(Response): + _name = action + 'Response' + + setattr(MWSResponse, action + 'Result', Element(result)) + return MWSResponse + + +def strip_namespace(func): + def wrapper(self, name, *args, **kw): + if self._namespace is not None: + if name.startswith(self._namespace + ':'): + name = name[len(self._namespace + ':'):] + return func(self, name, *args, **kw) + return wrapper + + +class ResponseElement(dict): + _override = {} + _member = 'member' + _name = None + _namespace = None + + def __init__(self, connection=None, name=None, 
parent=None, attrs={}): + if parent is not None and self._namespace is None: + self._namespace = parent._namespace + if connection is not None: + self._connection = connection + self._name = name or self._name or self.__class__.__name__ + self._declared('setup', attrs=attrs) + dict.__init__(self, attrs.copy()) + + def _declared(self, op, **kw): + def inherit(obj): + result = {} + for cls in getattr(obj, '__bases__', ()): + result.update(inherit(cls)) + result.update(obj.__dict__) + return result + + scope = inherit(self.__class__) + scope.update(self.__dict__) + declared = lambda attr: isinstance(attr[1], DeclarativeType) + for name, node in filter(declared, scope.items()): + getattr(node, op)(self, name, parentname=self._name, **kw) + + @property + def connection(self): + return self._connection + + def __repr__(self): + render = lambda pair: '{0!s}: {1!r}'.format(*pair) + do_show = lambda pair: not pair[0].startswith('_') + attrs = filter(do_show, self.__dict__.items()) + name = self.__class__.__name__ + if name == 'JITResponse': + name = '^{0}^'.format(self._name or '') + elif name == 'MWSResponse': + name = '^{0}^'.format(self._name or name) + return '{0}{1!r}({2})'.format( + name, self.copy(), ', '.join(map(render, attrs))) + + def _type_for(self, name, attrs): + return self._override.get(name, globals().get(name, ResponseElement)) + + @strip_namespace + def startElement(self, name, attrs, connection): + attribute = getattr(self, name, None) + if isinstance(attribute, DeclarativeType): + return attribute.start(name=name, attrs=attrs, + connection=connection) + elif attrs.getLength(): + setattr(self, name, ComplexType(attrs.copy())) + else: + return None + + @strip_namespace + def endElement(self, name, value, connection): + attribute = getattr(self, name, None) + if name == self._name: + self._declared('teardown') + elif isinstance(attribute, DeclarativeType): + attribute.end(name=name, value=value, connection=connection) + elif isinstance(attribute, ComplexType): + setattr(attribute, attribute._value, value) + else: + setattr(self, name, value) + + +class Response(ResponseElement): + ResponseMetadata = Element() + + @strip_namespace + def startElement(self, name, attrs, connection): + if name == self._name: + self.update(attrs) + else: + return ResponseElement.startElement(self, name, attrs, connection) + + @property + def _result(self): + return getattr(self, self._action + 'Result', None) + + @property + def _action(self): + return (self._name or self.__class__.__name__)[:-len('Response')] + + +class ResponseResultList(Response): + _ResultClass = ResponseElement + + def __init__(self, *args, **kw): + setattr(self, self._action + 'Result', ElementList(self._ResultClass)) + Response.__init__(self, *args, **kw) + + +class FeedSubmissionInfo(ResponseElement): + pass + + +class SubmitFeedResult(ResponseElement): + FeedSubmissionInfo = Element(FeedSubmissionInfo) + + +class GetFeedSubmissionListResult(ResponseElement): + FeedSubmissionInfo = ElementList(FeedSubmissionInfo) + + +class GetFeedSubmissionListByNextTokenResult(GetFeedSubmissionListResult): + pass + + +class GetFeedSubmissionCountResult(ResponseElement): + pass + + +class CancelFeedSubmissionsResult(GetFeedSubmissionListResult): + pass + + +class GetServiceStatusResult(ResponseElement): + Messages = Element(Messages=ElementList()) + + +class ReportRequestInfo(ResponseElement): + pass + + +class RequestReportResult(ResponseElement): + ReportRequestInfo = Element() + + +class GetReportRequestListResult(RequestReportResult): + 
ReportRequestInfo = ElementList() + + +class GetReportRequestListByNextTokenResult(GetReportRequestListResult): + pass + + +class CancelReportRequestsResult(RequestReportResult): + pass + + +class GetReportListResult(ResponseElement): + ReportInfo = ElementList() + + +class GetReportListByNextTokenResult(GetReportListResult): + pass + + +class ManageReportScheduleResult(ResponseElement): + ReportSchedule = Element() + + +class GetReportScheduleListResult(ManageReportScheduleResult): + pass + + +class GetReportScheduleListByNextTokenResult(GetReportScheduleListResult): + pass + + +class UpdateReportAcknowledgementsResult(GetReportListResult): + pass + + +class CreateInboundShipmentPlanResult(ResponseElement): + InboundShipmentPlans = MemberList(ShipToAddress=Element(), + Items=MemberList()) + + +class ListInboundShipmentsResult(ResponseElement): + ShipmentData = MemberList(Element(ShipFromAddress=Element())) + + +class ListInboundShipmentsByNextTokenResult(ListInboundShipmentsResult): + pass + + +class ListInboundShipmentItemsResult(ResponseElement): + ItemData = MemberList() + + +class ListInboundShipmentItemsByNextTokenResult(ListInboundShipmentItemsResult): + pass + + +class ListInventorySupplyResult(ResponseElement): + InventorySupplyList = MemberList( + EarliestAvailability=Element(), + SupplyDetail=MemberList(\ + EarliestAvailabileToPick=Element(), + LatestAvailableToPick=Element(), + ) + ) + + +class ListInventorySupplyByNextTokenResult(ListInventorySupplyResult): + pass + + +class ComplexAmount(ResponseElement): + _amount = 'Value' + + def __repr__(self): + return '{0} {1}'.format(self.CurrencyCode, getattr(self, self._amount)) + + def __float__(self): + return float(getattr(self, self._amount)) + + def __str__(self): + return str(getattr(self, self._amount)) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in ('CurrencyCode', self._amount): + message = 'Unrecognized tag {0} in ComplexAmount'.format(name) + raise AssertionError(message) + return ResponseElement.startElement(self, name, attrs, connection) + + @strip_namespace + def endElement(self, name, value, connection): + if name == self._amount: + value = Decimal(value) + ResponseElement.endElement(self, name, value, connection) + + +class ComplexMoney(ComplexAmount): + _amount = 'Amount' + + +class ComplexWeight(ResponseElement): + def __repr__(self): + return '{0} {1}'.format(self.Value, self.Unit) + + def __float__(self): + return float(self.Value) + + def __str__(self): + return str(self.Value) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in ('Unit', 'Value'): + message = 'Unrecognized tag {0} in ComplexWeight'.format(name) + raise AssertionError(message) + return ResponseElement.startElement(self, name, attrs, connection) + + @strip_namespace + def endElement(self, name, value, connection): + if name == 'Value': + value = Decimal(value) + ResponseElement.endElement(self, name, value, connection) + + +class Dimension(ComplexType): + _value = 'Value' + + +class ComplexDimensions(ResponseElement): + _dimensions = ('Height', 'Length', 'Width', 'Weight') + + def __repr__(self): + values = [getattr(self, key, None) for key in self._dimensions] + values = filter(None, values) + return 'x'.join(map('{0.Value:0.2f}{0[Units]}'.format, values)) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in self._dimensions: + message = 'Unrecognized tag {0} in ComplexDimensions'.format(name) + raise AssertionError(message) + 
setattr(self, name, Dimension(attrs.copy())) + + @strip_namespace + def endElement(self, name, value, connection): + if name in self._dimensions: + value = Decimal(value or '0') + ResponseElement.endElement(self, name, value, connection) + + +class FulfillmentPreviewItem(ResponseElement): + EstimatedShippingWeight = Element(ComplexWeight) + + +class FulfillmentPreview(ResponseElement): + EstimatedShippingWeight = Element(ComplexWeight) + EstimatedFees = MemberList(\ + Element(\ + Amount=Element(ComplexAmount), + ), + ) + UnfulfillablePreviewItems = MemberList(FulfillmentPreviewItem) + FulfillmentPreviewShipments = MemberList(\ + FulfillmentPreviewItems=MemberList(FulfillmentPreviewItem), + ) + + +class GetFulfillmentPreviewResult(ResponseElement): + FulfillmentPreviews = MemberList(FulfillmentPreview) + + +class FulfillmentOrder(ResponseElement): + DestinationAddress = Element() + NotificationEmailList = MemberList(str) + + +class GetFulfillmentOrderResult(ResponseElement): + FulfillmentOrder = Element(FulfillmentOrder) + FulfillmentShipment = MemberList(Element(\ + FulfillmentShipmentItem=MemberList(), + FulfillmentShipmentPackage=MemberList(), + ) + ) + FulfillmentOrderItem = MemberList() + + +class ListAllFulfillmentOrdersResult(ResponseElement): + FulfillmentOrders = MemberList(FulfillmentOrder) + + +class ListAllFulfillmentOrdersByNextTokenResult(ListAllFulfillmentOrdersResult): + pass + + +class Image(ResponseElement): + pass + + +class AttributeSet(ResponseElement): + ItemDimensions = Element(ComplexDimensions) + ListPrice = Element(ComplexMoney) + PackageDimensions = Element(ComplexDimensions) + SmallImage = Element(Image) + + +class ItemAttributes(AttributeSet): + Languages = Element(Language=ElementList()) + + def __init__(self, *args, **kw): + names = ('Actor', 'Artist', 'Author', 'Creator', 'Director', + 'Feature', 'Format', 'GemType', 'MaterialType', + 'MediaType', 'OperatingSystem', 'Platform') + for name in names: + setattr(self, name, SimpleList()) + AttributeSet.__init__(self, *args, **kw) + + +class VariationRelationship(ResponseElement): + Identifiers = Element(MarketplaceASIN=Element(), + SKUIdentifier=Element()) + GemType = SimpleList() + MaterialType = SimpleList() + OperatingSystem = SimpleList() + + +class Price(ResponseElement): + LandedPrice = Element(ComplexMoney) + ListingPrice = Element(ComplexMoney) + Shipping = Element(ComplexMoney) + + +class CompetitivePrice(ResponseElement): + Price = Element(Price) + + +class CompetitivePriceList(ResponseElement): + CompetitivePrice = ElementList(CompetitivePrice) + + +class CompetitivePricing(ResponseElement): + CompetitivePrices = Element(CompetitivePriceList) + NumberOfOfferListings = SimpleList() + TradeInValue = Element(ComplexMoney) + + +class SalesRank(ResponseElement): + pass + + +class LowestOfferListing(ResponseElement): + Qualifiers = Element(ShippingTime=Element()) + Price = Element(Price) + + +class Product(ResponseElement): + _namespace = 'ns2' + Identifiers = Element(MarketplaceASIN=Element(), + SKUIdentifier=Element()) + AttributeSets = Element(\ + ItemAttributes=ElementList(ItemAttributes), + ) + Relationships = Element(\ + VariationParent=ElementList(VariationRelationship), + ) + CompetitivePricing = ElementList(CompetitivePricing) + SalesRankings = Element(\ + SalesRank=ElementList(SalesRank), + ) + LowestOfferListings = Element(\ + LowestOfferListing=ElementList(LowestOfferListing), + ) + + +class ListMatchingProductsResult(ResponseElement): + Products = Element(Product=ElementList(Product)) + + +class 
ProductsBulkOperationResult(ResponseElement): + Product = Element(Product) + Error = Element() + + +class ProductsBulkOperationResponse(ResponseResultList): + _ResultClass = ProductsBulkOperationResult + + +class GetMatchingProductResponse(ProductsBulkOperationResponse): + pass + + +class GetMatchingProductForIdResult(ListMatchingProductsResult): + pass + + +class GetCompetitivePricingForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetCompetitivePricingForASINResponse(ProductsBulkOperationResponse): + pass + + +class GetLowestOfferListingsForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetLowestOfferListingsForASINResponse(ProductsBulkOperationResponse): + pass + + +class ProductCategory(ResponseElement): + + def __init__(self, *args, **kw): + setattr(self, 'Parent', Element(ProductCategory)) + ResponseElement.__init__(self, *args, **kw) + + +class GetProductCategoriesResult(ResponseElement): + Self = Element(ProductCategory) + + +class GetProductCategoriesForSKUResult(GetProductCategoriesResult): + pass + + +class GetProductCategoriesForASINResult(GetProductCategoriesResult): + pass + + +class Order(ResponseElement): + OrderTotal = Element(ComplexMoney) + ShippingAddress = Element() + PaymentExecutionDetail = Element(\ + PaymentExecutionDetailItem=ElementList(\ + PaymentExecutionDetailItem=Element(\ + Payment=Element(ComplexMoney) + ) + ) + ) + + +class ListOrdersResult(ResponseElement): + Orders = Element(Order=ElementList(Order)) + + +class ListOrdersByNextTokenResult(ListOrdersResult): + pass + + +class GetOrderResult(ListOrdersResult): + pass + + +class OrderItem(ResponseElement): + ItemPrice = Element(ComplexMoney) + ShippingPrice = Element(ComplexMoney) + GiftWrapPrice = Element(ComplexMoney) + ItemTax = Element(ComplexMoney) + ShippingTax = Element(ComplexMoney) + GiftWrapTax = Element(ComplexMoney) + ShippingDiscount = Element(ComplexMoney) + PromotionDiscount = Element(ComplexMoney) + PromotionIds = SimpleList() + CODFee = Element(ComplexMoney) + CODFeeDiscount = Element(ComplexMoney) + + +class ListOrderItemsResult(ResponseElement): + OrderItems = Element(OrderItem=ElementList(OrderItem)) + + +class ListMarketplaceParticipationsResult(ResponseElement): + ListParticipations = Element(Participation=ElementList()) + ListMarketplaces = Element(Marketplace=ElementList()) + + +class ListMarketplaceParticipationsByNextTokenResult(ListMarketplaceParticipationsResult): + pass diff --git a/awx/lib/site-packages/boto/opsworks/__init__.py b/awx/lib/site-packages/boto/opsworks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/boto/opsworks/exceptions.py b/awx/lib/site-packages/boto/opsworks/exceptions.py new file mode 100644 index 0000000000..da23e48521 --- /dev/null +++ b/awx/lib/site-packages/boto/opsworks/exceptions.py @@ -0,0 +1,30 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class ResourceNotFoundException(JSONResponseError): + pass + + +class ValidationException(JSONResponseError): + pass diff --git a/awx/lib/site-packages/boto/opsworks/layer1.py b/awx/lib/site-packages/boto/opsworks/layer1.py new file mode 100644 index 0000000000..ba147cd079 --- /dev/null +++ b/awx/lib/site-packages/boto/opsworks/layer1.py @@ -0,0 +1,1960 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import json +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.opsworks import exceptions + + +class OpsWorksConnection(AWSQueryConnection): + """ + AWS OpsWorks + Welcome to the AWS OpsWorks API Reference . This guide provides + descriptions, syntax, and usage examples about AWS OpsWorks + actions and data types, including common parameters and error + codes. + + AWS OpsWorks is an application management service that provides an + integrated experience for overseeing the complete application + lifecycle. For information about this product, go to the `AWS + OpsWorks`_ details page. + + **Endpoints** + + AWS OpsWorks supports only one endpoint, opsworks.us- + east-1.amazonaws.com (HTTPS), so you must connect to that + endpoint. You can then use the API to direct AWS OpsWorks to + create stacks in any AWS Region. + + **Chef Version** + + When you call CreateStack, CloneStack, or UpdateStack we recommend + you use the `ConfigurationManager` parameter to specify the Chef + version, 0.9 or 11.4. The default value is currently 0.9. However, + we expect to change the default value to 11.4 in September 2013. 
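+
+    A minimal usage sketch; credentials are assumed to come from the
+    standard boto configuration sources, and the stack ID and command
+    below are placeholders:
+
+        from boto.opsworks.layer1 import OpsWorksConnection
+
+        conn = OpsWorksConnection()
+        conn.create_deployment('my-stack-id',
+                               {'Name': 'update_custom_cookbooks'})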
+    """
+    APIVersion = "2013-02-18"
+    DefaultRegionName = "us-east-1"
+    DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com"
+    ServiceName = "OpsWorks"
+    TargetPrefix = "OpsWorks_20130218"
+    ResponseError = JSONResponseError
+
+    _faults = {
+        "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+        "ValidationException": exceptions.ValidationException,
+    }
+
+    def __init__(self, **kwargs):
+        region = kwargs.get('region')
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint)
+        kwargs['host'] = region.endpoint
+        AWSQueryConnection.__init__(self, **kwargs)
+        self.region = region
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def attach_elastic_load_balancer(self, elastic_load_balancer_name,
+                                     layer_id):
+        """
+        Attaches an Elastic Load Balancing instance to a specified
+        layer.
+
+        You must create the Elastic Load Balancing instance
+        separately, by using the Elastic Load Balancing console, API,
+        or CLI. For more information, see the `Elastic Load Balancing
+        Developer Guide`_.
+
+        :type elastic_load_balancer_name: string
+        :param elastic_load_balancer_name: The Elastic Load Balancing
+            instance's name.
+
+        :type layer_id: string
+        :param layer_id: The ID of the layer that the Elastic Load Balancing
+            instance is to be attached to.
+
+        """
+        params = {
+            'ElasticLoadBalancerName': elastic_load_balancer_name,
+            'LayerId': layer_id,
+        }
+        return self.make_request(action='AttachElasticLoadBalancer',
+                                 body=json.dumps(params))
+
+    def clone_stack(self, source_stack_id, service_role_arn, name=None,
+                    region=None, vpc_id=None, attributes=None,
+                    default_instance_profile_arn=None, default_os=None,
+                    hostname_theme=None, default_availability_zone=None,
+                    default_subnet_id=None, custom_json=None,
+                    configuration_manager=None, use_custom_cookbooks=None,
+                    custom_cookbooks_source=None, default_ssh_key_name=None,
+                    clone_permissions=None, clone_app_ids=None,
+                    default_root_device_type=None):
+        """
+        Creates a clone of a specified stack. For more information,
+        see `Clone a Stack`_.
+
+        :type source_stack_id: string
+        :param source_stack_id: The source stack ID.
+
+        :type name: string
+        :param name: The cloned stack name.
+
+        :type region: string
+        :param region: The cloned stack AWS region, such as "us-east-1". For
+            more information about AWS regions, see `Regions and Endpoints`_.
+
+        :type vpc_id: string
+        :param vpc_id: The ID of the VPC that the cloned stack is to be
+            launched into. It must be in the specified region. All instances
+            will be launched into this VPC, and you cannot change the ID later.
+
+            + If your account supports EC2 Classic, the default value is no
+              VPC.
+            + If your account does not support EC2 Classic, the default value
+              is the default VPC for the specified region.
+
+            If the VPC ID corresponds to a default VPC and you have specified
+            either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
+            parameter only, AWS OpsWorks infers the value of the other
+            parameter. If you specify neither parameter, AWS OpsWorks sets
+            these parameters to the first valid Availability Zone for the
+            specified region and the corresponding default VPC subnet ID,
+            respectively.
+
+            If you specify a nondefault VPC ID, note the following:
+
+            + It must belong to a VPC in your account that is in the specified
+              region.
+            + You must specify a value for `DefaultSubnetId`.
+
+            For more information on how to use AWS OpsWorks with a VPC, see
+            `Running a Stack in a VPC`_.
+            For more information on default VPC and EC2 Classic, see
+            `Supported Platforms`_.
+
+        :type attributes: map
+        :param attributes: A list of stack attributes and values as key/value
+            pairs to be added to the cloned stack.
+
+        :type service_role_arn: string
+        :param service_role_arn:
+            The stack AWS Identity and Access Management (IAM) role, which
+            allows AWS OpsWorks to work with AWS resources on your behalf. You
+            must set this parameter to the Amazon Resource Name (ARN) for an
+            existing IAM role. If you create a stack by using the AWS OpsWorks
+            console, it creates the role for you. You can obtain an existing
+            stack's IAM ARN programmatically by calling DescribePermissions.
+            For more information about IAM ARNs, see `Using Identifiers`_.
+
+            You must set this parameter to a valid service role ARN or the
+            action will fail; there is no default value. You can specify the
+            source stack's service role ARN, if you prefer, but you must do so
+            explicitly.
+
+        :type default_instance_profile_arn: string
+        :param default_instance_profile_arn: The ARN of an IAM profile that is
+            the default profile for all of the stack's EC2 instances. For more
+            information about IAM ARNs, see `Using Identifiers`_.
+
+        :type default_os: string
+        :param default_os: The cloned stack's default operating system, which
+            must be set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default
+            option is `Amazon Linux`.
+
+        :type hostname_theme: string
+        :param hostname_theme: The stack's host name theme, with spaces
+            replaced by underscores. The theme is used to generate host names
+            for the stack's instances. By default, `HostnameTheme` is set to
+            Layer_Dependent, which creates host names by appending integers to
+            the layer's short name. The other themes are:
+
+            + Baked_Goods
+            + Clouds
+            + European_Cities
+            + Fruits
+            + Greek_Deities
+            + Legendary_Creatures_from_Japan
+            + Planets_and_Moons
+            + Roman_Deities
+            + Scottish_Islands
+            + US_Cities
+            + Wild_Cats
+
+            To obtain a generated host name, call `GetHostNameSuggestion`,
+            which returns a host name based on the current theme.
+
+        :type default_availability_zone: string
+        :param default_availability_zone: The cloned stack's default
+            Availability Zone, which must be in the specified region. For more
+            information, see `Regions and Endpoints`_. If you also specify a
+            value for `DefaultSubnetId`, the subnet must be in the same zone.
+            For more information, see the `VpcId` parameter description.
+
+        :type default_subnet_id: string
+        :param default_subnet_id: The stack's default subnet ID. All instances
+            will be launched into this subnet unless you specify otherwise
+            when you create the instance. If you also specify a value for
+            `DefaultAvailabilityZone`, the subnet must be in the same zone.
+            For information on default values and when this parameter is
+            required, see the `VpcId` parameter description.
+
+        :type custom_json: string
+        :param custom_json: A string that contains user-defined, custom JSON.
+            It is used to override the corresponding default stack
+            configuration JSON values. The string should be in the following
+            format and must escape characters such as '"'.: `"{\"key1\":
+            \"value1\", \"key2\": \"value2\",...}"`
+            For more information on custom JSON, see `Use Custom JSON to
+            Modify the Stack Configuration JSON`_
+
+        :type configuration_manager: dict
+        :param configuration_manager: The configuration manager. When you
+            clone a stack, we recommend that you use the configuration manager
+            to specify the Chef version, 0.9 or 11.4.
The default value is + currently 0.9. However, we expect to change the default value to + 11.4 in September 2013. + + :type use_custom_cookbooks: boolean + :param use_custom_cookbooks: Whether to use custom cookbooks. + + :type custom_cookbooks_source: dict + :param custom_cookbooks_source: Contains the information required to + retrieve an app or cookbook from a repository. For more + information, see `Creating Apps`_ or `Custom Recipes and + Cookbooks`_. + + :type default_ssh_key_name: string + :param default_ssh_key_name: A default SSH key for the stack instances. + You can override this value when you create or update an instance. + + :type clone_permissions: boolean + :param clone_permissions: Whether to clone the source stack's + permissions. + + :type clone_app_ids: list + :param clone_app_ids: A list of source stack app IDs to be included in + the cloned stack. + + :type default_root_device_type: string + :param default_root_device_type: The default root device type. This + value is used by default for all instances in the cloned stack, but + you can override it when you create an instance. For more + information, see `Storage for the Root Device`_. + + """ + params = { + 'SourceStackId': source_stack_id, + 'ServiceRoleArn': service_role_arn, + } + if name is not None: + params['Name'] = name + if region is not None: + params['Region'] = region + if vpc_id is not None: + params['VpcId'] = vpc_id + if attributes is not None: + params['Attributes'] = attributes + if default_instance_profile_arn is not None: + params['DefaultInstanceProfileArn'] = default_instance_profile_arn + if default_os is not None: + params['DefaultOs'] = default_os + if hostname_theme is not None: + params['HostnameTheme'] = hostname_theme + if default_availability_zone is not None: + params['DefaultAvailabilityZone'] = default_availability_zone + if default_subnet_id is not None: + params['DefaultSubnetId'] = default_subnet_id + if custom_json is not None: + params['CustomJson'] = custom_json + if configuration_manager is not None: + params['ConfigurationManager'] = configuration_manager + if use_custom_cookbooks is not None: + params['UseCustomCookbooks'] = use_custom_cookbooks + if custom_cookbooks_source is not None: + params['CustomCookbooksSource'] = custom_cookbooks_source + if default_ssh_key_name is not None: + params['DefaultSshKeyName'] = default_ssh_key_name + if clone_permissions is not None: + params['ClonePermissions'] = clone_permissions + if clone_app_ids is not None: + params['CloneAppIds'] = clone_app_ids + if default_root_device_type is not None: + params['DefaultRootDeviceType'] = default_root_device_type + return self.make_request(action='CloneStack', + body=json.dumps(params)) + + def create_app(self, stack_id, name, type, shortname=None, + description=None, app_source=None, domains=None, + enable_ssl=None, ssl_configuration=None, attributes=None): + """ + Creates an app for a specified stack. For more information, + see `Creating Apps`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type shortname: string + :param shortname: The app's short name. + + :type name: string + :param name: The app name. + + :type description: string + :param description: A description of the app. + + :type type: string + :param type: The app type. Each supported type is associated with a + particular layer. For example, PHP applications are associated with + a PHP layer. AWS OpsWorks deploys an application to those instances + that are members of the corresponding layer. 
+ + :type app_source: dict + :param app_source: A `Source` object that specifies the app repository. + + :type domains: list + :param domains: The app virtual host settings, with multiple domains + separated by commas. For example: `'www.example.com, example.com'` + + :type enable_ssl: boolean + :param enable_ssl: Whether to enable SSL for the app. + + :type ssl_configuration: dict + :param ssl_configuration: An `SslConfiguration` object with the SSL + configuration. + + :type attributes: map + :param attributes: One or more user-defined key/value pairs to be added + to the stack attributes bag. + + """ + params = {'StackId': stack_id, 'Name': name, 'Type': type, } + if shortname is not None: + params['Shortname'] = shortname + if description is not None: + params['Description'] = description + if app_source is not None: + params['AppSource'] = app_source + if domains is not None: + params['Domains'] = domains + if enable_ssl is not None: + params['EnableSsl'] = enable_ssl + if ssl_configuration is not None: + params['SslConfiguration'] = ssl_configuration + if attributes is not None: + params['Attributes'] = attributes + return self.make_request(action='CreateApp', + body=json.dumps(params)) + + def create_deployment(self, stack_id, command, app_id=None, + instance_ids=None, comment=None, custom_json=None): + """ + Deploys a stack or app. + + + + App deployment generates a `deploy` event, which runs the + associated recipes and passes them a JSON stack configuration + object that includes information about the app. + + Stack deployment runs the `deploy` recipes but does not + raise an event. + + + For more information, see `Deploying Apps`_ and `Run Stack + Commands`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type app_id: string + :param app_id: The app ID. This parameter is required for app + deployments, but not for other deployment commands. + + :type instance_ids: list + :param instance_ids: The instance IDs for the deployment targets. + + :type command: dict + :param command: A `DeploymentCommand` object that specifies the + deployment command and any associated arguments. + + :type comment: string + :param comment: A user-defined comment. + + :type custom_json: string + :param custom_json: A string that contains user-defined, custom JSON. + It is used to override the corresponding default stack + configuration JSON values. The string should be in the following + format and must escape characters such as '"'.: `"{\"key1\": + \"value1\", \"key2\": \"value2\",...}"` + For more information on custom JSON, see `Use Custom JSON to Modify the + Stack Configuration JSON`_. + + """ + params = {'StackId': stack_id, 'Command': command, } + if app_id is not None: + params['AppId'] = app_id + if instance_ids is not None: + params['InstanceIds'] = instance_ids + if comment is not None: + params['Comment'] = comment + if custom_json is not None: + params['CustomJson'] = custom_json + return self.make_request(action='CreateDeployment', + body=json.dumps(params)) + + def create_instance(self, stack_id, layer_ids, instance_type, + auto_scaling_type=None, hostname=None, os=None, + ami_id=None, ssh_key_name=None, + availability_zone=None, subnet_id=None, + architecture=None, root_device_type=None, + install_updates_on_boot=None): + """ + Creates an instance in a specified stack. For more + information, see `Adding an Instance to a Layer`_. + + :type stack_id: string + :param stack_id: The stack ID. 
+
+        :type layer_ids: list
+        :param layer_ids: An array that contains the instance layer IDs.
+
+        :type instance_type: string
+        :param instance_type: The instance type. AWS OpsWorks supports all
+            instance types except Cluster Compute, Cluster GPU, and High Memory
+            Cluster. For more information, see `Instance Families and Types`_.
+            The parameter values that you use to specify the various types are
+            in the API Name column of the Available Instance Types table.
+
+        :type auto_scaling_type: string
+        :param auto_scaling_type:
+            The instance auto scaling type, which has three possible values:
+
+
+        + **AlwaysRunning**: A 24/7 instance, which is not affected by auto
+          scaling.
+        + **TimeBasedAutoScaling**: A time-based auto scaling instance, which
+          is started and stopped based on a specified schedule. To specify
+          the schedule, call SetTimeBasedAutoScaling.
+        + **LoadBasedAutoScaling**: A load-based auto scaling instance, which
+          is started and stopped based on load metrics. To use load-based
+          auto scaling, you must enable it for the instance layer and
+          configure the thresholds by calling SetLoadBasedAutoScaling.
+
+        :type hostname: string
+        :param hostname: The instance host name.
+
+        :type os: string
+        :param os: The instance operating system, which must be set to one of
+            the following.
+
+        + Standard operating systems: `Amazon Linux` or `Ubuntu 12.04 LTS`
+        + Custom AMIs: `Custom`
+
+
+        The default option is `Amazon Linux`. If you set this parameter to
+        `Custom`, you must use the CreateInstance action's AmiId parameter
+        to specify the custom AMI that you want to use. For more
+        information on the standard operating systems, see `Operating
+        Systems`_. For more information on how to use custom AMIs with
+        OpsWorks, see `Using Custom AMIs`_.
+
+        :type ami_id: string
+        :param ami_id: A custom AMI ID to be used to create the instance. The
+            AMI should be based on one of the standard AWS OpsWorks AMIs:
+            Amazon Linux or Ubuntu 12.04 LTS. For more information, see
+            `Instances`_.
+
+        :type ssh_key_name: string
+        :param ssh_key_name: The instance SSH key name.
+
+        :type availability_zone: string
+        :param availability_zone: The instance Availability Zone. For more
+            information, see `Regions and Endpoints`_.
+
+        :type subnet_id: string
+        :param subnet_id: The ID of the instance's subnet. If the stack is
+            running in a VPC, you can use this parameter to override the
+            stack's default subnet ID value and direct AWS OpsWorks to launch
+            the instance in a different subnet.
+
+        :type architecture: string
+        :param architecture: The instance architecture. Instance types do not
+            necessarily support both architectures. For a list of the
+            architectures that are supported by the different instance types,
+            see `Instance Families and Types`_.
+
+        :type root_device_type: string
+        :param root_device_type: The instance root device type. For more
+            information, see `Storage for the Root Device`_.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+            Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+        We strongly recommend using the default value of `True`, to ensure that
+        your instances have the latest security updates.
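+
+        Example (editor's sketch, not from the AWS documentation; the
+        region is arbitrary and the stack and layer IDs are hypothetical
+        placeholders)::
+
+            import boto.opsworks
+            conn = boto.opsworks.connect_to_region('us-east-1')
+            result = conn.create_instance(
+                stack_id='STACK_ID', layer_ids=['LAYER_ID'],
+                instance_type='m1.small', os='Amazon Linux')
+            # The response is the parsed JSON body,
+            # e.g. {'InstanceId': '...'}.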
+
+        """
+        params = {
+            'StackId': stack_id,
+            'LayerIds': layer_ids,
+            'InstanceType': instance_type,
+        }
+        if auto_scaling_type is not None:
+            params['AutoScalingType'] = auto_scaling_type
+        if hostname is not None:
+            params['Hostname'] = hostname
+        if os is not None:
+            params['Os'] = os
+        if ami_id is not None:
+            params['AmiId'] = ami_id
+        if ssh_key_name is not None:
+            params['SshKeyName'] = ssh_key_name
+        if availability_zone is not None:
+            params['AvailabilityZone'] = availability_zone
+        if subnet_id is not None:
+            params['SubnetId'] = subnet_id
+        if architecture is not None:
+            params['Architecture'] = architecture
+        if root_device_type is not None:
+            params['RootDeviceType'] = root_device_type
+        if install_updates_on_boot is not None:
+            params['InstallUpdatesOnBoot'] = install_updates_on_boot
+        return self.make_request(action='CreateInstance',
+                                 body=json.dumps(params))
+
+    def create_layer(self, stack_id, type, name, shortname, attributes=None,
+                     custom_instance_profile_arn=None,
+                     custom_security_group_ids=None, packages=None,
+                     volume_configurations=None, enable_auto_healing=None,
+                     auto_assign_elastic_ips=None, custom_recipes=None,
+                     install_updates_on_boot=None):
+        """
+        Creates a layer. For more information, see `How to Create a
+        Layer`_.
+
+        You should use **CreateLayer** for noncustom layer types such
+        as PHP App Server only if the stack does not have an existing
+        layer of that type. A stack can have at most one instance of
+        each noncustom layer; if you attempt to create a second
+        instance, **CreateLayer** fails. A stack can have an arbitrary
+        number of custom layers, so you can call **CreateLayer** as
+        many times as you like for that layer type.
+
+        :type stack_id: string
+        :param stack_id: The layer stack ID.
+
+        :type type: string
+        :param type:
+            The layer type. A stack cannot have more than one layer of the
+            same type, except for custom layers. This parameter must be set
+            to one of the following:
+
+
+        + lb: An HAProxy layer
+        + web: A Static Web Server layer
+        + rails-app: A Rails App Server layer
+        + php-app: A PHP App Server layer
+        + nodejs-app: A Node.js App Server layer
+        + memcached: A Memcached layer
+        + db-master: A MySQL layer
+        + monitoring-master: A Ganglia layer
+        + custom: A custom layer
+
+        :type name: string
+        :param name: The layer name, which is used by the console.
+
+        :type shortname: string
+        :param shortname: The layer short name, which is used internally by AWS
+            OpsWorks and by Chef recipes. The short name is also used as the
+            name for the directory where your app files are installed. It can
+            have a maximum of 200 characters, which are limited to the
+            alphanumeric characters, '-', '_', and '.'.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes bag.
+
+        :type custom_instance_profile_arn: string
+        :param custom_instance_profile_arn: The ARN of an IAM profile to be
+            used for the layer's EC2 instances. For more information about
+            IAM ARNs, see `Using Identifiers`_.
+
+        :type custom_security_group_ids: list
+        :param custom_security_group_ids: An array containing the layer custom
+            security group IDs.
+
+        :type packages: list
+        :param packages: An array of `Package` objects that describe the layer
+            packages.
+
+        :type volume_configurations: list
+        :param volume_configurations: A `VolumeConfigurations` object that
+            describes the layer Amazon EBS volumes.
+
+        :type enable_auto_healing: boolean
+        :param enable_auto_healing: Whether to enable auto healing for the
+            layer.
+
+        :type auto_assign_elastic_ips: boolean
+        :param auto_assign_elastic_ips: Whether to automatically assign an
+            `Elastic IP address`_ to the layer.
+
+        :type custom_recipes: dict
+        :param custom_recipes: A `LayerCustomRecipes` object that specifies the
+            layer custom recipes.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+            Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+        We strongly recommend using the default value of `True`, to ensure that
+        your instances have the latest security updates.
+
+        """
+        params = {
+            'StackId': stack_id,
+            'Type': type,
+            'Name': name,
+            'Shortname': shortname,
+        }
+        if attributes is not None:
+            params['Attributes'] = attributes
+        if custom_instance_profile_arn is not None:
+            params['CustomInstanceProfileArn'] = custom_instance_profile_arn
+        if custom_security_group_ids is not None:
+            params['CustomSecurityGroupIds'] = custom_security_group_ids
+        if packages is not None:
+            params['Packages'] = packages
+        if volume_configurations is not None:
+            params['VolumeConfigurations'] = volume_configurations
+        if enable_auto_healing is not None:
+            params['EnableAutoHealing'] = enable_auto_healing
+        if auto_assign_elastic_ips is not None:
+            params['AutoAssignElasticIps'] = auto_assign_elastic_ips
+        if custom_recipes is not None:
+            params['CustomRecipes'] = custom_recipes
+        if install_updates_on_boot is not None:
+            params['InstallUpdatesOnBoot'] = install_updates_on_boot
+        return self.make_request(action='CreateLayer',
+                                 body=json.dumps(params))
+
+    def create_stack(self, name, region, service_role_arn,
+                     default_instance_profile_arn, vpc_id=None,
+                     attributes=None, default_os=None, hostname_theme=None,
+                     default_availability_zone=None, default_subnet_id=None,
+                     custom_json=None, configuration_manager=None,
+                     use_custom_cookbooks=None, custom_cookbooks_source=None,
+                     default_ssh_key_name=None,
+                     default_root_device_type=None):
+        """
+        Creates a new stack. For more information, see `Create a New
+        Stack`_.
+
+        :type name: string
+        :param name: The stack name.
+
+        :type region: string
+        :param region: The stack AWS region, such as "us-east-1". For more
+            information about Amazon regions, see `Regions and Endpoints`_.
+
+        :type vpc_id: string
+        :param vpc_id: The ID of the VPC that the stack is to be launched into.
+            It must be in the specified region. All instances will be launched
+            into this VPC, and you cannot change the ID later.
+
+        + If your account supports EC2 Classic, the default value is no VPC.
+        + If your account does not support EC2 Classic, the default value is
+          the default VPC for the specified region.
+
+
+        If the VPC ID corresponds to a default VPC and you have specified
+        either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
+        parameter only, AWS OpsWorks infers the value of the other
+        parameter. If you specify neither parameter, AWS OpsWorks sets
+        these parameters to the first valid Availability Zone for the
+        specified region and the corresponding default VPC subnet ID,
+        respectively.
+
+        If you specify a nondefault VPC ID, note the following:
+
+
+        + It must belong to a VPC in your account that is in the specified
+          region.
+        + You must specify a value for `DefaultSubnetId`.
+
+
+        For more information on how to use AWS OpsWorks with a VPC, see
+        `Running a Stack in a VPC`_. For more information on default VPC
+        and EC2 Classic, see `Supported Platforms`_.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes bag.
+
+        :type service_role_arn: string
+        :param service_role_arn: The stack AWS Identity and Access Management
+            (IAM) role, which allows AWS OpsWorks to work with AWS resources on
+            your behalf. You must set this parameter to the Amazon Resource
+            Name (ARN) for an existing IAM role. For more information about IAM
+            ARNs, see `Using Identifiers`_.
+
+        :type default_instance_profile_arn: string
+        :param default_instance_profile_arn: The ARN of an IAM profile that is
+            the default profile for all of the stack's EC2 instances. For more
+            information about IAM ARNs, see `Using Identifiers`_.
+
+        :type default_os: string
+        :param default_os: The stack's default operating system, which must be
+            set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is
+            `Amazon Linux`.
+
+        :type hostname_theme: string
+        :param hostname_theme: The stack's host name theme, with spaces
+            replaced by underscores. The theme is used to generate host names
+            for the stack's instances. By default, `HostnameTheme` is set to
+            Layer_Dependent, which creates host names by appending integers to
+            the layer's short name. The other themes are:
+
+        + Baked_Goods
+        + Clouds
+        + European_Cities
+        + Fruits
+        + Greek_Deities
+        + Legendary_Creatures_from_Japan
+        + Planets_and_Moons
+        + Roman_Deities
+        + Scottish_Islands
+        + US_Cities
+        + Wild_Cats
+
+
+        To obtain a generated host name, call `GetHostNameSuggestion`, which
+        returns a host name based on the current theme.
+
+        :type default_availability_zone: string
+        :param default_availability_zone: The stack's default Availability
+            Zone, which must be in the specified region. For more information,
+            see `Regions and Endpoints`_. If you also specify a value for
+            `DefaultSubnetId`, the subnet must be in the same zone. For more
+            information, see the `VpcId` parameter description.
+
+        :type default_subnet_id: string
+        :param default_subnet_id: The stack's default subnet ID. All instances
+            will be launched into this subnet unless you specify otherwise when
+            you create the instance. If you also specify a value for
+            `DefaultAvailabilityZone`, the subnet must be in that zone. For
+            information on default values and when this parameter is required,
+            see the `VpcId` parameter description.
+
+        :type custom_json: string
+        :param custom_json: A string that contains user-defined, custom JSON.
+            It is used to override the corresponding default stack
+            configuration JSON values. The string should be in the following
+            format and must escape characters such as '"'.: `"{\"key1\":
+            \"value1\", \"key2\": \"value2\",...}"`
+        For more information on custom JSON, see `Use Custom JSON to Modify the
+        Stack Configuration JSON`_.
+
+        :type configuration_manager: dict
+        :param configuration_manager: The configuration manager. When you
+            create a stack, we recommend that you use the configuration manager
+            to specify the Chef version, 0.9 or 11.4. The default value is
+            currently 0.9. However, we expect to change the default value to
+            11.4 in September 2013.
+
+        :type use_custom_cookbooks: boolean
+        :param use_custom_cookbooks: Whether the stack uses custom cookbooks.
+
+        :type custom_cookbooks_source: dict
+        :param custom_cookbooks_source: Contains the information required to
+            retrieve an app or cookbook from a repository. For more
+            information, see `Creating Apps`_ or `Custom Recipes and
+            Cookbooks`_.
+
+        :type default_ssh_key_name: string
+        :param default_ssh_key_name: A default SSH key for the stack instances.
+            You can override this value when you create or update an instance.
+
+        :type default_root_device_type: string
+        :param default_root_device_type: The default root device type. This
+            value is used by default for all instances in the stack, but you
+            can override it when you create an instance. For more
+            information, see `Storage for the Root Device`_.
+
+        """
+        params = {
+            'Name': name,
+            'Region': region,
+            'ServiceRoleArn': service_role_arn,
+            'DefaultInstanceProfileArn': default_instance_profile_arn,
+        }
+        if vpc_id is not None:
+            params['VpcId'] = vpc_id
+        if attributes is not None:
+            params['Attributes'] = attributes
+        if default_os is not None:
+            params['DefaultOs'] = default_os
+        if hostname_theme is not None:
+            params['HostnameTheme'] = hostname_theme
+        if default_availability_zone is not None:
+            params['DefaultAvailabilityZone'] = default_availability_zone
+        if default_subnet_id is not None:
+            params['DefaultSubnetId'] = default_subnet_id
+        if custom_json is not None:
+            params['CustomJson'] = custom_json
+        if configuration_manager is not None:
+            params['ConfigurationManager'] = configuration_manager
+        if use_custom_cookbooks is not None:
+            params['UseCustomCookbooks'] = use_custom_cookbooks
+        if custom_cookbooks_source is not None:
+            params['CustomCookbooksSource'] = custom_cookbooks_source
+        if default_ssh_key_name is not None:
+            params['DefaultSshKeyName'] = default_ssh_key_name
+        if default_root_device_type is not None:
+            params['DefaultRootDeviceType'] = default_root_device_type
+        return self.make_request(action='CreateStack',
+                                 body=json.dumps(params))
+
+    def create_user_profile(self, iam_user_arn, ssh_username=None,
+                            ssh_public_key=None):
+        """
+        Creates a new user profile.
+
+        :type iam_user_arn: string
+        :param iam_user_arn: The user's IAM ARN.
+
+        :type ssh_username: string
+        :param ssh_username: The user's SSH user name.
+
+        :type ssh_public_key: string
+        :param ssh_public_key: The user's public SSH key.
+
+        """
+        params = {'IamUserArn': iam_user_arn, }
+        if ssh_username is not None:
+            params['SshUsername'] = ssh_username
+        if ssh_public_key is not None:
+            params['SshPublicKey'] = ssh_public_key
+        return self.make_request(action='CreateUserProfile',
+                                 body=json.dumps(params))
+
+    def delete_app(self, app_id):
+        """
+        Deletes a specified app.
+
+        :type app_id: string
+        :param app_id: The app ID.
+
+        """
+        params = {'AppId': app_id, }
+        return self.make_request(action='DeleteApp',
+                                 body=json.dumps(params))
+
+    def delete_instance(self, instance_id, delete_elastic_ip=None,
+                        delete_volumes=None):
+        """
+        Deletes a specified instance. You must stop an instance before
+        you can delete it. For more information, see `Deleting
+        Instances`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        :type delete_elastic_ip: boolean
+        :param delete_elastic_ip: Whether to delete the instance Elastic IP
+            address.
+
+        :type delete_volumes: boolean
+        :param delete_volumes: Whether to delete the instance Amazon EBS
+            volumes.
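+
+        Example (editor's sketch; the instance ID is a hypothetical
+        placeholder and `conn` is an OpsWorksConnection as in the
+        create_instance example)::
+
+            # Instances must be stopped before they can be deleted.
+            conn.stop_instance('INSTANCE_ID')
+            conn.delete_instance('INSTANCE_ID', delete_volumes=True)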
+ + """ + params = {'InstanceId': instance_id, } + if delete_elastic_ip is not None: + params['DeleteElasticIp'] = delete_elastic_ip + if delete_volumes is not None: + params['DeleteVolumes'] = delete_volumes + return self.make_request(action='DeleteInstance', + body=json.dumps(params)) + + def delete_layer(self, layer_id): + """ + Deletes a specified layer. You must first stop and then delete + all associated instances. For more information, see `How to + Delete a Layer`_. + + :type layer_id: string + :param layer_id: The layer ID. + + """ + params = {'LayerId': layer_id, } + return self.make_request(action='DeleteLayer', + body=json.dumps(params)) + + def delete_stack(self, stack_id): + """ + Deletes a specified stack. You must first delete all + instances, layers, and apps. For more information, see `Shut + Down a Stack`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + return self.make_request(action='DeleteStack', + body=json.dumps(params)) + + def delete_user_profile(self, iam_user_arn): + """ + Deletes a user profile. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. + + """ + params = {'IamUserArn': iam_user_arn, } + return self.make_request(action='DeleteUserProfile', + body=json.dumps(params)) + + def describe_apps(self, stack_id=None, app_ids=None): + """ + Requests a description of a specified set of apps. + + You must specify at least one of the parameters. + + :type stack_id: string + :param stack_id: The app stack ID. If you use this parameter, + `DescribeApps` returns a description of the apps in the specified + stack. + + :type app_ids: list + :param app_ids: An array of app IDs for the apps to be described. If + you use this parameter, `DescribeApps` returns a description of the + specified apps. Otherwise, it returns a description of every app. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if app_ids is not None: + params['AppIds'] = app_ids + return self.make_request(action='DescribeApps', + body=json.dumps(params)) + + def describe_commands(self, deployment_id=None, instance_id=None, + command_ids=None): + """ + Describes the results of specified commands. + + You must specify at least one of the parameters. + + :type deployment_id: string + :param deployment_id: The deployment ID. If you include this parameter, + `DescribeCommands` returns a description of the commands associated + with the specified deployment. + + :type instance_id: string + :param instance_id: The instance ID. If you include this parameter, + `DescribeCommands` returns a description of the commands associated + with the specified instance. + + :type command_ids: list + :param command_ids: An array of command IDs. If you include this + parameter, `DescribeCommands` returns a description of the + specified commands. Otherwise, it returns a description of every + command. + + """ + params = {} + if deployment_id is not None: + params['DeploymentId'] = deployment_id + if instance_id is not None: + params['InstanceId'] = instance_id + if command_ids is not None: + params['CommandIds'] = command_ids + return self.make_request(action='DescribeCommands', + body=json.dumps(params)) + + def describe_deployments(self, stack_id=None, app_id=None, + deployment_ids=None): + """ + Requests a description of a specified set of deployments. + + You must specify at least one of the parameters. + + :type stack_id: string + :param stack_id: The stack ID. 
If you include this parameter,
+            `DescribeDeployments` returns a description of the deployments
+            associated with the specified stack.
+
+        :type app_id: string
+        :param app_id: The app ID. If you include this parameter,
+            `DescribeDeployments` returns a description of the deployments
+            associated with the specified app.
+
+        :type deployment_ids: list
+        :param deployment_ids: An array of deployment IDs to be described. If
+            you include this parameter, `DescribeDeployments` returns a
+            description of the specified deployments. Otherwise, it returns a
+            description of every deployment.
+
+        """
+        params = {}
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if app_id is not None:
+            params['AppId'] = app_id
+        if deployment_ids is not None:
+            params['DeploymentIds'] = deployment_ids
+        return self.make_request(action='DescribeDeployments',
+                                 body=json.dumps(params))
+
+    def describe_elastic_ips(self, instance_id=None, ips=None):
+        """
+        Describes `Elastic IP addresses`_.
+
+        You must specify at least one of the parameters.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you include this parameter,
+            `DescribeElasticIps` returns a description of the Elastic IP
+            addresses associated with the specified instance.
+
+        :type ips: list
+        :param ips: An array of Elastic IP addresses to be described. If you
+            include this parameter, `DescribeElasticIps` returns a description
+            of the specified Elastic IP addresses. Otherwise, it returns a
+            description of every Elastic IP address.
+
+        """
+        params = {}
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if ips is not None:
+            params['Ips'] = ips
+        return self.make_request(action='DescribeElasticIps',
+                                 body=json.dumps(params))
+
+    def describe_elastic_load_balancers(self, stack_id=None, layer_ids=None):
+        """
+        Describes a stack's Elastic Load Balancing instances.
+
+        You must specify at least one of the parameters.
+
+        :type stack_id: string
+        :param stack_id: A stack ID. The action describes the Elastic Load
+            Balancing instances for the stack.
+
+        :type layer_ids: list
+        :param layer_ids: A list of layer IDs. The action describes the Elastic
+            Load Balancing instances for the specified layers.
+
+        """
+        params = {}
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if layer_ids is not None:
+            params['LayerIds'] = layer_ids
+        return self.make_request(action='DescribeElasticLoadBalancers',
+                                 body=json.dumps(params))
+
+    def describe_instances(self, stack_id=None, layer_id=None,
+                           instance_ids=None):
+        """
+        Requests a description of a set of instances.
+
+        You must specify at least one of the parameters.
+
+        :type stack_id: string
+        :param stack_id: A stack ID. If you use this parameter,
+            `DescribeInstances` returns descriptions of the instances
+            associated with the specified stack.
+
+        :type layer_id: string
+        :param layer_id: A layer ID. If you use this parameter,
+            `DescribeInstances` returns descriptions of the instances
+            associated with the specified layer.
+
+        :type instance_ids: list
+        :param instance_ids: An array of instance IDs to be described. If you
+            use this parameter, `DescribeInstances` returns a description of
+            the specified instances. Otherwise, it returns a description of
+            every instance.
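+
+        Example (editor's sketch; hypothetical stack ID, `conn` as in the
+        earlier examples)::
+
+            result = conn.describe_instances(stack_id='STACK_ID')
+            for inst in result['Instances']:
+                print inst['Hostname'], inst['Status']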
+
+        """
+        params = {}
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if layer_id is not None:
+            params['LayerId'] = layer_id
+        if instance_ids is not None:
+            params['InstanceIds'] = instance_ids
+        return self.make_request(action='DescribeInstances',
+                                 body=json.dumps(params))
+
+    def describe_layers(self, stack_id, layer_ids=None):
+        """
+        Requests a description of one or more layers in a specified
+        stack.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        :type layer_ids: list
+        :param layer_ids: An array of layer IDs that specify the layers to be
+            described. If you omit this parameter, `DescribeLayers` returns a
+            description of every layer in the specified stack.
+
+        """
+        params = {'StackId': stack_id, }
+        if layer_ids is not None:
+            params['LayerIds'] = layer_ids
+        return self.make_request(action='DescribeLayers',
+                                 body=json.dumps(params))
+
+    def describe_load_based_auto_scaling(self, layer_ids):
+        """
+        Describes load-based auto scaling configurations for specified
+        layers.
+
+        :type layer_ids: list
+        :param layer_ids: An array of layer IDs.
+
+        """
+        params = {'LayerIds': layer_ids, }
+        return self.make_request(action='DescribeLoadBasedAutoScaling',
+                                 body=json.dumps(params))
+
+    def describe_permissions(self, iam_user_arn, stack_id):
+        """
+        Describes the permissions for a specified stack.
+
+        :type iam_user_arn: string
+        :param iam_user_arn: The user's IAM ARN. For more information about IAM
+            ARNs, see `Using Identifiers`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        """
+        params = {'IamUserArn': iam_user_arn, 'StackId': stack_id, }
+        return self.make_request(action='DescribePermissions',
+                                 body=json.dumps(params))
+
+    def describe_raid_arrays(self, instance_id=None, raid_array_ids=None):
+        """
+        Describes an instance's RAID arrays.
+
+        You must specify at least one of the parameters.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you use this parameter,
+            `DescribeRaidArrays` returns descriptions of the RAID arrays
+            associated with the specified instance.
+
+        :type raid_array_ids: list
+        :param raid_array_ids: An array of RAID array IDs. If you use this
+            parameter, `DescribeRaidArrays` returns descriptions of the
+            specified arrays. Otherwise, it returns a description of every
+            array.
+
+        """
+        params = {}
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if raid_array_ids is not None:
+            params['RaidArrayIds'] = raid_array_ids
+        return self.make_request(action='DescribeRaidArrays',
+                                 body=json.dumps(params))
+
+    def describe_service_errors(self, stack_id=None, instance_id=None,
+                                service_error_ids=None):
+        """
+        Describes AWS OpsWorks service errors.
+
+        :type stack_id: string
+        :param stack_id: The stack ID. If you use this parameter,
+            `DescribeServiceErrors` returns descriptions of the errors
+            associated with the specified stack.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you use this parameter,
+            `DescribeServiceErrors` returns descriptions of the errors
+            associated with the specified instance.
+
+        :type service_error_ids: list
+        :param service_error_ids: An array of service error IDs. If you use
+            this parameter, `DescribeServiceErrors` returns descriptions of the
+            specified errors. Otherwise, it returns a description of every
+            error.
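+
+        Example (editor's sketch; hypothetical stack ID, `conn` as in the
+        earlier examples)::
+
+            result = conn.describe_service_errors(stack_id='STACK_ID')
+            for err in result['ServiceErrors']:
+                print err['Type'], err['Message']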
+
+        """
+        params = {}
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if service_error_ids is not None:
+            params['ServiceErrorIds'] = service_error_ids
+        return self.make_request(action='DescribeServiceErrors',
+                                 body=json.dumps(params))
+
+    def describe_stacks(self, stack_ids=None):
+        """
+        Requests a description of one or more stacks.
+
+        :type stack_ids: list
+        :param stack_ids: An array of stack IDs that specify the stacks to be
+            described. If you omit this parameter, `DescribeStacks` returns a
+            description of every stack.
+
+        """
+        params = {}
+        if stack_ids is not None:
+            params['StackIds'] = stack_ids
+        return self.make_request(action='DescribeStacks',
+                                 body=json.dumps(params))
+
+    def describe_time_based_auto_scaling(self, instance_ids):
+        """
+        Describes time-based auto scaling configurations for specified
+        instances.
+
+        :type instance_ids: list
+        :param instance_ids: An array of instance IDs.
+
+        """
+        params = {'InstanceIds': instance_ids, }
+        return self.make_request(action='DescribeTimeBasedAutoScaling',
+                                 body=json.dumps(params))
+
+    def describe_user_profiles(self, iam_user_arns):
+        """
+        Describes the specified users.
+
+        :type iam_user_arns: list
+        :param iam_user_arns: An array of IAM user ARNs that identify the users
+            to be described.
+
+        """
+        params = {'IamUserArns': iam_user_arns, }
+        return self.make_request(action='DescribeUserProfiles',
+                                 body=json.dumps(params))
+
+    def describe_volumes(self, instance_id=None, raid_array_id=None,
+                         volume_ids=None):
+        """
+        Describes an instance's Amazon EBS volumes.
+
+        You must specify at least one of the parameters.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you use this parameter,
+            `DescribeVolumes` returns descriptions of the volumes associated
+            with the specified instance.
+
+        :type raid_array_id: string
+        :param raid_array_id: The RAID array ID. If you use this parameter,
+            `DescribeVolumes` returns descriptions of the volumes associated
+            with the specified RAID array.
+
+        :type volume_ids: list
+        :param volume_ids: An array of volume IDs. If you use this parameter,
+            `DescribeVolumes` returns descriptions of the specified volumes.
+            Otherwise, it returns a description of every volume.
+
+        """
+        params = {}
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if raid_array_id is not None:
+            params['RaidArrayId'] = raid_array_id
+        if volume_ids is not None:
+            params['VolumeIds'] = volume_ids
+        return self.make_request(action='DescribeVolumes',
+                                 body=json.dumps(params))
+
+    def detach_elastic_load_balancer(self, elastic_load_balancer_name,
+                                     layer_id):
+        """
+        Detaches a specified Elastic Load Balancing instance from its
+        layer.
+
+        :type elastic_load_balancer_name: string
+        :param elastic_load_balancer_name: The Elastic Load Balancing
+            instance's name.
+
+        :type layer_id: string
+        :param layer_id: The ID of the layer that the Elastic Load Balancing
+            instance is attached to.
+
+        """
+        params = {
+            'ElasticLoadBalancerName': elastic_load_balancer_name,
+            'LayerId': layer_id,
+        }
+        return self.make_request(action='DetachElasticLoadBalancer',
+                                 body=json.dumps(params))
+
+    def get_hostname_suggestion(self, layer_id):
+        """
+        Gets a generated host name for the specified layer, based on
+        the current host name theme.
+
+        :type layer_id: string
+        :param layer_id: The layer ID.
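+
+        Example (editor's sketch; hypothetical layer ID, `conn` as in the
+        earlier examples)::
+
+            result = conn.get_hostname_suggestion('LAYER_ID')
+            # e.g. {'LayerId': 'LAYER_ID', 'Hostname': 'fruits1'}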
+ + """ + params = {'LayerId': layer_id, } + return self.make_request(action='GetHostnameSuggestion', + body=json.dumps(params)) + + def reboot_instance(self, instance_id): + """ + Reboots a specified instance. For more information, see + `Starting, Stopping, and Rebooting Instances`_. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'InstanceId': instance_id, } + return self.make_request(action='RebootInstance', + body=json.dumps(params)) + + def set_load_based_auto_scaling(self, layer_id, enable=None, + up_scaling=None, down_scaling=None): + """ + Specify the load-based auto scaling configuration for a + specified layer. For more information, see `Managing Load with + Time-based and Load-based Instances`_. + + To use load-based auto scaling, you must create a set of load- + based auto scaling instances. Load-based auto scaling operates + only on the instances from that set, so you must ensure that + you have created enough instances to handle the maximum + anticipated load. + + :type layer_id: string + :param layer_id: The layer ID. + + :type enable: boolean + :param enable: Enables load-based auto scaling for the layer. + + :type up_scaling: dict + :param up_scaling: An `AutoScalingThresholds` object with the upscaling + threshold configuration. If the load exceeds these thresholds for a + specified amount of time, AWS OpsWorks starts a specified number of + instances. + + :type down_scaling: dict + :param down_scaling: An `AutoScalingThresholds` object with the + downscaling threshold configuration. If the load falls below these + thresholds for a specified amount of time, AWS OpsWorks stops a + specified number of instances. + + """ + params = {'LayerId': layer_id, } + if enable is not None: + params['Enable'] = enable + if up_scaling is not None: + params['UpScaling'] = up_scaling + if down_scaling is not None: + params['DownScaling'] = down_scaling + return self.make_request(action='SetLoadBasedAutoScaling', + body=json.dumps(params)) + + def set_permission(self, stack_id, iam_user_arn, allow_ssh=None, + allow_sudo=None): + """ + Specifies a stack's permissions. For more information, see + `Security and Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. + + :type allow_ssh: boolean + :param allow_ssh: The user is allowed to use SSH to communicate with + the instance. + + :type allow_sudo: boolean + :param allow_sudo: The user is allowed to use **sudo** to elevate + privileges. + + """ + params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, } + if allow_ssh is not None: + params['AllowSsh'] = allow_ssh + if allow_sudo is not None: + params['AllowSudo'] = allow_sudo + return self.make_request(action='SetPermission', + body=json.dumps(params)) + + def set_time_based_auto_scaling(self, instance_id, + auto_scaling_schedule=None): + """ + Specify the time-based auto scaling configuration for a + specified instance. For more information, see `Managing Load + with Time-based and Load-based Instances`_. + + :type instance_id: string + :param instance_id: The instance ID. + + :type auto_scaling_schedule: dict + :param auto_scaling_schedule: An `AutoScalingSchedule` with the + instance schedule. 
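+
+        Example (editor's sketch; the instance ID is a hypothetical
+        placeholder, `conn` is as in the earlier examples, and the
+        schedule shape, day-name keys mapping hour strings to "on",
+        follows the `AutoScalingSchedule` data type as the editor reads
+        the AWS API reference)::
+
+            # Run the instance from 09:00 through 11:59 on Mondays.
+            schedule = {'Monday': {'9': 'on', '10': 'on', '11': 'on'}}
+            conn.set_time_based_auto_scaling(
+                'INSTANCE_ID', auto_scaling_schedule=schedule)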
+
+        """
+        params = {'InstanceId': instance_id, }
+        if auto_scaling_schedule is not None:
+            params['AutoScalingSchedule'] = auto_scaling_schedule
+        return self.make_request(action='SetTimeBasedAutoScaling',
+                                 body=json.dumps(params))
+
+    def start_instance(self, instance_id):
+        """
+        Starts a specified instance. For more information, see
+        `Starting, Stopping, and Rebooting Instances`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='StartInstance',
+                                 body=json.dumps(params))
+
+    def start_stack(self, stack_id):
+        """
+        Starts a stack's instances.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        """
+        params = {'StackId': stack_id, }
+        return self.make_request(action='StartStack',
+                                 body=json.dumps(params))
+
+    def stop_instance(self, instance_id):
+        """
+        Stops a specified instance. When you stop a standard instance,
+        the data disappears and must be reinstalled when you restart
+        the instance. You can stop an Amazon EBS-backed instance
+        without losing data. For more information, see `Starting,
+        Stopping, and Rebooting Instances`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='StopInstance',
+                                 body=json.dumps(params))
+
+    def stop_stack(self, stack_id):
+        """
+        Stops a specified stack.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        """
+        params = {'StackId': stack_id, }
+        return self.make_request(action='StopStack',
+                                 body=json.dumps(params))
+
+    def update_app(self, app_id, name=None, description=None, type=None,
+                   app_source=None, domains=None, enable_ssl=None,
+                   ssl_configuration=None, attributes=None):
+        """
+        Updates a specified app.
+
+        :type app_id: string
+        :param app_id: The app ID.
+
+        :type name: string
+        :param name: The app name.
+
+        :type description: string
+        :param description: A description of the app.
+
+        :type type: string
+        :param type: The app type.
+
+        :type app_source: dict
+        :param app_source: A `Source` object that specifies the app repository.
+
+        :type domains: list
+        :param domains: The app's virtual host settings, with multiple domains
+            separated by commas. For example: `'www.example.com, example.com'`
+
+        :type enable_ssl: boolean
+        :param enable_ssl: Whether SSL is enabled for the app.
+
+        :type ssl_configuration: dict
+        :param ssl_configuration: An `SslConfiguration` object with the SSL
+            configuration.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes bag.
+
+        """
+        params = {'AppId': app_id, }
+        if name is not None:
+            params['Name'] = name
+        if description is not None:
+            params['Description'] = description
+        if type is not None:
+            params['Type'] = type
+        if app_source is not None:
+            params['AppSource'] = app_source
+        if domains is not None:
+            params['Domains'] = domains
+        if enable_ssl is not None:
+            params['EnableSsl'] = enable_ssl
+        if ssl_configuration is not None:
+            params['SslConfiguration'] = ssl_configuration
+        if attributes is not None:
+            params['Attributes'] = attributes
+        return self.make_request(action='UpdateApp',
+                                 body=json.dumps(params))
+
+    def update_instance(self, instance_id, layer_ids=None,
+                        instance_type=None, auto_scaling_type=None,
+                        hostname=None, os=None, ami_id=None,
+                        ssh_key_name=None, architecture=None,
+                        install_updates_on_boot=None):
+        """
+        Updates a specified instance.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        :type layer_ids: list
+        :param layer_ids: The instance's layer IDs.
+
+        :type instance_type: string
+        :param instance_type: The instance type. AWS OpsWorks supports all
+            instance types except Cluster Compute, Cluster GPU, and High Memory
+            Cluster. For more information, see `Instance Families and Types`_.
+            The parameter values that you use to specify the various types are
+            in the API Name column of the Available Instance Types table.
+
+        :type auto_scaling_type: string
+        :param auto_scaling_type:
+            The instance's auto scaling type, which has three possible values:
+
+
+        + **AlwaysRunning**: A 24/7 instance, which is not affected by auto
+          scaling.
+        + **TimeBasedAutoScaling**: A time-based auto scaling instance, which
+          is started and stopped based on a specified schedule.
+        + **LoadBasedAutoScaling**: A load-based auto scaling instance, which
+          is started and stopped based on load metrics.
+
+        :type hostname: string
+        :param hostname: The instance host name.
+
+        :type os: string
+        :param os: The instance operating system, which must be set to one of
+            the following.
+
+        + Standard operating systems: `Amazon Linux` or `Ubuntu 12.04 LTS`
+        + Custom AMIs: `Custom`
+
+
+        The default option is `Amazon Linux`. If you set this parameter to
+        `Custom`, you must use the CreateInstance action's AmiId parameter
+        to specify the custom AMI that you want to use. For more
+        information on the standard operating systems, see `Operating
+        Systems`_. For more information on how to use custom AMIs with
+        OpsWorks, see `Using Custom AMIs`_.
+
+        :type ami_id: string
+        :param ami_id: A custom AMI ID to be used to create the instance. The
+            AMI should be based on one of the standard AWS OpsWorks AMIs:
+            Amazon Linux or Ubuntu 12.04 LTS. For more information, see
+            `Instances`_.
+
+        :type ssh_key_name: string
+        :param ssh_key_name: The instance SSH key name.
+
+        :type architecture: string
+        :param architecture: The instance architecture. Instance types do not
+            necessarily support both architectures. For a list of the
+            architectures that are supported by the different instance types,
+            see `Instance Families and Types`_.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+            Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+        We strongly recommend using the default value of `True`, to ensure that
+        your instances have the latest security updates.
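+
+        Example (editor's sketch; hypothetical instance ID, `conn` as in
+        the earlier examples), resizing an instance::
+
+            conn.update_instance('INSTANCE_ID', instance_type='m1.large')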
+
+        """
+        params = {'InstanceId': instance_id, }
+        if layer_ids is not None:
+            params['LayerIds'] = layer_ids
+        if instance_type is not None:
+            params['InstanceType'] = instance_type
+        if auto_scaling_type is not None:
+            params['AutoScalingType'] = auto_scaling_type
+        if hostname is not None:
+            params['Hostname'] = hostname
+        if os is not None:
+            params['Os'] = os
+        if ami_id is not None:
+            params['AmiId'] = ami_id
+        if ssh_key_name is not None:
+            params['SshKeyName'] = ssh_key_name
+        if architecture is not None:
+            params['Architecture'] = architecture
+        if install_updates_on_boot is not None:
+            params['InstallUpdatesOnBoot'] = install_updates_on_boot
+        return self.make_request(action='UpdateInstance',
+                                 body=json.dumps(params))
+
+    def update_layer(self, layer_id, name=None, shortname=None,
+                     attributes=None, custom_instance_profile_arn=None,
+                     custom_security_group_ids=None, packages=None,
+                     volume_configurations=None, enable_auto_healing=None,
+                     auto_assign_elastic_ips=None, custom_recipes=None,
+                     install_updates_on_boot=None):
+        """
+        Updates a specified layer.
+
+        :type layer_id: string
+        :param layer_id: The layer ID.
+
+        :type name: string
+        :param name: The layer name, which is used by the console.
+
+        :type shortname: string
+        :param shortname: The layer short name, which is used internally by AWS
+            OpsWorks and by Chef. The short name is also used as the name for
+            the directory where your app files are installed. It can have a
+            maximum of 200 characters and must be in the following format:
+            /\A[a-z0-9\-\_\.]+\Z/.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes bag.
+
+        :type custom_instance_profile_arn: string
+        :param custom_instance_profile_arn: The ARN of an IAM profile to be
+            used for all of the layer's EC2 instances. For more information
+            about IAM ARNs, see `Using Identifiers`_.
+
+        :type custom_security_group_ids: list
+        :param custom_security_group_ids: An array containing the layer's
+            custom security group IDs.
+
+        :type packages: list
+        :param packages: An array of `Package` objects that describe the
+            layer's packages.
+
+        :type volume_configurations: list
+        :param volume_configurations: A `VolumeConfigurations` object that
+            describes the layer's Amazon EBS volumes.
+
+        :type enable_auto_healing: boolean
+        :param enable_auto_healing: Whether to enable auto healing for the
+            layer.
+
+        :type auto_assign_elastic_ips: boolean
+        :param auto_assign_elastic_ips: Whether to automatically assign an
+            `Elastic IP address`_ to the layer.
+
+        :type custom_recipes: dict
+        :param custom_recipes: A `LayerCustomRecipes` object that specifies the
+            layer's custom recipes.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+            Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+        We strongly recommend using the default value of `True`, to ensure that
+        your instances have the latest security updates.
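+
+        Example (editor's sketch; hypothetical layer ID, `conn` as in the
+        earlier examples), turning auto healing on for a layer::
+
+            conn.update_layer('LAYER_ID', enable_auto_healing=True)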
+
+        """
+        params = {'LayerId': layer_id, }
+        if name is not None:
+            params['Name'] = name
+        if shortname is not None:
+            params['Shortname'] = shortname
+        if attributes is not None:
+            params['Attributes'] = attributes
+        if custom_instance_profile_arn is not None:
+            params['CustomInstanceProfileArn'] = custom_instance_profile_arn
+        if custom_security_group_ids is not None:
+            params['CustomSecurityGroupIds'] = custom_security_group_ids
+        if packages is not None:
+            params['Packages'] = packages
+        if volume_configurations is not None:
+            params['VolumeConfigurations'] = volume_configurations
+        if enable_auto_healing is not None:
+            params['EnableAutoHealing'] = enable_auto_healing
+        if auto_assign_elastic_ips is not None:
+            params['AutoAssignElasticIps'] = auto_assign_elastic_ips
+        if custom_recipes is not None:
+            params['CustomRecipes'] = custom_recipes
+        if install_updates_on_boot is not None:
+            params['InstallUpdatesOnBoot'] = install_updates_on_boot
+        return self.make_request(action='UpdateLayer',
+                                 body=json.dumps(params))
+
+    def update_stack(self, stack_id, name=None, attributes=None,
+                     service_role_arn=None,
+                     default_instance_profile_arn=None, default_os=None,
+                     hostname_theme=None, default_availability_zone=None,
+                     default_subnet_id=None, custom_json=None,
+                     configuration_manager=None, use_custom_cookbooks=None,
+                     custom_cookbooks_source=None, default_ssh_key_name=None,
+                     default_root_device_type=None):
+        """
+        Updates a specified stack.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        :type name: string
+        :param name: The stack's new name.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes bag.
+
+        :type service_role_arn: string
+        :param service_role_arn:
+            The stack AWS Identity and Access Management (IAM) role, which
+            allows AWS OpsWorks to work with AWS resources on your behalf. You
+            must set this parameter to the Amazon Resource Name (ARN) for an
+            existing IAM role. For more information about IAM ARNs, see `Using
+            Identifiers`_.
+
+        You must set this parameter to a valid service role ARN or the action
+        will fail; there is no default value. You can specify the stack's
+        current service role ARN, if you prefer, but you must do so
+        explicitly.
+
+        :type default_instance_profile_arn: string
+        :param default_instance_profile_arn: The ARN of an IAM profile that is
+            the default profile for all of the stack's EC2 instances. For more
+            information about IAM ARNs, see `Using Identifiers`_.
+
+        :type default_os: string
+        :param default_os: The stack's default operating system, which must be
+            set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is
+            `Amazon Linux`.
+
+        :type hostname_theme: string
+        :param hostname_theme: The stack's new host name theme, with spaces
+            replaced by underscores. The theme is used to generate host names
+            for the stack's instances. By default, `HostnameTheme` is set to
+            Layer_Dependent, which creates host names by appending integers to
+            the layer's short name. The other themes are:
+
+        + Baked_Goods
+        + Clouds
+        + European_Cities
+        + Fruits
+        + Greek_Deities
+        + Legendary_Creatures_from_Japan
+        + Planets_and_Moons
+        + Roman_Deities
+        + Scottish_Islands
+        + US_Cities
+        + Wild_Cats
+
+
+        To obtain a generated host name, call `GetHostNameSuggestion`, which
+        returns a host name based on the current theme.
+
+        :type default_availability_zone: string
+        :param default_availability_zone: The stack's default Availability
+            Zone, which must be in the specified region. For more information,
+            see `Regions and Endpoints`_. If you also specify a value for
+            `DefaultSubnetId`, the subnet must be in the same zone. For more
+            information, see CreateStack.
+
+        :type default_subnet_id: string
+        :param default_subnet_id: The stack's default subnet ID. All instances
+            will be launched into this subnet unless you specify otherwise when
+            you create the instance. If you also specify a value for
+            `DefaultAvailabilityZone`, the subnet must be in that zone. For
+            more information, see CreateStack.
+
+        :type custom_json: string
+        :param custom_json: A string that contains user-defined, custom JSON.
+            It is used to override the corresponding default stack
+            configuration JSON values. The string should be in the following
+            format and must escape characters such as '"'.: `"{\"key1\":
+            \"value1\", \"key2\": \"value2\",...}"`
+        For more information on custom JSON, see `Use Custom JSON to Modify the
+        Stack Configuration JSON`_.
+
+        :type configuration_manager: dict
+        :param configuration_manager: The configuration manager. When you
+            update a stack, you can optionally use the configuration manager to
+            specify the Chef version, 0.9 or 11.4. If you omit this parameter,
+            AWS OpsWorks does not change the Chef version.
+
+        :type use_custom_cookbooks: boolean
+        :param use_custom_cookbooks: Whether the stack uses custom cookbooks.
+
+        :type custom_cookbooks_source: dict
+        :param custom_cookbooks_source: Contains the information required to
+            retrieve an app or cookbook from a repository. For more
+            information, see `Creating Apps`_ or `Custom Recipes and
+            Cookbooks`_.
+
+        :type default_ssh_key_name: string
+        :param default_ssh_key_name: A default SSH key for the stack instances.
+            You can override this value when you create or update an instance.
+
+        :type default_root_device_type: string
+        :param default_root_device_type: The default root device type. This
+            value is used by default for all instances in the stack, but you
+            can override it when you create an instance. For more
+            information, see `Storage for the Root Device`_.
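+
+        Example (editor's sketch; hypothetical stack ID, `conn` as in the
+        earlier examples), overriding a stack configuration value with
+        custom JSON (as a Python string literal, no extra escaping is
+        needed)::
+
+            conn.update_stack(
+                'STACK_ID', custom_json='{"key1": "value1"}')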
+ + """ + params = {'StackId': stack_id, } + if name is not None: + params['Name'] = name + if attributes is not None: + params['Attributes'] = attributes + if service_role_arn is not None: + params['ServiceRoleArn'] = service_role_arn + if default_instance_profile_arn is not None: + params['DefaultInstanceProfileArn'] = default_instance_profile_arn + if default_os is not None: + params['DefaultOs'] = default_os + if hostname_theme is not None: + params['HostnameTheme'] = hostname_theme + if default_availability_zone is not None: + params['DefaultAvailabilityZone'] = default_availability_zone + if default_subnet_id is not None: + params['DefaultSubnetId'] = default_subnet_id + if custom_json is not None: + params['CustomJson'] = custom_json + if configuration_manager is not None: + params['ConfigurationManager'] = configuration_manager + if use_custom_cookbooks is not None: + params['UseCustomCookbooks'] = use_custom_cookbooks + if custom_cookbooks_source is not None: + params['CustomCookbooksSource'] = custom_cookbooks_source + if default_ssh_key_name is not None: + params['DefaultSshKeyName'] = default_ssh_key_name + if default_root_device_type is not None: + params['DefaultRootDeviceType'] = default_root_device_type + return self.make_request(action='UpdateStack', + body=json.dumps(params)) + + def update_user_profile(self, iam_user_arn, ssh_username=None, + ssh_public_key=None): + """ + Updates a specified user profile. + + :type iam_user_arn: string + :param iam_user_arn: The user IAM ARN. + + :type ssh_username: string + :param ssh_username: The user's new SSH user name. + + :type ssh_public_key: string + :param ssh_public_key: The user's new SSH public key. + + """ + params = {'IamUserArn': iam_user_arn, } + if ssh_username is not None: + params['SshUsername'] = ssh_username + if ssh_public_key is not None: + params['SshPublicKey'] = ssh_public_key + return self.make_request(action='UpdateUserProfile', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read() + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/awx/lib/site-packages/boto/plugin.py b/awx/lib/site-packages/boto/plugin.py new file mode 100644 index 0000000000..f8b592cced --- /dev/null +++ b/awx/lib/site-packages/boto/plugin.py @@ -0,0 +1,90 @@ +# Copyright 2010 Google Inc. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+"""
+Implements the plugin-related API.
+
+To define a new plugin, just subclass Plugin, like this:
+
+class AuthPlugin(Plugin):
+    pass
+
+Then start creating subclasses of your new plugin.
+
+class MyFancyAuth(AuthPlugin):
+    capability = ['sign', 'vmac']
+
+The actual interface is duck typed.
+
+"""
+
+import glob
+import imp, os.path
+
+class Plugin(object):
+    """Base class for all plugins."""
+
+    capability = []
+
+    @classmethod
+    def is_capable(cls, requested_capability):
+        """Returns True if the requested capability is supported by this plugin
+        """
+        for c in requested_capability:
+            if c not in cls.capability:
+                return False
+        return True
+
+def get_plugin(cls, requested_capability=None):
+    if not requested_capability:
+        requested_capability = []
+    result = []
+    for handler in cls.__subclasses__():
+        if handler.is_capable(requested_capability):
+            result.append(handler)
+    return result
+
+def _import_module(filename):
+    (path, name) = os.path.split(filename)
+    (name, ext) = os.path.splitext(name)
+
+    (file, filename, data) = imp.find_module(name, [path])
+    try:
+        return imp.load_module(name, file, filename, data)
+    finally:
+        if file:
+            file.close()
+
+_plugin_loaded = False
+
+def load_plugins(config):
+    global _plugin_loaded
+    if _plugin_loaded:
+        return
+    _plugin_loaded = True
+
+    if not config.has_option('Plugin', 'plugin_directory'):
+        return
+    directory = config.get('Plugin', 'plugin_directory')
+    for file in glob.glob(os.path.join(directory, '*.py')):
+        _import_module(file)
+
diff --git a/awx/lib/site-packages/boto/provider.py b/awx/lib/site-packages/boto/provider.py
new file mode 100644
index 0000000000..e27247cd3a
--- /dev/null
+++ b/awx/lib/site-packages/boto/provider.py
@@ -0,0 +1,352 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright 2010 Google Inc.
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2011, Nexenta Systems Inc.
+# All rights reserved.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +""" +This class encapsulates the provider-specific header differences. +""" + +import os +from datetime import datetime + +import boto +from boto import config +from boto.gs.acl import ACL +from boto.gs.acl import CannedACLStrings as CannedGSACLStrings +from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings +from boto.s3.acl import Policy + + +HEADER_PREFIX_KEY = 'header_prefix' +METADATA_PREFIX_KEY = 'metadata_prefix' + +AWS_HEADER_PREFIX = 'x-amz-' +GOOG_HEADER_PREFIX = 'x-goog-' + +ACL_HEADER_KEY = 'acl-header' +AUTH_HEADER_KEY = 'auth-header' +COPY_SOURCE_HEADER_KEY = 'copy-source-header' +COPY_SOURCE_VERSION_ID_HEADER_KEY = 'copy-source-version-id-header' +COPY_SOURCE_RANGE_HEADER_KEY = 'copy-source-range-header' +DELETE_MARKER_HEADER_KEY = 'delete-marker-header' +DATE_HEADER_KEY = 'date-header' +METADATA_DIRECTIVE_HEADER_KEY = 'metadata-directive-header' +RESUMABLE_UPLOAD_HEADER_KEY = 'resumable-upload-header' +SECURITY_TOKEN_HEADER_KEY = 'security-token-header' +STORAGE_CLASS_HEADER_KEY = 'storage-class' +MFA_HEADER_KEY = 'mfa-header' +SERVER_SIDE_ENCRYPTION_KEY = 'server-side-encryption-header' +VERSION_ID_HEADER_KEY = 'version-id-header' + +STORAGE_COPY_ERROR = 'StorageCopyError' +STORAGE_CREATE_ERROR = 'StorageCreateError' +STORAGE_DATA_ERROR = 'StorageDataError' +STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError' +STORAGE_RESPONSE_ERROR = 'StorageResponseError' + + +class Provider(object): + + CredentialMap = { + 'aws': ('aws_access_key_id', 'aws_secret_access_key'), + 'google': ('gs_access_key_id', 'gs_secret_access_key'), + } + + AclClassMap = { + 'aws': Policy, + 'google': ACL + } + + CannedAclsMap = { + 'aws': CannedS3ACLStrings, + 'google': CannedGSACLStrings + } + + HostKeyMap = { + 'aws': 's3', + 'google': 'gs' + } + + ChunkedTransferSupport = { + 'aws': False, + 'google': True + } + + MetadataServiceSupport = { + 'aws': True, + 'google': False + } + + # If you update this map please make sure to put "None" for the + # right-hand-side for any headers that don't apply to a provider, rather + # than simply leaving that header out (which would cause KeyErrors). 
+ HeaderInfoMap = { + 'aws': { + HEADER_PREFIX_KEY: AWS_HEADER_PREFIX, + METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-', + ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl', + AUTH_HEADER_KEY: 'AWS', + COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source', + COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + + 'copy-source-version-id', + COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX + + 'copy-source-range', + DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date', + DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker', + METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX + + 'metadata-directive', + RESUMABLE_UPLOAD_HEADER_KEY: None, + SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token', + SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX + + 'server-side-encryption', + VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id', + STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class', + MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa', + }, + 'google': { + HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX, + METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-', + ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl', + AUTH_HEADER_KEY: 'GOOG1', + COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source', + COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + + 'copy-source-version-id', + COPY_SOURCE_RANGE_HEADER_KEY: None, + DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date', + DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker', + METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX + + 'metadata-directive', + RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable', + SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token', + SERVER_SIDE_ENCRYPTION_KEY: None, + # Note that this version header is not to be confused with + # the Google Cloud Storage 'x-goog-api-version' header. + VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id', + STORAGE_CLASS_HEADER_KEY: None, + MFA_HEADER_KEY: None, + } + } + + ErrorMap = { + 'aws': { + STORAGE_COPY_ERROR: boto.exception.S3CopyError, + STORAGE_CREATE_ERROR: boto.exception.S3CreateError, + STORAGE_DATA_ERROR: boto.exception.S3DataError, + STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError, + STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError, + }, + 'google': { + STORAGE_COPY_ERROR: boto.exception.GSCopyError, + STORAGE_CREATE_ERROR: boto.exception.GSCreateError, + STORAGE_DATA_ERROR: boto.exception.GSDataError, + STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError, + STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError, + } + } + + def __init__(self, name, access_key=None, secret_key=None, + security_token=None): + self.host = None + self.port = None + self.host_header = None + self.access_key = access_key + self.secret_key = secret_key + self.security_token = security_token + self.name = name + self.acl_class = self.AclClassMap[self.name] + self.canned_acls = self.CannedAclsMap[self.name] + self._credential_expiry_time = None + self.get_credentials(access_key, secret_key) + self.configure_headers() + self.configure_errors() + # Allow config file to override default host and port. 
+ host_opt_name = '%s_host' % self.HostKeyMap[self.name] + if config.has_option('Credentials', host_opt_name): + self.host = config.get('Credentials', host_opt_name) + port_opt_name = '%s_port' % self.HostKeyMap[self.name] + if config.has_option('Credentials', port_opt_name): + self.port = config.getint('Credentials', port_opt_name) + host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name] + if config.has_option('Credentials', host_header_opt_name): + self.host_header = config.get('Credentials', host_header_opt_name) + + def get_access_key(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._access_key + + def set_access_key(self, value): + self._access_key = value + + access_key = property(get_access_key, set_access_key) + + def get_secret_key(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._secret_key + + def set_secret_key(self, value): + self._secret_key = value + + secret_key = property(get_secret_key, set_secret_key) + + def get_security_token(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._security_token + + def set_security_token(self, value): + self._security_token = value + + security_token = property(get_security_token, set_security_token) + + def _credentials_need_refresh(self): + if self._credential_expiry_time is None: + return False + else: + # The credentials should be refreshed if they're going to expire + # in less than 5 minutes. + delta = self._credential_expiry_time - datetime.utcnow() + # python2.6 does not have timedelta.total_seconds() so we have + # to calculate this ourselves. This is straight from the + # datetime docs. + seconds_left = ( + (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) + * 10**6) / 10**6) + if seconds_left < (5 * 60): + boto.log.debug("Credentials need to be refreshed.") + return True + else: + return False + + def get_credentials(self, access_key=None, secret_key=None): + access_key_name, secret_key_name = self.CredentialMap[self.name] + if access_key is not None: + self.access_key = access_key + boto.log.debug("Using access key provided by client.") + elif access_key_name.upper() in os.environ: + self.access_key = os.environ[access_key_name.upper()] + boto.log.debug("Using access key found in environment variable.") + elif config.has_option('Credentials', access_key_name): + self.access_key = config.get('Credentials', access_key_name) + boto.log.debug("Using access key found in config file.") + + if secret_key is not None: + self.secret_key = secret_key + boto.log.debug("Using secret key provided by client.") + elif secret_key_name.upper() in os.environ: + self.secret_key = os.environ[secret_key_name.upper()] + boto.log.debug("Using secret key found in environment variable.") + elif config.has_option('Credentials', secret_key_name): + self.secret_key = config.get('Credentials', secret_key_name) + boto.log.debug("Using secret key found in config file.") + elif config.has_option('Credentials', 'keyring'): + keyring_name = config.get('Credentials', 'keyring') + try: + import keyring + except ImportError: + boto.log.error("The keyring module could not be imported. 
" + "For keyring support, install the keyring " + "module.") + raise + self.secret_key = keyring.get_password( + keyring_name, self.access_key) + boto.log.debug("Using secret key found in keyring.") + + if ((self._access_key is None or self._secret_key is None) and + self.MetadataServiceSupport[self.name]): + self._populate_keys_from_metadata_server() + self._secret_key = self._convert_key_to_str(self._secret_key) + + def _populate_keys_from_metadata_server(self): + # get_instance_metadata is imported here because of a circular + # dependency. + boto.log.debug("Retrieving credentials from metadata server.") + from boto.utils import get_instance_metadata + timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0) + attempts = config.getint('Boto', 'metadata_service_num_attempts', 1) + # The num_retries arg is actually the total number of attempts made, + # so the config options is named *_num_attempts to make this more + # clear to users. + metadata = get_instance_metadata( + timeout=timeout, num_retries=attempts, + data='meta-data/iam/security-credentials') + if metadata: + # I'm assuming there's only one role on the instance profile. + security = metadata.values()[0] + self._access_key = security['AccessKeyId'] + self._secret_key = self._convert_key_to_str(security['SecretAccessKey']) + self._security_token = security['Token'] + expires_at = security['Expiration'] + self._credential_expiry_time = datetime.strptime( + expires_at, "%Y-%m-%dT%H:%M:%SZ") + boto.log.debug("Retrieved credentials will expire in %s at: %s", + self._credential_expiry_time - datetime.now(), expires_at) + + def _convert_key_to_str(self, key): + if isinstance(key, unicode): + # the secret key must be bytes and not unicode to work + # properly with hmac.new (see http://bugs.python.org/issue5285) + return str(key) + return key + + def configure_headers(self): + header_info_map = self.HeaderInfoMap[self.name] + self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY] + self.header_prefix = header_info_map[HEADER_PREFIX_KEY] + self.acl_header = header_info_map[ACL_HEADER_KEY] + self.auth_header = header_info_map[AUTH_HEADER_KEY] + self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY] + self.copy_source_version_id = header_info_map[ + COPY_SOURCE_VERSION_ID_HEADER_KEY] + self.copy_source_range_header = header_info_map[ + COPY_SOURCE_RANGE_HEADER_KEY] + self.date_header = header_info_map[DATE_HEADER_KEY] + self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY] + self.metadata_directive_header = ( + header_info_map[METADATA_DIRECTIVE_HEADER_KEY]) + self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY] + self.resumable_upload_header = ( + header_info_map[RESUMABLE_UPLOAD_HEADER_KEY]) + self.server_side_encryption_header = header_info_map[SERVER_SIDE_ENCRYPTION_KEY] + self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY] + self.version_id = header_info_map[VERSION_ID_HEADER_KEY] + self.mfa_header = header_info_map[MFA_HEADER_KEY] + + def configure_errors(self): + error_map = self.ErrorMap[self.name] + self.storage_copy_error = error_map[STORAGE_COPY_ERROR] + self.storage_create_error = error_map[STORAGE_CREATE_ERROR] + self.storage_data_error = error_map[STORAGE_DATA_ERROR] + self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR] + self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR] + + def get_provider_name(self): + return self.HostKeyMap[self.name] + + def supports_chunked_transfer(self): + return 
self.ChunkedTransferSupport[self.name] + +# Static utility method for getting default Provider. +def get_default(): + return Provider('aws') diff --git a/awx/lib/site-packages/boto/pyami/__init__.py b/awx/lib/site-packages/boto/pyami/__init__.py new file mode 100644 index 0000000000..303dbb66c9 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/awx/lib/site-packages/boto/pyami/bootstrap.py b/awx/lib/site-packages/boto/pyami/bootstrap.py new file mode 100644 index 0000000000..cd44682fc8 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/bootstrap.py @@ -0,0 +1,134 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import os +import boto +from boto.utils import get_instance_metadata, get_instance_userdata +from boto.pyami.config import Config, BotoConfigPath +from boto.pyami.scriptbase import ScriptBase +import time + +class Bootstrap(ScriptBase): + """ + The Bootstrap class is instantiated and run as part of the PyAMI + instance initialization process. The methods in this class will + be run from the rc.local script of the instance and will be run + as the root user. + + The main purpose of this class is to make sure the boto distribution + on the instance is the one required. 
+ """ + + def __init__(self): + self.working_dir = '/mnt/pyami' + self.write_metadata() + ScriptBase.__init__(self) + + def write_metadata(self): + fp = open(os.path.expanduser(BotoConfigPath), 'w') + fp.write('[Instance]\n') + inst_data = get_instance_metadata() + for key in inst_data: + fp.write('%s = %s\n' % (key, inst_data[key])) + user_data = get_instance_userdata() + fp.write('\n%s\n' % user_data) + fp.write('[Pyami]\n') + fp.write('working_dir = %s\n' % self.working_dir) + fp.close() + # This file has the AWS credentials, should we lock it down? + # os.chmod(BotoConfigPath, stat.S_IREAD | stat.S_IWRITE) + # now that we have written the file, read it into a pyami Config object + boto.config = Config() + boto.init_logging() + + def create_working_dir(self): + boto.log.info('Working directory: %s' % self.working_dir) + if not os.path.exists(self.working_dir): + os.mkdir(self.working_dir) + + def load_boto(self): + update = boto.config.get('Boto', 'boto_update', 'svn:HEAD') + if update.startswith('svn'): + if update.find(':') >= 0: + method, version = update.split(':') + version = '-r%s' % version + else: + version = '-rHEAD' + location = boto.config.get('Boto', 'boto_location', '/usr/local/boto') + self.run('svn update %s %s' % (version, location)) + elif update.startswith('git'): + location = boto.config.get('Boto', 'boto_location', '/usr/share/python-support/python-boto/boto') + num_remaining_attempts = 10 + while num_remaining_attempts > 0: + num_remaining_attempts -= 1 + try: + self.run('git pull', cwd=location) + num_remaining_attempts = 0 + except Exception, e: + boto.log.info('git pull attempt failed with the following exception. Trying again in a bit. %s', e) + time.sleep(2) + if update.find(':') >= 0: + method, version = update.split(':') + else: + version = 'master' + self.run('git checkout %s' % version, cwd=location) + else: + # first remove the symlink needed when running from subversion + self.run('rm /usr/local/lib/python2.5/site-packages/boto') + self.run('easy_install %s' % update) + + def fetch_s3_file(self, s3_file): + try: + from boto.utils import fetch_file + f = fetch_file(s3_file) + path = os.path.join(self.working_dir, s3_file.split("/")[-1]) + open(path, "w").write(f.read()) + except: + boto.log.exception('Problem Retrieving file: %s' % s3_file) + path = None + return path + + def load_packages(self): + package_str = boto.config.get('Pyami', 'packages') + if package_str: + packages = package_str.split(',') + for package in packages: + package = package.strip() + if package.startswith('s3:'): + package = self.fetch_s3_file(package) + if package: + # if the "package" is really a .py file, it doesn't have to + # be installed, just being in the working dir is enough + if not package.endswith('.py'): + self.run('easy_install -Z %s' % package, exit_on_error=False) + + def main(self): + self.create_working_dir() + self.load_boto() + self.load_packages() + self.notify('Bootstrap Completed for %s' % boto.config.get_instance('instance-id')) + +if __name__ == "__main__": + # because bootstrap starts before any logging configuration can be loaded from + # the boto config files, we will manually enable logging to /var/log/boto.log + boto.set_file_logger('bootstrap', '/var/log/boto.log') + bs = Bootstrap() + bs.main() diff --git a/awx/lib/site-packages/boto/pyami/config.py b/awx/lib/site-packages/boto/pyami/config.py new file mode 100644 index 0000000000..08da6581e5 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/config.py @@ -0,0 +1,229 @@ +# Copyright (c) 2006,2007 
Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import StringIO, os, re +import warnings +import ConfigParser +import boto + +# If running in Google App Engine there is no "user" and +# os.path.expanduser() will fail. Attempt to detect this case and use a +# no-op expanduser function in this case. +try: + os.path.expanduser('~') + expanduser = os.path.expanduser +except (AttributeError, ImportError): + # This is probably running on App Engine. + expanduser = (lambda x: x) + +# By default we use two locations for the boto configurations, +# /etc/boto.cfg and ~/.boto (which works on Windows and Unix). +BotoConfigPath = '/etc/boto.cfg' +BotoConfigLocations = [BotoConfigPath] +UserConfigPath = os.path.join(expanduser('~'), '.boto') +BotoConfigLocations.append(UserConfigPath) + +# If there's a BOTO_CONFIG variable set, we load ONLY +# that variable +if 'BOTO_CONFIG' in os.environ: + BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])] + +# If there's a BOTO_PATH variable set, we use anything there +# as the current configuration locations, split with colons +elif 'BOTO_PATH' in os.environ: + BotoConfigLocations = [] + for path in os.environ['BOTO_PATH'].split(":"): + BotoConfigLocations.append(expanduser(path)) + + +class Config(ConfigParser.SafeConfigParser): + + def __init__(self, path=None, fp=None, do_load=True): + ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami', + 'debug' : '0'}) + if do_load: + if path: + self.load_from_path(path) + elif fp: + self.readfp(fp) + else: + self.read(BotoConfigLocations) + if "AWS_CREDENTIAL_FILE" in os.environ: + full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE']) + try: + self.load_credential_file(full_path) + except IOError: + warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path) + + def load_credential_file(self, path): + """Load a credential file as is setup like the Java utilities""" + c_data = StringIO.StringIO() + c_data.write("[Credentials]\n") + for line in open(path, "r").readlines(): + c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key")) + c_data.seek(0) + self.readfp(c_data) + + def load_from_path(self, path): + file = open(path) + for line in file.readlines(): + match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line) + if match: + extended_file = match.group(1) + (dir, file) = os.path.split(path) + self.load_from_path(os.path.join(dir, 
extended_file)) + self.read(path) + + def save_option(self, path, section, option, value): + """ + Write the specified Section.Option to the config file specified by path. + Replace any previous value. If the path doesn't exist, create it. + Also add the option to the in-memory config. + """ + config = ConfigParser.SafeConfigParser() + config.read(path) + if not config.has_section(section): + config.add_section(section) + config.set(section, option, value) + fp = open(path, 'w') + config.write(fp) + fp.close() + if not self.has_section(section): + self.add_section(section) + self.set(section, option, value) + + def save_user_option(self, section, option, value): + self.save_option(UserConfigPath, section, option, value) + + def save_system_option(self, section, option, value): + self.save_option(BotoConfigPath, section, option, value) + + def get_instance(self, name, default=None): + try: + val = self.get('Instance', name) + except: + val = default + return val + + def get_user(self, name, default=None): + try: + val = self.get('User', name) + except: + val = default + return val + + def getint_user(self, name, default=0): + try: + val = self.getint('User', name) + except: + val = default + return val + + def get_value(self, section, name, default=None): + return self.get(section, name, default) + + def get(self, section, name, default=None): + try: + val = ConfigParser.SafeConfigParser.get(self, section, name) + except: + val = default + return val + + def getint(self, section, name, default=0): + try: + val = ConfigParser.SafeConfigParser.getint(self, section, name) + except: + val = int(default) + return val + + def getfloat(self, section, name, default=0.0): + try: + val = ConfigParser.SafeConfigParser.getfloat(self, section, name) + except: + val = float(default) + return val + + def getbool(self, section, name, default=False): + if self.has_option(section, name): + val = self.get(section, name) + if val.lower() == 'true': + val = True + else: + val = False + else: + val = default + return val + + def setbool(self, section, name, value): + if value: + self.set(section, name, 'true') + else: + self.set(section, name, 'false') + + def dump(self): + s = StringIO.StringIO() + self.write(s) + print s.getvalue() + + def dump_safe(self, fp=None): + if not fp: + fp = StringIO.StringIO() + for section in self.sections(): + fp.write('[%s]\n' % section) + for option in self.options(section): + if option == 'aws_secret_access_key': + fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option) + else: + fp.write('%s = %s\n' % (option, self.get(section, option))) + + def dump_to_sdb(self, domain_name, item_name): + from boto.compat import json + sdb = boto.connect_sdb() + domain = sdb.lookup(domain_name) + if not domain: + domain = sdb.create_domain(domain_name) + item = domain.new_item(item_name) + item.active = False + for section in self.sections(): + d = {} + for option in self.options(section): + d[option] = self.get(section, option) + item[section] = json.dumps(d) + item.save() + + def load_from_sdb(self, domain_name, item_name): + from boto.compat import json + sdb = boto.connect_sdb() + domain = sdb.lookup(domain_name) + item = domain.get_item(item_name) + for section in item.keys(): + if not self.has_section(section): + self.add_section(section) + d = json.loads(item[section]) + for attr_name in d.keys(): + attr_value = d[attr_name] + if attr_value == None: + attr_value = 'None' + if isinstance(attr_value, bool): + self.setbool(section, attr_name, attr_value) + else: + self.set(section, attr_name,
attr_value) diff --git a/awx/lib/site-packages/boto/pyami/copybot.cfg b/awx/lib/site-packages/boto/pyami/copybot.cfg new file mode 100644 index 0000000000..cbfdc5ad19 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/copybot.cfg @@ -0,0 +1,60 @@ +# +# Your AWS Credentials +# +[Credentials] +aws_access_key_id = +aws_secret_access_key = + +# +# If you want to use a separate set of credentials when writing +# to the destination bucket, put them here +#dest_aws_access_key_id = +#dest_aws_secret_access_key = + +# +# Fill out this section if you want emails from CopyBot +# when it starts and stops +# +[Notification] +#smtp_host = +#smtp_user = +#smtp_pass = +#smtp_from = +#smtp_to = + +# +# If you leave this section as is, it will automatically +# update boto from subversion upon start up. +# If you don't want that to happen, comment this out +# +[Boto] +boto_location = /usr/local/boto +boto_update = svn:HEAD + +# +# This tells the Pyami code in boto what scripts +# to run during startup +# +[Pyami] +scripts = boto.pyami.copybot.CopyBot + +# +# Source bucket and Destination Bucket, obviously. +# If the Destination bucket does not exist, it will +# attempt to create it. +# If exit_on_completion is false, the instance +# will keep running after the copy operation is +# complete which might be handy for debugging. +# If copy_acls is false, the ACLs will not be +# copied with the objects to the new bucket. +# If replace_dst is false, copybot will only +# store the source file in the dest if +# that file does not already exist. If it's true +# it will replace it even if it does exist. +# +[CopyBot] +src_bucket = +dst_bucket = +exit_on_completion = true +copy_acls = true +replace_dst = true diff --git a/awx/lib/site-packages/boto/pyami/copybot.py b/awx/lib/site-packages/boto/pyami/copybot.py new file mode 100644 index 0000000000..ed397cb761 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/copybot.py @@ -0,0 +1,97 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
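+# ---------------------------------------------------------------------------
+# [Editor's aside - illustrative only, not part of upstream boto] A minimal
+# sketch of how the [CopyBot] options from copybot.cfg above are resolved at
+# runtime through the boto.pyami.config.Config class shown earlier in this
+# diff. The '/etc/boto.cfg' path is the default Pyami config location; the
+# section and option names come from copybot.cfg, everything else here is an
+# assumption for illustration.
+#
+#     from boto.pyami.config import Config
+#
+#     cfg = Config(path='/etc/boto.cfg')
+#     src_bucket = cfg.get('CopyBot', 'src_bucket')
+#     dst_bucket = cfg.get('CopyBot', 'dst_bucket')
+#     replace_dst = cfg.getbool('CopyBot', 'replace_dst', True)
+#     copy_acls = cfg.getbool('CopyBot', 'copy_acls', True)
+#     # get()/getbool() fall back to the supplied default when an option is
+#     # missing, which is why CopyBot below can read options unguarded.
+# ---------------------------------------------------------------------------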
+# +import boto +from boto.pyami.scriptbase import ScriptBase +import os, StringIO + +class CopyBot(ScriptBase): + + def __init__(self): + ScriptBase.__init__(self) + self.wdir = boto.config.get('Pyami', 'working_dir') + self.log_file = '%s.log' % self.instance_id + self.log_path = os.path.join(self.wdir, self.log_file) + boto.set_file_logger(self.name, self.log_path) + self.src_name = boto.config.get(self.name, 'src_bucket') + self.dst_name = boto.config.get(self.name, 'dst_bucket') + self.replace = boto.config.getbool(self.name, 'replace_dst', True) + s3 = boto.connect_s3() + self.src = s3.lookup(self.src_name) + if not self.src: + boto.log.error('Source bucket does not exist: %s' % self.src_name) + dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None) + if dest_access_key: + dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None) + s3 = boto.connect_s3(dest_access_key, dest_secret_key) + self.dst = s3.lookup(self.dst_name) + if not self.dst: + self.dst = s3.create_bucket(self.dst_name) + + def copy_bucket_acl(self): + if boto.config.get(self.name, 'copy_acls', True): + acl = self.src.get_xml_acl() + self.dst.set_xml_acl(acl) + + def copy_key_acl(self, src, dst): + if boto.config.get(self.name, 'copy_acls', True): + acl = src.get_xml_acl() + dst.set_xml_acl(acl) + + def copy_keys(self): + boto.log.info('src=%s' % self.src.name) + boto.log.info('dst=%s' % self.dst.name) + try: + for key in self.src: + if not self.replace: + exists = self.dst.lookup(key.name) + if exists: + boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name)) + continue + boto.log.info('copying %d bytes from key=%s' % (key.size, key.name)) + prefix, base = os.path.split(key.name) + path = os.path.join(self.wdir, base) + key.get_contents_to_filename(path) + new_key = self.dst.new_key(key.name) + new_key.set_contents_from_filename(path) + self.copy_key_acl(key, new_key) + os.unlink(path) + except: + boto.log.exception('Error copying key: %s' % key.name) + + def copy_log(self): + key = self.dst.new_key(self.log_file) + key.set_contents_from_filename(self.log_path) + + def main(self): + fp = StringIO.StringIO() + boto.config.dump_safe(fp) + self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue()) + if self.src and self.dst: + self.copy_keys() + if self.dst: + self.copy_log() + self.notify('%s (%s) Stopping' % (self.name, self.instance_id), + 'Copy Operation Complete') + if boto.config.getbool(self.name, 'exit_on_completion', True): + ec2 = boto.connect_ec2() + ec2.terminate_instances([self.instance_id]) + diff --git a/awx/lib/site-packages/boto/pyami/helloworld.py b/awx/lib/site-packages/boto/pyami/helloworld.py new file mode 100644 index 0000000000..680873ce17 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/helloworld.py @@ -0,0 +1,28 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.pyami.scriptbase import ScriptBase + +class HelloWorld(ScriptBase): + + def main(self): + self.log('Hello World!!!') + diff --git a/awx/lib/site-packages/boto/pyami/installers/__init__.py b/awx/lib/site-packages/boto/pyami/installers/__init__.py new file mode 100644 index 0000000000..cc689264bc --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/installers/__init__.py @@ -0,0 +1,64 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.pyami.scriptbase import ScriptBase + + +class Installer(ScriptBase): + """ + Abstract base class for installers + """ + + def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None): + """ + Add an entry to the system crontab. + """ + raise NotImplementedError + + def add_init_script(self, file): + """ + Add this file to the init.d directory + """ + + def add_env(self, key, value): + """ + Add an environment variable + """ + raise NotImplementedError + + def stop(self, service_name): + """ + Stop a service. + """ + raise NotImplementedError + + def start(self, service_name): + """ + Start a service. + """ + raise NotImplementedError + + def install(self): + """ + Do whatever is necessary to "install" the package.
+ """ + raise NotImplementedError + diff --git a/awx/lib/site-packages/boto/pyami/installers/ubuntu/__init__.py b/awx/lib/site-packages/boto/pyami/installers/ubuntu/__init__.py new file mode 100644 index 0000000000..60ee658e34 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/installers/ubuntu/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/awx/lib/site-packages/boto/pyami/installers/ubuntu/apache.py b/awx/lib/site-packages/boto/pyami/installers/ubuntu/apache.py new file mode 100644 index 0000000000..febc2dfa25 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/installers/ubuntu/apache.py @@ -0,0 +1,43 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
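+# ---------------------------------------------------------------------------
+# [Editor's aside - illustrative only, not part of upstream boto] The
+# Installer contract defined in installers/__init__.py above boils down to
+# "override install(), reuse the run()/stop()/start() helpers". A minimal
+# hypothetical subclass in the style of the Apache installer that follows
+# ('Memcached' and its package name are assumptions, not something boto
+# ships):
+#
+#     from boto.pyami.installers.ubuntu.installer import Installer
+#
+#     class Memcached(Installer):
+#
+#         def install(self):
+#             self.run('apt-get update')
+#             self.run('apt-get -y install memcached', notify=True,
+#                      exit_on_error=True)
+#
+#         def main(self):
+#             self.install()
+# ---------------------------------------------------------------------------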
+# +from boto.pyami.installers.ubuntu.installer import Installer + +class Apache(Installer): + """ + Install apache2, mod_python, and libapache2-svn + """ + + def install(self): + self.run("apt-get update") + self.run('apt-get -y install apache2', notify=True, exit_on_error=True) + self.run('apt-get -y install libapache2-mod-python', notify=True, exit_on_error=True) + self.run('a2enmod rewrite', notify=True, exit_on_error=True) + self.run('a2enmod ssl', notify=True, exit_on_error=True) + self.run('a2enmod proxy', notify=True, exit_on_error=True) + self.run('a2enmod proxy_ajp', notify=True, exit_on_error=True) + + # Hard reboot the apache2 server to enable these modules + self.stop("apache2") + self.start("apache2") + + def main(self): + self.install() diff --git a/awx/lib/site-packages/boto/pyami/installers/ubuntu/ebs.py b/awx/lib/site-packages/boto/pyami/installers/ubuntu/ebs.py new file mode 100644 index 0000000000..3e5b5c28d1 --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/installers/ubuntu/ebs.py @@ -0,0 +1,238 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +Automated installer to attach, format and mount an EBS volume. +This installer assumes that you want the volume formatted as +an XFS file system. To drive this installer, you need the +following section in the boto config passed to the new instance. +You also need to install dateutil by listing python-dateutil +in the list of packages to be installed in the Pyami section +of your boto config file. + +If there is already a device mounted at the specified mount point, +the installer assumes that it is the ephemeral drive and unmounts +it, remounts it as /tmp and chmods it to 777.
+ + Config file section:: + + [EBS] + volume_id = + logical_volume_name = + device = + mount_point = + +""" +import boto +from boto.manage.volume import Volume +from boto.exception import EC2ResponseError +import os, time +from boto.pyami.installers.ubuntu.installer import Installer +from string import Template + +BackupScriptTemplate = """#!/usr/bin/env python +# Backup EBS volume +import boto +from boto.pyami.scriptbase import ScriptBase +import traceback + +class Backup(ScriptBase): + + def main(self): + try: + ec2 = boto.connect_ec2() + self.run("/usr/sbin/xfs_freeze -f ${mount_point}", exit_on_error = True) + snapshot = ec2.create_snapshot('${volume_id}') + boto.log.info("Snapshot created: %s " % snapshot) + except Exception, e: + self.notify(subject="${instance_id} Backup Failed", body=traceback.format_exc()) + finally: + self.run("/usr/sbin/xfs_freeze -u ${mount_point}") + +if __name__ == "__main__": + b = Backup() + b.main() +""" + +BackupCleanupScript= """#!/usr/bin/env python +import boto +from boto.manage.volume import Volume + +# Cleans Backups of EBS volumes + +for v in Volume.all(): + v.trim_snapshots(True) +""" + +TagBasedBackupCleanupScript= """#!/usr/bin/env python +import boto + +# Cleans Backups of EBS volumes + +ec2 = boto.connect_ec2() +ec2.trim_snapshots() +""" + +class EBSInstaller(Installer): + """ + Set up the EBS stuff + """ + + def __init__(self, config_file=None): + Installer.__init__(self, config_file) + self.instance_id = boto.config.get('Instance', 'instance-id') + self.device = boto.config.get('EBS', 'device', '/dev/sdp') + self.volume_id = boto.config.get('EBS', 'volume_id') + self.logical_volume_name = boto.config.get('EBS', 'logical_volume_name') + self.mount_point = boto.config.get('EBS', 'mount_point', '/ebs') + + def attach(self): + ec2 = boto.connect_ec2() + if self.logical_volume_name: + # if a logical volume was specified, override the specified volume_id + # (if there was one) with the current AWS volume for the logical volume: + logical_volume = Volume.find(name = self.logical_volume_name).next() + self.volume_id = logical_volume._volume_id + volume = ec2.get_all_volumes([self.volume_id])[0] + # wait for the volume to be available. The volume may still be being created + # from a snapshot. + while volume.update() != 'available': + boto.log.info('Volume %s not yet available. Current status = %s.' % (volume.id, volume.status)) + time.sleep(5) + instance = ec2.get_only_instances([self.instance_id])[0] + attempt_attach = True + while attempt_attach: + try: + ec2.attach_volume(self.volume_id, self.instance_id, self.device) + attempt_attach = False + except EC2ResponseError, e: + if e.error_code != 'IncorrectState': + # if there's an EC2ResponseError with the code set to IncorrectState, delay a bit for ec2 + # to realize the instance is running, then try again. Otherwise, raise the error: + boto.log.info('Attempt to attach the EBS volume %s to this instance (%s) returned %s. Trying again in a bit.'
% (self.volume_id, self.instance_id, e.errors)) + time.sleep(2) + else: + raise e + boto.log.info('Attached volume %s to instance %s as device %s' % (self.volume_id, self.instance_id, self.device)) + # now wait for the volume device to appear + while not os.path.exists(self.device): + boto.log.info('%s still does not exist, waiting 2 seconds' % self.device) + time.sleep(2) + + def make_fs(self): + boto.log.info('make_fs...') + has_fs = self.run('fsck %s' % self.device) + if has_fs != 0: + self.run('mkfs -t xfs %s' % self.device) + + def create_backup_script(self): + t = Template(BackupScriptTemplate) + s = t.substitute(volume_id=self.volume_id, instance_id=self.instance_id, + mount_point=self.mount_point) + fp = open('/usr/local/bin/ebs_backup', 'w') + fp.write(s) + fp.close() + self.run('chmod +x /usr/local/bin/ebs_backup') + + def create_backup_cleanup_script(self, use_tag_based_cleanup = False): + fp = open('/usr/local/bin/ebs_backup_cleanup', 'w') + if use_tag_based_cleanup: + fp.write(TagBasedBackupCleanupScript) + else: + fp.write(BackupCleanupScript) + fp.close() + self.run('chmod +x /usr/local/bin/ebs_backup_cleanup') + + def handle_mount_point(self): + boto.log.info('handle_mount_point') + if not os.path.isdir(self.mount_point): + boto.log.info('making directory') + # mount directory doesn't exist so create it + self.run("mkdir %s" % self.mount_point) + else: + boto.log.info('directory exists already') + self.run('mount -l') + lines = self.last_command.output.split('\n') + for line in lines: + t = line.split() + if t and t[2] == self.mount_point: + # something is already mounted at the mount point + # unmount that and mount it as /tmp + if t[0] != self.device: + self.run('umount %s' % self.mount_point) + self.run('mount %s /tmp' % t[0]) + break + self.run('chmod 777 /tmp') + # Mount up our new EBS volume onto mount_point + self.run("mount %s %s" % (self.device, self.mount_point)) + self.run('xfs_growfs %s' % self.mount_point) + + def update_fstab(self): + f = open("/etc/fstab", "a") + f.write('%s\t%s\txfs\tdefaults 0 0\n' % (self.device, self.mount_point)) + f.close() + + def install(self): + # First, find and attach the volume + self.attach() + + # Install the xfs tools + self.run('apt-get -y install xfsprogs xfsdump') + + # Check to see if the filesystem was created or not + self.make_fs() + + # create the /ebs directory for mounting + self.handle_mount_point() + + # create the backup script + self.create_backup_script() + + # Set up the backup script + minute = boto.config.get('EBS', 'backup_cron_minute', '0') + hour = boto.config.get('EBS', 'backup_cron_hour', '4,16') + self.add_cron("ebs_backup", "/usr/local/bin/ebs_backup", minute=minute, hour=hour) + + # Set up the backup cleanup script + minute = boto.config.get('EBS', 'backup_cleanup_cron_minute') + hour = boto.config.get('EBS', 'backup_cleanup_cron_hour') + if (minute != None) and (hour != None): + # Snapshot clean up can either be done via the manage module, or via the new tag based + # snapshot code, if the snapshots have been tagged with the name of the associated + # volume. 
Check for the presence of the new configuration flag, and use the appropriate + cleanup method / script: + use_tag_based_cleanup = boto.config.has_option('EBS', 'use_tag_based_snapshot_cleanup') + self.create_backup_cleanup_script(use_tag_based_cleanup) + self.add_cron("ebs_backup_cleanup", "/usr/local/bin/ebs_backup_cleanup", minute=minute, hour=hour) + + # Set up the fstab + self.update_fstab() + + def main(self): + if not os.path.exists(self.device): + self.install() + else: + boto.log.info("Device %s is already attached, skipping EBS Installer" % self.device) diff --git a/awx/lib/site-packages/boto/pyami/installers/ubuntu/installer.py b/awx/lib/site-packages/boto/pyami/installers/ubuntu/installer.py new file mode 100644 index 0000000000..370d63fd7b --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/installers/ubuntu/installer.py @@ -0,0 +1,96 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import boto.pyami.installers +import os +import os.path +import stat +import boto +import random +from pwd import getpwnam + +class Installer(boto.pyami.installers.Installer): + """ + Base Installer class for Ubuntu-based AMIs + """ + def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None): + """ + Write a file to /etc/cron.d to schedule a command + env is a dict containing environment variables you want to set in the file + name will be used as the name of the file + """ + if minute == 'random': + minute = str(random.randrange(60)) + if hour == 'random': + hour = str(random.randrange(24)) + fp = open('/etc/cron.d/%s' % name, "w") + if env: + for key, value in env.items(): + fp.write('%s=%s\n' % (key, value)) + fp.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command)) + fp.close() + + def add_init_script(self, file, name): + """ + Add this file to the init.d directory + """ + f_path = os.path.join("/etc/init.d", name) + f = open(f_path, "w") + f.write(file) + f.close() + os.chmod(f_path, stat.S_IREAD| stat.S_IWRITE | stat.S_IEXEC) + self.run("/usr/sbin/update-rc.d %s defaults" % name) + + def add_env(self, key, value): + """ + Add an environment variable + For Ubuntu, the best place is /etc/environment. Values placed here do + not need to be exported.
+ """ + boto.log.info('Adding env variable: %s=%s' % (key, value)) + if not os.path.exists("/etc/environment.orig"): + self.run('cp /etc/environment /etc/environment.orig', notify=False, exit_on_error=False) + fp = open('/etc/environment', 'a') + fp.write('\n%s="%s"' % (key, value)) + fp.close() + os.environ[key] = value + + def stop(self, service_name): + self.run('/etc/init.d/%s stop' % service_name) + + def start(self, service_name): + self.run('/etc/init.d/%s start' % service_name) + + def create_user(self, user): + """ + Create a user on the local system + """ + self.run("useradd -m %s" % user) + usr = getpwnam(user) + return usr + + + def install(self): + """ + This is the only method you need to override + """ + raise NotImplementedError + diff --git a/awx/lib/site-packages/boto/pyami/installers/ubuntu/mysql.py b/awx/lib/site-packages/boto/pyami/installers/ubuntu/mysql.py new file mode 100644 index 0000000000..490e5dbb4f --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/installers/ubuntu/mysql.py @@ -0,0 +1,109 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This installer will install mysql-server on an Ubuntu machine. +In addition to the normal installation done by apt-get, it will +also configure the new MySQL server to store its data files in +a different location. By default, this is /mnt but that can be +configured in the [MySQL] section of the boto config file passed +to the instance. +""" +from boto.pyami.installers.ubuntu.installer import Installer +import os +import boto +from boto.utils import ShellCommand +from ConfigParser import SafeConfigParser +import time + +ConfigSection = """ +[MySQL] +root_password = +data_dir = +""" + +class MySQL(Installer): + + def install(self): + self.run('apt-get update') + self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True) + +# def set_root_password(self, password=None): +# if not password: +# password = boto.config.get('MySQL', 'root_password') +# if password: +# self.run('mysqladmin -u root password %s' % password) +# return password + + def change_data_dir(self, password=None): + data_dir = boto.config.get('MySQL', 'data_dir', '/mnt') + fresh_install = False + is_mysql_running_command = ShellCommand('mysqladmin ping') # exit status 0 if mysql is running + is_mysql_running_command.run() + if is_mysql_running_command.getStatus() == 0: + # mysql is running. This is the state apt-get will leave it in.
If it isn't running, + # that means mysql was already installed on the AMI and there's no need to stop it, + # saving 40 seconds on instance startup. + time.sleep(10) #trying to stop mysql immediately after installing it fails + # We need to wait until mysql creates the root account before we kill it + # or bad things will happen + i = 0 + while self.run("echo 'quit' | mysql -u root") != 0 and i<5: + time.sleep(5) + i = i + 1 + self.run('/etc/init.d/mysql stop') + self.run("pkill -9 mysql") + + mysql_path = os.path.join(data_dir, 'mysql') + if not os.path.exists(mysql_path): + self.run('mkdir %s' % mysql_path) + fresh_install = True + self.run('chown -R mysql:mysql %s' % mysql_path) + fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w') + fp.write('# created by pyami\n') + fp.write('# use the %s volume for data\n' % data_dir) + fp.write('[mysqld]\n') + fp.write('datadir = %s\n' % mysql_path) + fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log')) + fp.close() + if fresh_install: + self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path) + self.start('mysql') + else: + #get the password ubuntu expects to use: + config_parser = SafeConfigParser() + config_parser.read('/etc/mysql/debian.cnf') + password = config_parser.get('client', 'password') + # start the mysql daemon, then mysql with the required grant statement piped into it: + self.start('mysql') + time.sleep(10) #time for mysql to start + grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password + while self.run(grant_command) != 0: + time.sleep(5) + # leave mysqld running + + def main(self): + self.install() + # change_data_dir runs 'mysql -u root' which assumes there is no mysql password, + # and changing that is too ugly to be worth it: + #self.set_root_password() + self.change_data_dir() + diff --git a/awx/lib/site-packages/boto/pyami/installers/ubuntu/trac.py b/awx/lib/site-packages/boto/pyami/installers/ubuntu/trac.py new file mode 100644 index 0000000000..ef83af7aac --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/installers/ubuntu/trac.py @@ -0,0 +1,139 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
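+# ---------------------------------------------------------------------------
+# [Editor's aside - illustrative only, not part of upstream boto] Installers
+# such as the MySQL class above (and the Trac class below) are selected via
+# the [Pyami] 'scripts' option of the instance's boto config, the same way
+# copybot.cfg wires up CopyBot earlier in this diff. The values below are
+# assumptions for illustration, not shipped defaults:
+#
+#     [Pyami]
+#     scripts = boto.pyami.installers.ubuntu.mysql.MySQL
+#
+#     [MySQL]
+#     data_dir = /mnt
+# ---------------------------------------------------------------------------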
diff --git a/awx/lib/site-packages/boto/pyami/installers/ubuntu/trac.py b/awx/lib/site-packages/boto/pyami/installers/ubuntu/trac.py
new file mode 100644
index 0000000000..ef83af7aac
--- /dev/null
+++ b/awx/lib/site-packages/boto/pyami/installers/ubuntu/trac.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2008 Chris Moyer http://coredumped.org
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.installers.ubuntu.installer import Installer
+import boto
+import os
+
+class Trac(Installer):
+    """
+    Install Trac and DAV-SVN
+    Sets up a Vhost pointing to [Trac]->home
+    Using the config parameter [Trac]->hostname
+    Sets up a trac environment for every directory found under [Trac]->data_dir
+
+    [Trac]
+    name = My Foo Server
+    hostname = trac.foo.com
+    home = /mnt/sites/trac
+    data_dir = /mnt/trac
+    svn_dir = /mnt/subversion
+    server_admin = root@foo.com
+    sdb_auth_domain = users
+    # Optional
+    SSLCertificateFile = /mnt/ssl/foo.crt
+    SSLCertificateKeyFile = /mnt/ssl/foo.key
+    SSLCertificateChainFile = /mnt/ssl/FooCA.crt
+
+    """
+
+    def install(self):
+        self.run('apt-get -y install trac', notify=True, exit_on_error=True)
+        self.run('apt-get -y install libapache2-svn', notify=True, exit_on_error=True)
+        self.run("a2enmod ssl")
+        self.run("a2enmod mod_python")
+        self.run("a2enmod dav_svn")
+        self.run("a2enmod rewrite")
+        # Make sure that boto.log is writable by everyone so that subversion post-commit hooks
+        # can write to it.
+        self.run("touch /var/log/boto.log")
+        self.run("chmod a+w /var/log/boto.log")
+
+    def setup_vhost(self):
+        domain = boto.config.get("Trac", "hostname").strip()
+        if domain:
+            domain_info = domain.split('.')
+            cnf = open("/etc/apache2/sites-available/%s" % domain_info[0], "w")
+            cnf.write("NameVirtualHost *:80\n")
+            if boto.config.get("Trac", "SSLCertificateFile"):
+                cnf.write("NameVirtualHost *:443\n\n")
+                cnf.write("<VirtualHost *:80>\n")
+                cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip())
+                cnf.write("\tServerName %s\n" % domain)
+                cnf.write("\tRewriteEngine On\n")
+                cnf.write("\tRewriteRule ^(.*)$ https://%s$1\n" % domain)
+                cnf.write("</VirtualHost>\n\n")
+
+                cnf.write("<VirtualHost *:443>\n")
+            else:
+                cnf.write("<VirtualHost *:80>\n")
+
+            cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip())
+            cnf.write("\tServerName %s\n" % domain)
+            cnf.write("\tDocumentRoot %s\n" % boto.config.get("Trac", "home").strip())
+
+            cnf.write("\t<Directory %s>\n" % boto.config.get("Trac", "home").strip())
+            cnf.write("\t\tOptions FollowSymLinks Indexes MultiViews\n")
+            cnf.write("\t\tAllowOverride All\n")
+            cnf.write("\t\tOrder allow,deny\n")
+            cnf.write("\t\tallow from all\n")
+            cnf.write("\t</Directory>\n")
+
+            cnf.write("\t<Location />\n")
+            cnf.write("\t\tAuthType Basic\n")
+            cnf.write("\t\tAuthName \"%s\"\n" % boto.config.get("Trac", "name"))
+            cnf.write("\t\tRequire valid-user\n")
+            cnf.write("\t\tAuthUserFile /mnt/apache/passwd/passwords\n")
+            cnf.write("\t</Location>\n")
+
+            data_dir = boto.config.get("Trac", "data_dir")
+            for env in os.listdir(data_dir):
+                if(env[0] != "."):
+                    cnf.write("\t<Location /trac/%s>\n" % env)
+                    cnf.write("\t\tSetHandler mod_python\n")
+                    cnf.write("\t\tPythonInterpreter main_interpreter\n")
+                    cnf.write("\t\tPythonHandler trac.web.modpython_frontend\n")
+                    cnf.write("\t\tPythonOption TracEnv %s/%s\n" % (data_dir, env))
+                    cnf.write("\t\tPythonOption TracUriRoot /trac/%s\n" % env)
+                    cnf.write("\t</Location>\n")
+
+            svn_dir = boto.config.get("Trac", "svn_dir")
+            for env in os.listdir(svn_dir):
+                if(env[0] != "."):
+                    cnf.write("\t<Location /svn/%s>\n" % env)
+                    cnf.write("\t\tDAV svn\n")
+                    cnf.write("\t\tSVNPath %s/%s\n" % (svn_dir, env))
+                    cnf.write("\t</Location>\n")
+
+            cnf.write("\tErrorLog /var/log/apache2/error.log\n")
+            cnf.write("\tLogLevel warn\n")
+            cnf.write("\tCustomLog /var/log/apache2/access.log combined\n")
+            cnf.write("\tServerSignature On\n")
+            SSLCertificateFile = boto.config.get("Trac", "SSLCertificateFile")
+            if SSLCertificateFile:
+                cnf.write("\tSSLEngine On\n")
+                cnf.write("\tSSLCertificateFile %s\n" % SSLCertificateFile)
+
+                SSLCertificateKeyFile = boto.config.get("Trac", "SSLCertificateKeyFile")
+                if SSLCertificateKeyFile:
+                    cnf.write("\tSSLCertificateKeyFile %s\n" % SSLCertificateKeyFile)
+
+                SSLCertificateChainFile = boto.config.get("Trac", "SSLCertificateChainFile")
+                if SSLCertificateChainFile:
+                    cnf.write("\tSSLCertificateChainFile %s\n" % SSLCertificateChainFile)
+            cnf.write("</VirtualHost>\n")
+            cnf.close()
+            self.run("a2ensite %s" % domain_info[0])
+            self.run("/etc/init.d/apache2 force-reload")
+
+    def main(self):
+        self.install()
+        self.setup_vhost()
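Likewise for the Trac installer above, a minimal sketch of the normal entry point, assuming boto.config already carries the [Trac] section shown in the class docstring (for example, delivered via instance userdata; all values are illustrative):

    from boto.pyami.installers.ubuntu.trac import Trac

    # install() pulls in trac and libapache2-svn; setup_vhost() writes the
    # Apache vhost for [Trac]->hostname and enables the site.
    t = Trac()
    t.main()
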
diff --git a/awx/lib/site-packages/boto/pyami/launch_ami.py b/awx/lib/site-packages/boto/pyami/launch_ami.py
new file mode 100644
index 0000000000..243d56d2eb
--- /dev/null
+++ b/awx/lib/site-packages/boto/pyami/launch_ami.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import getopt
+import sys
+import imp
+import time
+import boto
+
+usage_string = """
+SYNOPSIS
+    launch_ami.py -a ami_id [-b script_bucket] [-s script_name]
+                  [-m module] [-c class_name] [-r]
+                  [-g group] [-k key_name] [-n num_instances]
+                  [-w] [extra_data]
+    Where:
+        ami_id - the id of the AMI you wish to launch
+        module - The name of the Python module containing the class you
+                 want to run when the instance is started. If you use this
+                 option the Python module must already be stored on the
+                 instance in a location that is on the Python path.
+        script_file - The name of a local Python module that you would like
+                      to have copied to S3 and then run on the instance
+                      when it is started. The specified module must be
+                      import'able (i.e. in your local Python path). It
+                      will then be copied to the specified bucket in S3
+                      (see the -b option). Once the new instance(s)
+                      start up the script will be copied from S3 and then
+                      run locally on the instance.
+        class_name - The name of the class to be instantiated within the
+                     module or script file specified.
+        script_bucket - the name of the bucket in which the script will be
+                        stored
+        group - the name of the security group the instance will run in
+        key_name - the name of the keypair to use when launching the AMI
+        num_instances - how many instances of the AMI to launch (default 1)
+        input_queue_name - Name of SQS to read input messages from
+        output_queue_name - Name of SQS to write output messages to
+        extra_data - additional name-value pairs that will be passed as
+                     userdata to the newly launched instance.
These should + be of the form "name=value" + The -r option reloads the Python module to S3 without launching + another instance. This can be useful during debugging to allow + you to test a new version of your script without shutting down + your instance and starting up another one. + The -w option tells the script to run synchronously, meaning to + wait until the instance is actually up and running. It then prints + the IP address and internal and external DNS names before exiting. +""" + +def usage(): + print usage_string + sys.exit() + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], 'a:b:c:g:hi:k:m:n:o:rs:w', + ['ami', 'bucket', 'class', 'group', 'help', + 'inputqueue', 'keypair', 'module', + 'numinstances', 'outputqueue', + 'reload', 'script_name', 'wait']) + except: + usage() + params = {'module_name' : None, + 'script_name' : None, + 'class_name' : None, + 'script_bucket' : None, + 'group' : 'default', + 'keypair' : None, + 'ami' : None, + 'num_instances' : 1, + 'input_queue_name' : None, + 'output_queue_name' : None} + reload = None + wait = None + for o, a in opts: + if o in ('-a', '--ami'): + params['ami'] = a + if o in ('-b', '--bucket'): + params['script_bucket'] = a + if o in ('-c', '--class'): + params['class_name'] = a + if o in ('-g', '--group'): + params['group'] = a + if o in ('-h', '--help'): + usage() + if o in ('-i', '--inputqueue'): + params['input_queue_name'] = a + if o in ('-k', '--keypair'): + params['keypair'] = a + if o in ('-m', '--module'): + params['module_name'] = a + if o in ('-n', '--num_instances'): + params['num_instances'] = int(a) + if o in ('-o', '--outputqueue'): + params['output_queue_name'] = a + if o in ('-r', '--reload'): + reload = True + if o in ('-s', '--script'): + params['script_name'] = a + if o in ('-w', '--wait'): + wait = True + + # check required fields + required = ['ami'] + for pname in required: + if not params.get(pname, None): + print '%s is required' % pname + usage() + if params['script_name']: + # first copy the desired module file to S3 bucket + if reload: + print 'Reloading module %s to S3' % params['script_name'] + else: + print 'Copying module %s to S3' % params['script_name'] + l = imp.find_module(params['script_name']) + c = boto.connect_s3() + bucket = c.get_bucket(params['script_bucket']) + key = bucket.new_key(params['script_name']+'.py') + key.set_contents_from_file(l[0]) + params['script_md5'] = key.md5 + # we have everything we need, now build userdata string + l = [] + for k, v in params.items(): + if v: + l.append('%s=%s' % (k, v)) + c = boto.connect_ec2() + l.append('aws_access_key_id=%s' % c.aws_access_key_id) + l.append('aws_secret_access_key=%s' % c.aws_secret_access_key) + for kv in args: + l.append(kv) + s = '|'.join(l) + if not reload: + rs = c.get_all_images([params['ami']]) + img = rs[0] + r = img.run(user_data=s, key_name=params['keypair'], + security_groups=[params['group']], + max_count=params.get('num_instances', 1)) + print 'AMI: %s - %s (Started)' % (params['ami'], img.location) + print 'Reservation %s contains the following instances:' % r.id + for i in r.instances: + print '\t%s' % i.id + if wait: + running = False + while not running: + time.sleep(30) + [i.update() for i in r.instances] + status = [i.state for i in r.instances] + print status + if status.count('running') == len(r.instances): + running = True + for i in r.instances: + print 'Instance: %s' % i.ami_launch_index + print 'Public DNS Name: %s' % i.public_dns_name + print 'Private DNS Name: %s' % i.private_dns_name + +if 
__name__ == "__main__": + main() + diff --git a/awx/lib/site-packages/boto/pyami/scriptbase.py b/awx/lib/site-packages/boto/pyami/scriptbase.py new file mode 100644 index 0000000000..90522cad1a --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/scriptbase.py @@ -0,0 +1,44 @@ +import os +import sys +from boto.utils import ShellCommand, get_ts +import boto +import boto.utils + +class ScriptBase: + + def __init__(self, config_file=None): + self.instance_id = boto.config.get('Instance', 'instance-id', 'default') + self.name = self.__class__.__name__ + self.ts = get_ts() + if config_file: + boto.config.read(config_file) + + def notify(self, subject, body=''): + boto.utils.notify(subject, body) + + def mkdir(self, path): + if not os.path.isdir(path): + try: + os.mkdir(path) + except: + boto.log.error('Error creating directory: %s' % path) + + def umount(self, path): + if os.path.ismount(path): + self.run('umount %s' % path) + + def run(self, command, notify=True, exit_on_error=False, cwd=None): + self.last_command = ShellCommand(command, cwd=cwd) + if self.last_command.status != 0: + boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output)) + if notify: + self.notify('Error encountered', \ + 'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \ + (command, self.last_command.output)) + if exit_on_error: + sys.exit(-1) + return self.last_command.status + + def main(self): + pass + diff --git a/awx/lib/site-packages/boto/pyami/startup.py b/awx/lib/site-packages/boto/pyami/startup.py new file mode 100644 index 0000000000..2093151a7b --- /dev/null +++ b/awx/lib/site-packages/boto/pyami/startup.py @@ -0,0 +1,60 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import sys +import boto +from boto.utils import find_class +from boto import config +from boto.pyami.scriptbase import ScriptBase + + +class Startup(ScriptBase): + + def run_scripts(self): + scripts = config.get('Pyami', 'scripts') + if scripts: + for script in scripts.split(','): + script = script.strip(" ") + try: + pos = script.rfind('.') + if pos > 0: + mod_name = script[0:pos] + cls_name = script[pos+1:] + cls = find_class(mod_name, cls_name) + boto.log.info('Running Script: %s' % script) + s = cls() + s.main() + else: + boto.log.warning('Trouble parsing script: %s' % script) + except Exception, e: + boto.log.exception('Problem Running Script: %s. Startup process halting.' 
% script) + raise e + + def main(self): + self.run_scripts() + self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id')) + +if __name__ == "__main__": + if not config.has_section('loggers'): + boto.set_file_logger('startup', '/var/log/boto.log') + sys.path.append(config.get('Pyami', 'working_dir')) + su = Startup() + su.main() diff --git a/awx/lib/site-packages/boto/rds/__init__.py b/awx/lib/site-packages/boto/rds/__init__.py new file mode 100644 index 0000000000..751c5d51b5 --- /dev/null +++ b/awx/lib/site-packages/boto/rds/__init__.py @@ -0,0 +1,1470 @@ +# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import urllib +from boto.connection import AWSQueryConnection +from boto.rds.dbinstance import DBInstance +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.rds.optiongroup import OptionGroup, OptionGroupOption +from boto.rds.parametergroup import ParameterGroup +from boto.rds.dbsnapshot import DBSnapshot +from boto.rds.event import Event +from boto.rds.regioninfo import RDSRegionInfo +from boto.rds.dbsubnetgroup import DBSubnetGroup +from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership + +def regions(): + """ + Get all available regions for the RDS service. + + :rtype: list + :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo` + """ + return [RDSRegionInfo(name='us-east-1', + endpoint='rds.amazonaws.com'), + RDSRegionInfo(name='us-gov-west-1', + endpoint='rds.us-gov-west-1.amazonaws.com'), + RDSRegionInfo(name='eu-west-1', + endpoint='rds.eu-west-1.amazonaws.com'), + RDSRegionInfo(name='us-west-1', + endpoint='rds.us-west-1.amazonaws.com'), + RDSRegionInfo(name='us-west-2', + endpoint='rds.us-west-2.amazonaws.com'), + RDSRegionInfo(name='sa-east-1', + endpoint='rds.sa-east-1.amazonaws.com'), + RDSRegionInfo(name='ap-northeast-1', + endpoint='rds.ap-northeast-1.amazonaws.com'), + RDSRegionInfo(name='ap-southeast-1', + endpoint='rds.ap-southeast-1.amazonaws.com'), + RDSRegionInfo(name='ap-southeast-2', + endpoint='rds.ap-southeast-2.amazonaws.com'), + ] + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.rds.RDSConnection`. + Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. 
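+
+    Example (a minimal sketch; the region name is illustrative)::
+
+        import boto.rds
+        conn = boto.rds.connect_to_region('us-west-2')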
+ + :rtype: :class:`boto.rds.RDSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None + +#boto.set_stream_logger('rds') + + +class RDSConnection(AWSQueryConnection): + + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'rds.amazonaws.com' + APIVersion = '2013-05-15' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True): + if not region: + region = RDSRegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs) + + def _required_auth_capability(self): + return ['hmac-v4'] + + # DB Instance methods + + def get_all_dbinstances(self, instance_id=None, max_records=None, + marker=None): + """ + Retrieve all the DBInstances in your account. + + :type instance_id: str + :param instance_id: DB Instance identifier. If supplied, only + information this instance will be returned. + Otherwise, info about all DB Instances will + be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbinstance.DBInstance` + """ + params = {} + if instance_id: + params['DBInstanceIdentifier'] = instance_id + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBInstances', params, + [('DBInstance', DBInstance)]) + + def create_dbinstance(self, + id, + allocated_storage, + instance_class, + master_username, + master_password, + port=3306, + engine='MySQL5.1', + db_name=None, + param_group=None, + security_groups=None, + availability_zone=None, + preferred_maintenance_window=None, + backup_retention_period=None, + preferred_backup_window=None, + multi_az=False, + engine_version=None, + auto_minor_version_upgrade=True, + character_set_name = None, + db_subnet_group_name = None, + license_model = None, + option_group_name = None, + iops=None, + vpc_security_groups=None, + ): + # API version: 2012-09-17 + # Parameter notes: + # ================= + # id should be db_instance_identifier according to API docs but has been left + # id for backwards compatibility + # + # security_groups should be db_security_groups according to API docs but has been left + # security_groups for backwards compatibility + # + # master_password should be master_user_password according to API docs but has been left + # master_password for backwards compatibility + # + # instance_class should be db_instance_class according to API docs but has been left + # instance_class for backwards compatibility + """ + Create a new DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + Must contain 1-63 alphanumeric characters. 
+ First character must be a letter. + May not end with a hyphen or contain two consecutive hyphens + + :type allocated_storage: int + :param allocated_storage: Initially allocated storage size, in GBs. + Valid values are depending on the engine value. + + * MySQL = 5--1024 + * oracle-se1 = 10--1024 + * oracle-se = 10--1024 + * oracle-ee = 10--1024 + * sqlserver-ee = 200--1024 + * sqlserver-se = 200--1024 + * sqlserver-ex = 30--1024 + * sqlserver-web = 30--1024 + + :type instance_class: str + :param instance_class: The compute and memory capacity of + the DBInstance. Valid values are: + + * db.m1.small + * db.m1.large + * db.m1.xlarge + * db.m2.xlarge + * db.m2.2xlarge + * db.m2.4xlarge + + :type engine: str + :param engine: Name of database engine. Defaults to MySQL but can be; + + * MySQL + * oracle-se1 + * oracle-se + * oracle-ee + * sqlserver-ee + * sqlserver-se + * sqlserver-ex + * sqlserver-web + + :type master_username: str + :param master_username: Name of master user for the DBInstance. + + * MySQL must be; + - 1--16 alphanumeric characters + - first character must be a letter + - cannot be a reserved MySQL word + + * Oracle must be: + - 1--30 alphanumeric characters + - first character must be a letter + - cannot be a reserved Oracle word + + * SQL Server must be: + - 1--128 alphanumeric characters + - first character must be a letter + - cannot be a reserver SQL Server word + + :type master_password: str + :param master_password: Password of master user for the DBInstance. + + * MySQL must be 8--41 alphanumeric characters + + * Oracle must be 8--30 alphanumeric characters + + * SQL Server must be 8--128 alphanumeric characters. + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. + + * MySQL defaults to 3306 + + * Oracle defaults to 1521 + + * SQL Server defaults to 1433 and _cannot_ be 1434 or 3389 + + :type db_name: str + :param db_name: * MySQL: + Name of a database to create when the DBInstance + is created. Default is to create no databases. + + Must contain 1--64 alphanumeric characters and cannot + be a reserved MySQL word. + + * Oracle: + The Oracle System ID (SID) of the created DB instances. + Default is ORCL. Cannot be longer than 8 characters. + + * SQL Server: + Not applicable and must be None. + + :type param_group: str or ParameterGroup object + :param param_group: Name of DBParameterGroup or ParameterGroup instance + to associate with this DBInstance. If no groups are + specified no parameter groups will be used. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to + authorize on this DBInstance. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in UTC) + during which maintenance can occur. + Default is Sun:05:00-Sun:09:00 + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which automated + backups are retained. Setting this to + zero disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during which + automated backups are created (if + enabled). Must be in h24:mi-hh24:mi + format (UTC). + + :type multi_az: bool + :param multi_az: If True, specifies the DB Instance will be + deployed in multiple availability zones. 
+ + For Microsoft SQL Server, must be set to false. You cannot set + the AvailabilityZone parameter if the MultiAZ parameter is + set to true. + + :type engine_version: str + :param engine_version: The version number of the database engine to use. + + * MySQL format example: 5.1.42 + + * Oracle format example: 11.2.0.2.v2 + + * SQL Server format example: 10.50.2789.0.v1 + + :type auto_minor_version_upgrade: bool + :param auto_minor_version_upgrade: Indicates that minor engine + upgrades will be applied + automatically to the Read Replica + during the maintenance window. + Default is True. + :type character_set_name: str + :param character_set_name: For supported engines, indicates that the DB Instance + should be associated with the specified CharacterSet. + + :type db_subnet_group_name: str + :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance. + If there is no DB Subnet Group, then it is a non-VPC DB + instance. + + :type license_model: str + :param license_model: License model information for this DB Instance. + + Valid values are; + - license-included + - bring-your-own-license + - general-public-license + + All license types are not supported on all engines. + + :type option_group_name: str + :param option_group_name: Indicates that the DB Instance should be associated + with the specified option group. + + :type iops: int + :param iops: The amount of IOPS (input/output operations per second) to Provisioned + for the DB Instance. Can be modified at a later date. + + Must scale linearly. For every 1000 IOPS provision, you must allocated + 100 GB of storage space. This scales up to 1 TB / 10 000 IOPS for MySQL + and Oracle. MSSQL is limited to 700 GB / 7 000 IOPS. + + If you specify a value, it must be at least 1000 IOPS and you must + allocate 100 GB of storage. + + :type vpc_security_groups: list of str or a VPCSecurityGroupMembership object + :param vpc_security_groups: List of VPC security group ids or a list of + VPCSecurityGroupMembership objects this DBInstance should be a member of + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The new db instance. 
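+
+        Example (a minimal sketch; the identifier and credentials are
+        placeholders)::
+
+            conn = boto.rds.connect_to_region('us-east-1')
+            db = conn.create_dbinstance('mydb', 10, 'db.m1.small',
+                                        'master', 'MySecretPassword')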
+ """ + # boto argument alignment with AWS API parameter names: + # ===================================================== + # arg => AWS parameter + # allocated_storage => AllocatedStorage + # auto_minor_version_update => AutoMinorVersionUpgrade + # availability_zone => AvailabilityZone + # backup_retention_period => BackupRetentionPeriod + # character_set_name => CharacterSetName + # db_instance_class => DBInstanceClass + # db_instance_identifier => DBInstanceIdentifier + # db_name => DBName + # db_parameter_group_name => DBParameterGroupName + # db_security_groups => DBSecurityGroups.member.N + # db_subnet_group_name => DBSubnetGroupName + # engine => Engine + # engine_version => EngineVersion + # license_model => LicenseModel + # master_username => MasterUsername + # master_user_password => MasterUserPassword + # multi_az => MultiAZ + # option_group_name => OptionGroupName + # port => Port + # preferred_backup_window => PreferredBackupWindow + # preferred_maintenance_window => PreferredMaintenanceWindow + # vpc_security_groups => VpcSecurityGroupIds.member.N + params = { + 'AllocatedStorage': allocated_storage, + 'AutoMinorVersionUpgrade': str(auto_minor_version_upgrade).lower() if auto_minor_version_upgrade else None, + 'AvailabilityZone': availability_zone, + 'BackupRetentionPeriod': backup_retention_period, + 'CharacterSetName': character_set_name, + 'DBInstanceClass': instance_class, + 'DBInstanceIdentifier': id, + 'DBName': db_name, + 'DBParameterGroupName': (param_group.name + if isinstance(param_group, ParameterGroup) + else param_group), + 'DBSubnetGroupName': db_subnet_group_name, + 'Engine': engine, + 'EngineVersion': engine_version, + 'Iops': iops, + 'LicenseModel': license_model, + 'MasterUsername': master_username, + 'MasterUserPassword': master_password, + 'MultiAZ': str(multi_az).lower() if multi_az else None, + 'OptionGroupName': option_group_name, + 'Port': port, + 'PreferredBackupWindow': preferred_backup_window, + 'PreferredMaintenanceWindow': preferred_maintenance_window, + } + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, DBSecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'DBSecurityGroups.member') + + if vpc_security_groups: + l = [] + for vpc_grp in vpc_security_groups: + if isinstance(vpc_grp, VPCSecurityGroupMembership): + l.append(vpc_grp.vpc_group) + else: + l.append(vpc_grp) + self.build_list_params(params, l, 'VpcSecurityGroupIds.member') + + # Remove any params set to None + for k, v in params.items(): + if not v: del(params[k]) + + return self.get_object('CreateDBInstance', params, DBInstance) + + def create_dbinstance_read_replica(self, id, source_id, + instance_class=None, + port=3306, + availability_zone=None, + auto_minor_version_upgrade=None): + """ + Create a new DBInstance Read Replica. + + :type id: str + :param id: Unique identifier for the new instance. + Must contain 1-63 alphanumeric characters. + First character must be a letter. + May not end with a hyphen or contain two consecutive hyphens + + :type source_id: str + :param source_id: Unique identifier for the DB Instance for which this + DB Instance will act as a Read Replica. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Default is to inherit from + the source DB Instance. 
+ + Valid values are: + + * db.m1.small + * db.m1.large + * db.m1.xlarge + * db.m2.xlarge + * db.m2.2xlarge + * db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Default is to inherit from source DB Instance. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type auto_minor_version_upgrade: bool + :param auto_minor_version_upgrade: Indicates that minor engine + upgrades will be applied + automatically to the Read Replica + during the maintenance window. + Default is to inherit this value + from the source DB Instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The new db instance. + """ + params = {'DBInstanceIdentifier': id, + 'SourceDBInstanceIdentifier': source_id} + if instance_class: + params['DBInstanceClass'] = instance_class + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + if auto_minor_version_upgrade is not None: + if auto_minor_version_upgrade is True: + params['AutoMinorVersionUpgrade'] = 'true' + else: + params['AutoMinorVersionUpgrade'] = 'false' + + return self.get_object('CreateDBInstanceReadReplica', + params, DBInstance) + + def modify_dbinstance(self, id, param_group=None, security_groups=None, + preferred_maintenance_window=None, + master_password=None, allocated_storage=None, + instance_class=None, + backup_retention_period=None, + preferred_backup_window=None, + multi_az=False, + apply_immediately=False, + iops=None, + vpc_security_groups=None, + ): + """ + Modify an existing DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + + :type param_group: str or ParameterGroup object + :param param_group: Name of DBParameterGroup or ParameterGroup instance + to associate with this DBInstance. If no groups are + specified no parameter groups will be used. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to authorize on + this DBInstance. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in UTC) + during which maintenance can + occur. + Default is Sun:05:00-Sun:09:00 + + :type master_password: str + :param master_password: Password of master user for the DBInstance. + Must be 4-15 alphanumeric characters. + + :type allocated_storage: int + :param allocated_storage: The new allocated storage size, in GBs. + Valid values are [5-1024] + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Changes will be applied at + next maintenance window unless + apply_immediately is True. + + Valid values are: + + * db.m1.small + * db.m1.large + * db.m1.xlarge + * db.m2.xlarge + * db.m2.2xlarge + * db.m2.4xlarge + + :type apply_immediately: bool + :param apply_immediately: If true, the modifications will be applied + as soon as possible rather than waiting for + the next preferred maintenance window. + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which automated + backups are retained. Setting this to + zero disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during which + automated backups are created (if + enabled). Must be in h24:mi-hh24:mi + format (UTC). 
+ + :type multi_az: bool + :param multi_az: If True, specifies the DB Instance will be + deployed in multiple availability zones. + + :type iops: int + :param iops: The amount of IOPS (input/output operations per second) to Provisioned + for the DB Instance. Can be modified at a later date. + + Must scale linearly. For every 1000 IOPS provision, you must allocated + 100 GB of storage space. This scales up to 1 TB / 10 000 IOPS for MySQL + and Oracle. MSSQL is limited to 700 GB / 7 000 IOPS. + + If you specify a value, it must be at least 1000 IOPS and you must + allocate 100 GB of storage. + + :type vpc_security_groups: list of str or a VPCSecurityGroupMembership object + :param vpc_security_groups: List of VPC security group ids or a + VPCSecurityGroupMembership object this DBInstance should be a member of + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The modified db instance. + """ + params = {'DBInstanceIdentifier': id} + if param_group: + params['DBParameterGroupName'] = (param_group.name + if isinstance(param_group, ParameterGroup) + else param_group) + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, DBSecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'DBSecurityGroups.member') + if vpc_security_groups: + l = [] + for vpc_grp in vpc_security_groups: + if isinstance(vpc_grp, VPCSecurityGroupMembership): + l.append(vpc_grp.vpc_group) + else: + l.append(vpc_grp) + self.build_list_params(params, l, 'VpcSecurityGroupIds.member') + if preferred_maintenance_window: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if master_password: + params['MasterUserPassword'] = master_password + if allocated_storage: + params['AllocatedStorage'] = allocated_storage + if instance_class: + params['DBInstanceClass'] = instance_class + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window: + params['PreferredBackupWindow'] = preferred_backup_window + if multi_az: + params['MultiAZ'] = 'true' + if apply_immediately: + params['ApplyImmediately'] = 'true' + if iops: + params['Iops'] = iops + + return self.get_object('ModifyDBInstance', params, DBInstance) + + def delete_dbinstance(self, id, skip_final_snapshot=False, + final_snapshot_id=''): + """ + Delete an existing DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + + :type skip_final_snapshot: bool + :param skip_final_snapshot: This parameter determines whether a final + db snapshot is created before the instance + is deleted. If True, no snapshot + is created. If False, a snapshot + is created before deleting the instance. + + :type final_snapshot_id: str + :param final_snapshot_id: If a final snapshot is requested, this + is the identifier used for that snapshot. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The deleted db instance. + """ + params = {'DBInstanceIdentifier': id} + if skip_final_snapshot: + params['SkipFinalSnapshot'] = 'true' + else: + params['SkipFinalSnapshot'] = 'false' + params['FinalDBSnapshotIdentifier'] = final_snapshot_id + return self.get_object('DeleteDBInstance', params, DBInstance) + + def reboot_dbinstance(self, id): + """ + Reboot DBInstance. + + :type id: str + :param id: Unique identifier of the instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The rebooting db instance. 
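+
+        Example (illustrative identifier)::
+
+            db = conn.reboot_dbinstance('mydb')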
+ """ + params = {'DBInstanceIdentifier': id} + return self.get_object('RebootDBInstance', params, DBInstance) + + # DBParameterGroup methods + + def get_all_dbparameter_groups(self, groupname=None, max_records=None, + marker=None): + """ + Get all parameter groups associated with your account in a region. + + :type groupname: str + :param groupname: The name of the DBParameter group to retrieve. + If not provided, all DBParameter groups will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.ec2.parametergroup.ParameterGroup` + """ + params = {} + if groupname: + params['DBParameterGroupName'] = groupname + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBParameterGroups', params, + [('DBParameterGroup', ParameterGroup)]) + + def get_all_dbparameters(self, groupname, source=None, + max_records=None, marker=None): + """ + Get all parameters associated with a ParameterGroup + + :type groupname: str + :param groupname: The name of the DBParameter group to retrieve. + + :type source: str + :param source: Specifies which parameters to return. + If not specified, all parameters will be returned. + Valid values are: user|system|engine-default + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: :class:`boto.ec2.parametergroup.ParameterGroup` + :return: The ParameterGroup + """ + params = {'DBParameterGroupName': groupname} + if source: + params['Source'] = source + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + pg = self.get_object('DescribeDBParameters', params, ParameterGroup) + pg.name = groupname + return pg + + def create_parameter_group(self, name, engine='MySQL5.1', description=''): + """ + Create a new dbparameter group for your account. + + :type name: string + :param name: The name of the new dbparameter group + + :type engine: str + :param engine: Name of database engine. + + :type description: string + :param description: The description of the new dbparameter group + + :rtype: :class:`boto.rds.parametergroup.ParameterGroup` + :return: The newly created ParameterGroup + """ + params = {'DBParameterGroupName': name, + 'DBParameterGroupFamily': engine, + 'Description': description} + return self.get_object('CreateDBParameterGroup', params, ParameterGroup) + + def modify_parameter_group(self, name, parameters=None): + """ + Modify a ParameterGroup for your account. 
+ + :type name: string + :param name: The name of the new ParameterGroup + + :type parameters: list of :class:`boto.rds.parametergroup.Parameter` + :param parameters: The new parameters + + :rtype: :class:`boto.rds.parametergroup.ParameterGroup` + :return: The newly created ParameterGroup + """ + params = {'DBParameterGroupName': name} + for i in range(0, len(parameters)): + parameter = parameters[i] + parameter.merge(params, i+1) + return self.get_list('ModifyDBParameterGroup', params, + ParameterGroup, verb='POST') + + def reset_parameter_group(self, name, reset_all_params=False, + parameters=None): + """ + Resets some or all of the parameters of a ParameterGroup to the + default value + + :type key_name: string + :param key_name: The name of the ParameterGroup to reset + + :type parameters: list of :class:`boto.rds.parametergroup.Parameter` + :param parameters: The parameters to reset. If not supplied, + all parameters will be reset. + """ + params = {'DBParameterGroupName': name} + if reset_all_params: + params['ResetAllParameters'] = 'true' + else: + params['ResetAllParameters'] = 'false' + for i in range(0, len(parameters)): + parameter = parameters[i] + parameter.merge(params, i+1) + return self.get_status('ResetDBParameterGroup', params) + + def delete_parameter_group(self, name): + """ + Delete a ParameterGroup from your account. + + :type key_name: string + :param key_name: The name of the ParameterGroup to delete + """ + params = {'DBParameterGroupName': name} + return self.get_status('DeleteDBParameterGroup', params) + + # DBSecurityGroup methods + + def get_all_dbsecurity_groups(self, groupname=None, max_records=None, + marker=None): + """ + Get all security groups associated with your account in a region. + + :type groupnames: list + :param groupnames: A list of the names of security groups to retrieve. + If not provided, all security groups will + be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbsecuritygroup.DBSecurityGroup` + """ + params = {} + if groupname: + params['DBSecurityGroupName'] = groupname + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBSecurityGroups', params, + [('DBSecurityGroup', DBSecurityGroup)]) + + def create_dbsecurity_group(self, name, description=None): + """ + Create a new security group for your account. + This will create the security group within the region you + are currently connected to. + + :type name: string + :param name: The name of the new security group + + :type description: string + :param description: The description of the new security group + + :rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup` + :return: The newly created DBSecurityGroup + """ + params = {'DBSecurityGroupName': name} + if description: + params['DBSecurityGroupDescription'] = description + group = self.get_object('CreateDBSecurityGroup', params, + DBSecurityGroup) + group.name = name + group.description = description + return group + + def delete_dbsecurity_group(self, name): + """ + Delete a DBSecurityGroup from your account. 
+ + :type key_name: string + :param key_name: The name of the DBSecurityGroup to delete + """ + params = {'DBSecurityGroupName': name} + return self.get_status('DeleteDBSecurityGroup', params) + + def authorize_dbsecurity_group(self, group_name, cidr_ip=None, + ec2_security_group_name=None, + ec2_security_group_owner_id=None): + """ + Add a new rule to an existing security group. + You need to pass in either src_security_group_name and + src_security_group_owner_id OR a CIDR block but not both. + + :type group_name: string + :param group_name: The name of the security group you are adding + the rule to. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group + you are granting access to. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The ID of the owner of the EC2 + security group you are granting + access to. + + :type cidr_ip: string + :param cidr_ip: The CIDR block you are providing access to. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'DBSecurityGroupName': group_name} + if ec2_security_group_name: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + if cidr_ip: + params['CIDRIP'] = urllib.quote(cidr_ip) + return self.get_object('AuthorizeDBSecurityGroupIngress', params, + DBSecurityGroup) + + def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=None, + ec2_security_group_owner_id=None, cidr_ip=None): + """ + Remove an existing rule from an existing security group. + You need to pass in either ec2_security_group_name and + ec2_security_group_owner_id OR a CIDR block. + + :type group_name: string + :param group_name: The name of the security group you are removing + the rule from. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group + from which you are removing access. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The ID of the owner of the EC2 + security from which you are + removing access. + + :type cidr_ip: string + :param cidr_ip: The CIDR block from which you are removing access. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'DBSecurityGroupName': group_name} + if ec2_security_group_name: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + if cidr_ip: + params['CIDRIP'] = cidr_ip + return self.get_object('RevokeDBSecurityGroupIngress', params, + DBSecurityGroup) + + # For backwards compatibility. This method was improperly named + # in previous versions. I have renamed it to match the others. + revoke_security_group = revoke_dbsecurity_group + + # DBSnapshot methods + + def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None, + max_records=None, marker=None): + """ + Get information about DB Snapshots. + + :type snapshot_id: str + :param snapshot_id: The unique identifier of an RDS snapshot. + If not provided, all RDS snapshots will be returned. + + :type instance_id: str + :param instance_id: The identifier of a DBInstance. If provided, + only the DBSnapshots related to that instance will + be returned. + If not provided, all RDS snapshots will be returned. 
+ + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot` + """ + params = {} + if snapshot_id: + params['DBSnapshotIdentifier'] = snapshot_id + if instance_id: + params['DBInstanceIdentifier'] = instance_id + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBSnapshots', params, + [('DBSnapshot', DBSnapshot)]) + + def create_dbsnapshot(self, snapshot_id, dbinstance_id): + """ + Create a new DB snapshot. + + :type snapshot_id: string + :param snapshot_id: The identifier for the DBSnapshot + + :type dbinstance_id: string + :param dbinstance_id: The source identifier for the RDS instance from + which the snapshot is created. + + :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot` + :return: The newly created DBSnapshot + """ + params = {'DBSnapshotIdentifier': snapshot_id, + 'DBInstanceIdentifier': dbinstance_id} + return self.get_object('CreateDBSnapshot', params, DBSnapshot) + + def delete_dbsnapshot(self, identifier): + """ + Delete a DBSnapshot + + :type identifier: string + :param identifier: The identifier of the DBSnapshot to delete + """ + params = {'DBSnapshotIdentifier': identifier} + return self.get_object('DeleteDBSnapshot', params, DBSnapshot) + + def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id, + instance_class, port=None, + availability_zone=None, + multi_az=None, + auto_minor_version_upgrade=None, + db_subnet_group_name=None): + """ + Create a new DBInstance from a DB snapshot. + + :type identifier: string + :param identifier: The identifier for the DBSnapshot + + :type instance_id: string + :param instance_id: The source identifier for the RDS instance from + which the snapshot is created. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Valid values are: + db.m1.small | db.m1.large | db.m1.xlarge | + db.m2.2xlarge | db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type multi_az: bool + :param multi_az: If True, specifies the DB Instance will be + deployed in multiple availability zones. + Default is the API default. + + :type auto_minor_version_upgrade: bool + :param auto_minor_version_upgrade: Indicates that minor engine + upgrades will be applied + automatically to the Read Replica + during the maintenance window. + Default is the API default. + + :type db_subnet_group_name: str + :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance. + If there is no DB Subnet Group, then it is a non-VPC DB + instance. 
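+
+        Example (a minimal sketch; identifiers are placeholders)::
+
+            db = conn.restore_dbinstance_from_dbsnapshot(
+                'mydb-final-snapshot', 'mydb-restored', 'db.m1.small')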
+ + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The newly created DBInstance + """ + params = {'DBSnapshotIdentifier': identifier, + 'DBInstanceIdentifier': instance_id, + 'DBInstanceClass': instance_class} + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + if multi_az is not None: + params['MultiAZ'] = str(multi_az).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str(auto_minor_version_upgrade).lower() + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + return self.get_object('RestoreDBInstanceFromDBSnapshot', + params, DBInstance) + + def restore_dbinstance_from_point_in_time(self, source_instance_id, + target_instance_id, + use_latest=False, + restore_time=None, + dbinstance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None): + + """ + Create a new DBInstance from a point in time. + + :type source_instance_id: string + :param source_instance_id: The identifier for the source DBInstance. + + :type target_instance_id: string + :param target_instance_id: The identifier of the new DBInstance. + + :type use_latest: bool + :param use_latest: If True, the latest snapshot availabile will + be used. + + :type restore_time: datetime + :param restore_time: The date and time to restore from. Only + used if use_latest is False. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Valid values are: + db.m1.small | db.m1.large | db.m1.xlarge | + db.m2.2xlarge | db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type db_subnet_group_name: str + :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance. + If there is no DB Subnet Group, then it is a non-VPC DB + instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The newly created DBInstance + """ + params = {'SourceDBInstanceIdentifier': source_instance_id, + 'TargetDBInstanceIdentifier': target_instance_id} + if use_latest: + params['UseLatestRestorableTime'] = 'true' + elif restore_time: + params['RestoreTime'] = restore_time.isoformat() + if dbinstance_class: + params['DBInstanceClass'] = dbinstance_class + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + return self.get_object('RestoreDBInstanceToPointInTime', + params, DBInstance) + + # Events + + def get_all_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, + max_records=None, marker=None): + """ + Get information about events related to your DBInstances, + DBSecurityGroups and DBParameterGroups. + + :type source_identifier: str + :param source_identifier: If supplied, the events returned will be + limited to those that apply to the identified + source. The value of this parameter depends + on the value of source_type. If neither + parameter is specified, all events in the time + span will be returned. + + :type source_type: str + :param source_type: Specifies how the source_identifier should + be interpreted. 
Valid values are: + b-instance | db-security-group | + db-parameter-group | db-snapshot + + :type start_time: datetime + :param start_time: The beginning of the time interval for events. + If not supplied, all available events will + be returned. + + :type end_time: datetime + :param end_time: The ending of the time interval for events. + If not supplied, all available events will + be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of class:`boto.rds.event.Event` + """ + params = {} + if source_identifier and source_type: + params['SourceIdentifier'] = source_identifier + params['SourceType'] = source_type + if start_time: + params['StartTime'] = start_time.isoformat() + if end_time: + params['EndTime'] = end_time.isoformat() + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeEvents', params, [('Event', Event)]) + + def create_db_subnet_group(self, name, desc, subnet_ids): + """ + Create a new Database Subnet Group. + + :type name: string + :param name: The identifier for the db_subnet_group + + :type desc: string + :param desc: A description of the db_subnet_group + + :type subnet_ids: list + :param subnets: A list of the subnet identifiers to include in the + db_subnet_group + + :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup + :return: the created db_subnet_group + """ + + params = {'DBSubnetGroupName': name, + 'DBSubnetGroupDescription': desc} + self.build_list_params(params, subnet_ids, 'SubnetIds.member') + + return self.get_object('CreateDBSubnetGroup', params, DBSubnetGroup) + + def delete_db_subnet_group(self, name): + """ + Delete a Database Subnet Group. + + :type name: string + :param name: The identifier of the db_subnet_group to delete + + :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup` + :return: The deleted db_subnet_group. + """ + + params = {'DBSubnetGroupName': name} + + return self.get_object('DeleteDBSubnetGroup', params, DBSubnetGroup) + + + def get_all_db_subnet_groups(self, name=None, max_records=None, marker=None): + """ + Retrieve all the DBSubnetGroups in your account. + + :type name: str + :param name: DBSubnetGroup name If supplied, only information about + this DBSubnetGroup will be returned. Otherwise, info + about all DBSubnetGroups will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a Token will be + returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbsubnetgroup.DBSubnetGroup` + """ + params = dict() + if name != None: + params['DBSubnetGroupName'] = name + if max_records != None: + params['MaxRecords'] = max_records + if marker != None: + params['Marker'] = marker + + return self.get_list('DescribeDBSubnetGroups', params, [('DBSubnetGroup',DBSubnetGroup)]) + + def modify_db_subnet_group(self, name, description=None, subnet_ids=None): + """ + Modify a parameter group for your account. 
+
+        :type name: string
+        :param name: The name of the db_subnet_group to modify
+
+        :type description: string
+        :param description: The new description of the db_subnet_group
+
+        :type subnet_ids: list
+        :param subnet_ids: The new list of subnet identifiers to include in
+                           the db_subnet_group
+
+        :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        :return: The modified db_subnet_group
+        """
+        params = {'DBSubnetGroupName': name}
+        if description != None:
+            params['DBSubnetGroupDescription'] = description
+        if subnet_ids != None:
+            self.build_list_params(params, subnet_ids, 'SubnetIds.member')
+
+        return self.get_object('ModifyDBSubnetGroup', params, DBSubnetGroup)
+
+    def create_option_group(self, name, engine_name, major_engine_version,
+                            description=None):
+        """
+        Create a new option group for your account.
+        This will create the option group within the region you
+        are currently connected to.
+
+        :type name: string
+        :param name: The name of the new option group
+
+        :type engine_name: string
+        :param engine_name: Specifies the name of the engine that this option
+                            group should be associated with.
+
+        :type major_engine_version: string
+        :param major_engine_version: Specifies the major version of the engine
+                                     that this option group should be
+                                     associated with.
+
+        :type description: string
+        :param description: The description of the new option group
+
+        :rtype: :class:`boto.rds.optiongroup.OptionGroup`
+        :return: The newly created OptionGroup
+        """
+        params = {
+            'OptionGroupName': name,
+            'EngineName': engine_name,
+            'MajorEngineVersion': major_engine_version,
+            'OptionGroupDescription': description,
+        }
+        group = self.get_object('CreateOptionGroup', params, OptionGroup)
+        group.name = name
+        group.engine_name = engine_name
+        group.major_engine_version = major_engine_version
+        group.description = description
+        return group
+
+    def delete_option_group(self, name):
+        """
+        Delete an OptionGroup from your account.
+
+        :type name: string
+        :param name: The name of the OptionGroup to delete
+        """
+        params = {'OptionGroupName': name}
+        return self.get_status('DeleteOptionGroup', params)
+
+    def describe_option_groups(self, name=None, engine_name=None,
+                               major_engine_version=None, max_records=100,
+                               marker=None):
+        """
+        Describes the available option groups.
+
+        :type name: str
+        :param name: The name of the option group to describe.  Cannot be
+                     supplied together with engine_name or major_engine_version.
+
+        :type engine_name: str
+        :param engine_name: Filters the list of option groups to only include
+                            groups associated with a specific database engine.
+
+        :type major_engine_version: str
+        :param major_engine_version: Filters the list of option groups to only
+                                     include groups associated with a specific
+                                     database engine version.  If specified, then
+                                     engine_name must also be specified.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records.  Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.optiongroup.OptionGroup`
+        """
+        params = {}
+        if name:
+            params['OptionGroupName'] = name
+        elif engine_name and major_engine_version:
+            params['EngineName'] = engine_name
+            params['MajorEngineVersion'] = major_engine_version
+        if max_records:
+            params['MaxRecords'] = int(max_records)
+        if marker:
+            params['Marker'] = marker
+        return self.get_list('DescribeOptionGroups', params, [
+            ('OptionGroup', OptionGroup)
+        ])
+
+    def describe_option_group_options(self, engine_name=None,
+                                      major_engine_version=None, max_records=100,
+                                      marker=None):
+        """
+        Describes the available option group options.
+
+        :type engine_name: str
+        :param engine_name: Filters the list of option groups to only include
+                            groups associated with a specific database engine.
+
+        :type major_engine_version: str
+        :param major_engine_version: Filters the list of option groups to only
+                                     include groups associated with a specific
+                                     database engine version.  If specified, then
+                                     engine_name must also be specified.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records.  Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.optiongroup.OptionGroupOption`
+        """
+        params = {}
+        if engine_name and major_engine_version:
+            params['EngineName'] = engine_name
+            params['MajorEngineVersion'] = major_engine_version
+        if max_records:
+            params['MaxRecords'] = int(max_records)
+        if marker:
+            params['Marker'] = marker
+        return self.get_list('DescribeOptionGroupOptions', params, [
+            ('OptionGroupOptions', OptionGroupOption)
+        ])
diff --git a/awx/lib/site-packages/boto/rds/dbinstance.py b/awx/lib/site-packages/boto/rds/dbinstance.py
new file mode 100644
index 0000000000..043052ea7c
--- /dev/null
+++ b/awx/lib/site-packages/boto/rds/dbinstance.py
@@ -0,0 +1,385 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.rds.dbsecuritygroup import DBSecurityGroup
+from boto.rds.parametergroup import ParameterGroup
+from boto.rds.statusinfo import StatusInfo
+from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
+from boto.resultset import ResultSet
+
+
+class DBInstance(object):
+    """
+    Represents an RDS DBInstance
+
+    Properties reference available from the AWS documentation at
+    http://goo.gl/sC2Kn
+
+    :ivar connection: connection
+    :ivar id: The name and identifier of the DBInstance
+    :ivar create_time: The date and time of creation
+    :ivar engine: The database engine being used
+    :ivar status: The status of the database in a string.  e.g. "available"
+    :ivar allocated_storage: The size of the disk in gigabytes (int).
+    :ivar endpoint: A tuple that describes the hostname and port of
+        the instance.  This is only available when the database is
+        in status "available".
+    :ivar instance_class: Contains the name of the compute and memory
+        capacity class of the DB Instance.
+    :ivar master_username: The username that is set as master username
+        at creation time.
+    :ivar parameter_groups: Provides the list of DB Parameter Groups
+        applied to this DB Instance.
+    :ivar security_groups: Provides List of DB Security Group elements
+        containing only DBSecurityGroup.Name and DBSecurityGroup.Status
+        subelements.
+    :ivar availability_zone: Specifies the name of the Availability Zone
+        the DB Instance is located in.
+    :ivar backup_retention_period: Specifies the number of days for
+        which automatic DB Snapshots are retained.
+    :ivar preferred_backup_window: Specifies the daily time range during
+        which automated backups are created if automated backups are
+        enabled, as determined by the backup_retention_period.
+    :ivar preferred_maintenance_window: Specifies the weekly time
+        range (in UTC) during which system maintenance can occur. (string)
+    :ivar latest_restorable_time: Specifies the latest time to which
+        a database can be restored with point-in-time restore. (string)
+    :ivar multi_az: Boolean that specifies if the DB Instance is a
+        Multi-AZ deployment.
+    :ivar iops: The current number of provisioned IOPS for the DB Instance.
+        Can be None if this is a standard instance.
+    :ivar vpc_security_groups: List of VPC Security Group Membership elements
+        containing only VpcSecurityGroupMembership.VpcSecurityGroupId and
+        VpcSecurityGroupMembership.Status subelements.
+    :ivar pending_modified_values: Specifies that changes to the
+        DB Instance are pending.  This element is only included when changes
+        are pending.  Specific changes are identified by subelements.
+    :ivar read_replica_dbinstance_identifiers: List of read replicas
+        associated with this DB instance.
+    :ivar status_infos: The status of a Read Replica.  If the instance is
+        not a read replica, this will be blank.
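+
+    A minimal status-polling sketch (illustrative only, not from the
+    upstream docs; assumes a configured connection ``conn`` and an
+    existing instance named ``mydb``)::
+
+        import time
+        db = conn.get_all_dbinstances('mydb')[0]
+        while db.status != 'available':
+            time.sleep(30)
+            db.update(validate=True)
+        print db.endpoint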
+ """ + + def __init__(self, connection=None, id=None): + self.connection = connection + self.id = id + self.create_time = None + self.engine = None + self.status = None + self.allocated_storage = None + self.endpoint = None + self.instance_class = None + self.master_username = None + self.parameter_groups = [] + self.security_groups = [] + self.read_replica_dbinstance_identifiers = [] + self.availability_zone = None + self.backup_retention_period = None + self.preferred_backup_window = None + self.preferred_maintenance_window = None + self.latest_restorable_time = None + self.multi_az = False + self.iops = None + self.vpc_security_groups = None + self.pending_modified_values = None + self._in_endpoint = False + self._port = None + self._address = None + self.status_infos = None + + def __repr__(self): + return 'DBInstance:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'Endpoint': + self._in_endpoint = True + elif name == 'DBParameterGroups': + self.parameter_groups = ResultSet([('DBParameterGroup', + ParameterGroup)]) + return self.parameter_groups + elif name == 'DBSecurityGroups': + self.security_groups = ResultSet([('DBSecurityGroup', + DBSecurityGroup)]) + return self.security_groups + elif name == 'VpcSecurityGroups': + self.vpc_security_groups = ResultSet([('VpcSecurityGroupMembership', + VPCSecurityGroupMembership)]) + return self.vpc_security_groups + elif name == 'PendingModifiedValues': + self.pending_modified_values = PendingModifiedValues() + return self.pending_modified_values + elif name == 'ReadReplicaDBInstanceIdentifiers': + self.read_replica_dbinstance_identifiers = \ + ReadReplicaDBInstanceIdentifiers() + return self.read_replica_dbinstance_identifiers + elif name == 'StatusInfos': + self.status_infos = ResultSet([ + ('DBInstanceStatusInfo', StatusInfo) + ]) + return self.status_infos + return None + + def endElement(self, name, value, connection): + if name == 'DBInstanceIdentifier': + self.id = value + elif name == 'DBInstanceStatus': + self.status = value + elif name == 'InstanceCreateTime': + self.create_time = value + elif name == 'Engine': + self.engine = value + elif name == 'DBInstanceStatus': + self.status = value + elif name == 'AllocatedStorage': + self.allocated_storage = int(value) + elif name == 'DBInstanceClass': + self.instance_class = value + elif name == 'MasterUsername': + self.master_username = value + elif name == 'Port': + if self._in_endpoint: + self._port = int(value) + elif name == 'Address': + if self._in_endpoint: + self._address = value + elif name == 'Endpoint': + self.endpoint = (self._address, self._port) + self._in_endpoint = False + elif name == 'AvailabilityZone': + self.availability_zone = value + elif name == 'BackupRetentionPeriod': + self.backup_retention_period = value + elif name == 'LatestRestorableTime': + self.latest_restorable_time = value + elif name == 'PreferredMaintenanceWindow': + self.preferred_maintenance_window = value + elif name == 'PreferredBackupWindow': + self.preferred_backup_window = value + elif name == 'MultiAZ': + if value.lower() == 'true': + self.multi_az = True + elif name == 'Iops': + self.iops = int(value) + else: + setattr(self, name, value) + + @property + def security_group(self): + """ + Provide backward compatibility for previous security_group + attribute. 
+ """ + if len(self.security_groups) > 0: + return self.security_groups[-1] + else: + return None + + @property + def parameter_group(self): + """ + Provide backward compatibility for previous parameter_group + attribute. + """ + if len(self.parameter_groups) > 0: + return self.parameter_groups[-1] + else: + return None + + def snapshot(self, snapshot_id): + """ + Create a new DB snapshot of this DBInstance. + + :type identifier: string + :param identifier: The identifier for the DBSnapshot + + :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot` + :return: The newly created DBSnapshot + """ + return self.connection.create_dbsnapshot(snapshot_id, self.id) + + def reboot(self): + """ + Reboot this DBInstance + + :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot` + :return: The newly created DBSnapshot + """ + return self.connection.reboot_dbinstance(self.id) + + def update(self, validate=False): + """ + Update the DB instance's status information by making a call to fetch + the current instance attributes from the service. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + instance the update method returns quietly. If the + validate param is True, however, it will raise a + ValueError exception if no data is returned from EC2. + """ + rs = self.connection.get_all_dbinstances(self.id) + if len(rs) > 0: + for i in rs: + if i.id == self.id: + self.__dict__.update(i.__dict__) + elif validate: + raise ValueError('%s is not a valid Instance ID' % self.id) + return self.status + + def stop(self, skip_final_snapshot=False, final_snapshot_id=''): + """ + Delete this DBInstance. + + :type skip_final_snapshot: bool + :param skip_final_snapshot: This parameter determines whether + a final db snapshot is created before the instance is + deleted. If True, no snapshot is created. If False, a + snapshot is created before deleting the instance. + + :type final_snapshot_id: str + :param final_snapshot_id: If a final snapshot is requested, this + is the identifier used for that snapshot. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The deleted db instance. + """ + return self.connection.delete_dbinstance(self.id, + skip_final_snapshot, + final_snapshot_id) + + def modify(self, param_group=None, security_groups=None, + preferred_maintenance_window=None, + master_password=None, allocated_storage=None, + instance_class=None, + backup_retention_period=None, + preferred_backup_window=None, + multi_az=False, + iops=None, + vpc_security_groups=None, + apply_immediately=False): + """ + Modify this DBInstance. + + :type param_group: str + :param param_group: Name of DBParameterGroup to associate with + this DBInstance. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to + authorize on this DBInstance. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in + UTC) during which maintenance can occur. Default is + Sun:05:00-Sun:09:00 + + :type master_password: str + :param master_password: Password of master user for the DBInstance. + Must be 4-15 alphanumeric characters. + + :type allocated_storage: int + :param allocated_storage: The new allocated storage size, in GBs. + Valid values are [5-1024] + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Changes will be applied at next maintenance + window unless apply_immediately is True. 
+
+            Valid values are:
+
+            * db.m1.small
+            * db.m1.large
+            * db.m1.xlarge
+            * db.m2.xlarge
+            * db.m2.2xlarge
+            * db.m2.4xlarge
+
+        :type apply_immediately: bool
+        :param apply_immediately: If true, the modifications will be
+            applied as soon as possible rather than waiting for the
+            next preferred maintenance window.
+
+        :type backup_retention_period: int
+        :param backup_retention_period: The number of days for which
+            automated backups are retained.  Setting this to zero
+            disables automated backups.
+
+        :type preferred_backup_window: str
+        :param preferred_backup_window: The daily time range during
+            which automated backups are created (if enabled).  Must be
+            in hh24:mi-hh24:mi format (UTC).
+
+        :type multi_az: bool
+        :param multi_az: If True, specifies the DB Instance will be
+            deployed in multiple availability zones.
+
+        :type iops: int
+        :param iops: The amount of IOPS (input/output operations per
+            second) to provision for the DB Instance.  Can be
+            modified at a later date.
+
+            Must scale linearly.  For every 1000 IOPS provisioned, you
+            must allocate 100 GB of storage space.  This scales up to
+            1 TB / 10 000 IOPS for MySQL and Oracle.  MSSQL is limited
+            to 700 GB / 7 000 IOPS.
+
+            If you specify a value, it must be at least 1000 IOPS and
+            you must allocate 100 GB of storage.
+
+        :type vpc_security_groups: list
+        :param vpc_security_groups: List of VPCSecurityGroupMembership
+            that this DBInstance is a member of.
+
+        :rtype: :class:`boto.rds.dbinstance.DBInstance`
+        :return: The modified db instance.
+        """
+        return self.connection.modify_dbinstance(self.id,
+                                                 param_group,
+                                                 security_groups,
+                                                 preferred_maintenance_window,
+                                                 master_password,
+                                                 allocated_storage,
+                                                 instance_class,
+                                                 backup_retention_period,
+                                                 preferred_backup_window,
+                                                 multi_az,
+                                                 apply_immediately,
+                                                 iops,
+                                                 vpc_security_groups)
+
+
+class PendingModifiedValues(dict):
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name != 'PendingModifiedValues':
+            self[name] = value
+
+
+class ReadReplicaDBInstanceIdentifiers(list):
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ReadReplicaDBInstanceIdentifier':
+            self.append(value)
diff --git a/awx/lib/site-packages/boto/rds/dbsecuritygroup.py b/awx/lib/site-packages/boto/rds/dbsecuritygroup.py
new file mode 100644
index 0000000000..378360667d
--- /dev/null
+++ b/awx/lib/site-packages/boto/rds/dbsecuritygroup.py
@@ -0,0 +1,186 @@
+# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a DBSecurityGroup
+"""
+from boto.ec2.securitygroup import SecurityGroup
+
+class DBSecurityGroup(object):
+    """
+    Represents an RDS database security group
+
+    Properties reference available from the AWS documentation at
+    http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html
+
+    :ivar Status: The current status of the security group.  Possible values
+        are [ active, ? ].  Reference documentation lacks specifics of
+        possibilities
+    :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the
+        current object
+    :ivar description: The description of the security group
+    :ivar ec2_groups: List of :py:class:`EC2 Security Group
+        <boto.ec2.securitygroup.SecurityGroup>` objects that this security
+        group PERMITS
+    :ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange`
+        objects (containing CIDR addresses) that this security group PERMITS
+    :ivar name: Name of the security group
+    :ivar owner_id: ID of the owner of the security group.  Can be 'None'
+    """
+    def __init__(self, connection=None, owner_id=None,
+                 name=None, description=None):
+        self.connection = connection
+        self.owner_id = owner_id
+        self.name = name
+        self.description = description
+        self.ec2_groups = []
+        self.ip_ranges = []
+
+    def __repr__(self):
+        return 'DBSecurityGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'IPRange':
+            cidr = IPRange(self)
+            self.ip_ranges.append(cidr)
+            return cidr
+        elif name == 'EC2SecurityGroup':
+            ec2_grp = EC2SecurityGroup(self)
+            self.ec2_groups.append(ec2_grp)
+            return ec2_grp
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'OwnerId':
+            self.owner_id = value
+        elif name == 'DBSecurityGroupName':
+            self.name = value
+        elif name == 'DBSecurityGroupDescription':
+            self.description = value
+        elif name == 'IPRanges':
+            pass
+        else:
+            setattr(self, name, value)
+
+    def delete(self):
+        return self.connection.delete_dbsecurity_group(self.name)
+
+    def authorize(self, cidr_ip=None, ec2_group=None):
+        """
+        Add a new rule to this DBSecurity group.
+        You need to pass in either a CIDR block to authorize or
+        an EC2 SecurityGroup.
+
+        :type cidr_ip: string
+        :param cidr_ip: A valid CIDR IP range to authorize
+
+        :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
+        :param ec2_group: An EC2 security group to authorize
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        if isinstance(ec2_group, SecurityGroup):
+            group_name = ec2_group.name
+            group_owner_id = ec2_group.owner_id
+        else:
+            group_name = None
+            group_owner_id = None
+        return self.connection.authorize_dbsecurity_group(self.name,
+                                                          cidr_ip,
+                                                          group_name,
+                                                          group_owner_id)
+
+    def revoke(self, cidr_ip=None, ec2_group=None):
+        """
+        Revoke access to a CIDR range or EC2 SecurityGroup.
+        You need to pass in either a CIDR block or
+        an EC2 SecurityGroup from which to revoke access.
+
+        :type cidr_ip: string
+        :param cidr_ip: A valid CIDR IP range to revoke
+
+        :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
+        :param ec2_group: An EC2 security group to revoke
+
+        :rtype: bool
+        :return: True if successful.
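+
+        An illustrative call (hedged; assumes ``sg`` is an existing
+        :class:`DBSecurityGroup` object and the CIDR block is hypothetical)::
+
+            sg.revoke(cidr_ip='203.0.113.0/24')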
+ """ + if isinstance(ec2_group, SecurityGroup): + group_name = ec2_group.name + group_owner_id = ec2_group.owner_id + return self.connection.revoke_dbsecurity_group( + self.name, + ec2_security_group_name=group_name, + ec2_security_group_owner_id=group_owner_id) + + # Revoking by CIDR IP range + return self.connection.revoke_dbsecurity_group( + self.name, cidr_ip=cidr_ip) + +class IPRange(object): + """ + Describes a CIDR address range for use in a DBSecurityGroup + + :ivar cidr_ip: IP Address range + """ + + def __init__(self, parent=None): + self.parent = parent + self.cidr_ip = None + self.status = None + + def __repr__(self): + return 'IPRange:%s' % self.cidr_ip + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'CIDRIP': + self.cidr_ip = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) + +class EC2SecurityGroup(object): + """ + Describes an EC2 security group for use in a DBSecurityGroup + """ + + def __init__(self, parent=None): + self.parent = parent + self.name = None + self.owner_id = None + + def __repr__(self): + return 'EC2SecurityGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'EC2SecurityGroupName': + self.name = value + elif name == 'EC2SecurityGroupOwnerId': + self.owner_id = value + else: + setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/rds/dbsnapshot.py b/awx/lib/site-packages/boto/rds/dbsnapshot.py new file mode 100644 index 0000000000..acacd73dcd --- /dev/null +++ b/awx/lib/site-packages/boto/rds/dbsnapshot.py @@ -0,0 +1,108 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+class DBSnapshot(object):
+    """
+    Represents an RDS DB Snapshot
+
+    Properties reference available from the AWS documentation at
+    http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DBSnapshot.html
+
+    :ivar EngineVersion: Specifies the version of the database engine
+    :ivar LicenseModel: License model information for the restored DB instance
+    :ivar allocated_storage: Specifies the allocated storage size in gigabytes (GB)
+    :ivar availability_zone: Specifies the name of the Availability Zone the
+        DB Instance was located in at the time of the DB Snapshot
+    :ivar connection: boto.rds.RDSConnection associated with the current object
+    :ivar engine: Specifies the name of the database engine
+    :ivar id: Specifies the identifier for the DB Snapshot (DBSnapshotIdentifier)
+    :ivar instance_create_time: Specifies the time (UTC) when the snapshot was taken
+    :ivar instance_id: Specifies the DBInstanceIdentifier of the DB Instance
+        this DB Snapshot was created from (DBInstanceIdentifier)
+    :ivar master_username: Provides the master username for the DB Instance
+    :ivar port: Specifies the port that the database engine was listening on
+        at the time of the snapshot
+    :ivar snapshot_create_time: Provides the time (UTC) when the snapshot was taken
+    :ivar status: Specifies the status of this DB Snapshot.  Possible values
+        are [ available, backing-up, creating, deleted, deleting, failed,
+        modifying, rebooting, resetting-master-credentials ]
+    """
+
+    def __init__(self, connection=None, id=None):
+        self.connection = connection
+        self.id = id
+        self.engine = None
+        self.snapshot_create_time = None
+        self.instance_create_time = None
+        self.port = None
+        self.status = None
+        self.availability_zone = None
+        self.master_username = None
+        self.allocated_storage = None
+        self.instance_id = None
+
+    def __repr__(self):
+        return 'DBSnapshot:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'Engine':
+            self.engine = value
+        elif name == 'InstanceCreateTime':
+            self.instance_create_time = value
+        elif name == 'SnapshotCreateTime':
+            self.snapshot_create_time = value
+        elif name == 'DBInstanceIdentifier':
+            self.instance_id = value
+        elif name == 'DBSnapshotIdentifier':
+            self.id = value
+        elif name == 'Port':
+            self.port = int(value)
+        elif name == 'Status':
+            self.status = value
+        elif name == 'AvailabilityZone':
+            self.availability_zone = value
+        elif name == 'MasterUsername':
+            self.master_username = value
+        elif name == 'AllocatedStorage':
+            self.allocated_storage = int(value)
+        elif name == 'SnapshotTime':
+            self.time = value
+        else:
+            setattr(self, name, value)
+
+    def update(self, validate=False):
+        """
+        Update the DB snapshot's status information by making a call to fetch
+        the current snapshot attributes from the service.
+
+        :type validate: bool
+        :param validate: By default, if the service returns no data about the
+                         snapshot the update method returns quietly.  If
+                         the validate param is True, however, it will
+                         raise a ValueError exception if no data is
+                         returned from the service.
+ """ + rs = self.connection.get_all_dbsnapshots(self.id) + if len(rs) > 0: + for i in rs: + if i.id == self.id: + self.__dict__.update(i.__dict__) + elif validate: + raise ValueError('%s is not a valid Snapshot ID' % self.id) + return self.status diff --git a/awx/lib/site-packages/boto/rds/dbsubnetgroup.py b/awx/lib/site-packages/boto/rds/dbsubnetgroup.py new file mode 100644 index 0000000000..4b9fc58059 --- /dev/null +++ b/awx/lib/site-packages/boto/rds/dbsubnetgroup.py @@ -0,0 +1,69 @@ +# Copyright (c) 2013 Franc Carter - franc.carter@gmail.com +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an DBSubnetGroup +""" + +class DBSubnetGroup(object): + """ + Represents an RDS database subnet group + + Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSubnetGroup.html + + :ivar status: The current status of the subnet group. Possibile values are [ active, ? ]. 
Reference documentation lacks specifics of possibilities + :ivar connection: boto.rds.RDSConnection associated with the current object + :ivar description: The description of the subnet group + :ivar subnet_ids: List of subnet identifiers in the group + :ivar name: Name of the subnet group + :ivar vpc_id: The ID of the VPC the subnets are inside + """ + def __init__(self, connection=None, name=None, description=None, subnet_ids=None): + self.connection = connection + self.name = name + self.description = description + if subnet_ids != None: + self.subnet_ids = subnet_ids + else: + self.subnet_ids = [] + self.vpc_id = None + self.status = None + + def __repr__(self): + return 'DBSubnetGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'SubnetIdentifier': + self.subnet_ids.append(value) + elif name == 'DBSubnetGroupName': + self.name = value + elif name == 'DBSubnetGroupDescription': + self.description = value + elif name == 'VpcId': + self.vpc_id = value + elif name == 'SubnetGroupStatus': + self.status = value + else: + setattr(self, name, value) + diff --git a/awx/lib/site-packages/boto/rds/event.py b/awx/lib/site-packages/boto/rds/event.py new file mode 100644 index 0000000000..a91f8f08a5 --- /dev/null +++ b/awx/lib/site-packages/boto/rds/event.py @@ -0,0 +1,49 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Event(object): + + def __init__(self, connection=None): + self.connection = connection + self.message = None + self.source_identifier = None + self.source_type = None + self.engine = None + self.date = None + + def __repr__(self): + return '"%s"' % self.message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'SourceIdentifier': + self.source_identifier = value + elif name == 'SourceType': + self.source_type = value + elif name == 'Message': + self.message = value + elif name == 'Date': + self.date = value + else: + setattr(self, name, value) + diff --git a/awx/lib/site-packages/boto/rds/optiongroup.py b/awx/lib/site-packages/boto/rds/optiongroup.py new file mode 100644 index 0000000000..8968b6cad6 --- /dev/null +++ b/awx/lib/site-packages/boto/rds/optiongroup.py @@ -0,0 +1,404 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an OptionGroup +""" + +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.resultset import ResultSet + + +class OptionGroup(object): + """ + Represents an RDS option group + + Properties reference available from the AWS documentation at + http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_OptionGroup.html + + :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the + current object + :ivar name: Name of the option group + :ivar description: The description of the option group + :ivar engine_name: The name of the database engine to use + :ivar major_engine_version: The major version number of the engine to use + :ivar allow_both_vpc_and_nonvpc: Indicates whether this option group can be + applied to both VPC and non-VPC instances. + The value ``True`` indicates the option + group can be applied to both VPC and + non-VPC instances. + :ivar vpc_id: If AllowsVpcAndNonVpcInstanceMemberships is 'false', this + field is blank. If AllowsVpcAndNonVpcInstanceMemberships is + ``True`` and this field is blank, then this option group can + be applied to both VPC and non-VPC instances. If this field + contains a value, then this option group can only be applied + to instances that are in the VPC indicated by this field. 
+    :ivar options: The list of :py:class:`boto.rds.optiongroup.Option` objects
+        associated with the group
+    """
+    def __init__(self, connection=None, name=None, engine_name=None,
+                 major_engine_version=None, description=None,
+                 allow_both_vpc_and_nonvpc=False, vpc_id=None):
+        self.connection = connection
+        self.name = name
+        self.engine_name = engine_name
+        self.major_engine_version = major_engine_version
+        self.description = description
+        self.allow_both_vpc_and_nonvpc = allow_both_vpc_and_nonvpc
+        self.vpc_id = vpc_id
+        self.options = []
+
+    def __repr__(self):
+        return 'OptionGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Options':
+            self.options = ResultSet([
+                ('Options', Option)
+            ])
+            return self.options
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'OptionGroupName':
+            self.name = value
+        elif name == 'EngineName':
+            self.engine_name = value
+        elif name == 'MajorEngineVersion':
+            self.major_engine_version = value
+        elif name == 'OptionGroupDescription':
+            self.description = value
+        elif name == 'AllowsVpcAndNonVpcInstanceMemberships':
+            if value.lower() == 'true':
+                self.allow_both_vpc_and_nonvpc = True
+            else:
+                self.allow_both_vpc_and_nonvpc = False
+        elif name == 'VpcId':
+            self.vpc_id = value
+        else:
+            setattr(self, name, value)
+
+    def delete(self):
+        return self.connection.delete_option_group(self.name)
+
+
+class Option(object):
+    """
+    Describes an Option for use in an OptionGroup
+
+    :ivar name: The name of the option
+    :ivar description: The description of the option.
+    :ivar permanent: Indicate if this option is permanent.
+    :ivar persistent: Indicate if this option is persistent.
+    :ivar port: If required, the port configured for this option to use.
+    :ivar settings: The option settings for this option.
+    :ivar db_security_groups: If the option requires access to a port, then
+        this DB Security Group allows access to the port.
+    :ivar vpc_security_groups: If the option requires access to a port, then
+        this VPC Security Group allows access to the port.
+ """ + def __init__(self, name=None, description=None, permanent=False, + persistent=False, port=None, settings=None, + db_security_groups=None, vpc_security_groups=None): + self.name = name + self.description = description + self.permanent = permanent + self.persistent = persistent + self.port = port + self.settings = settings + self.db_security_groups = db_security_groups + self.vpc_security_groups = vpc_security_groups + + if self.settings is None: + self.settings = [] + + if self.db_security_groups is None: + self.db_security_groups = [] + + if self.vpc_security_groups is None: + self.vpc_security_groups = [] + + def __repr__(self): + return 'Option:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'OptionSettings': + self.settings = ResultSet([ + ('OptionSettings', OptionSetting) + ]) + elif name == 'DBSecurityGroupMemberships': + self.db_security_groups = ResultSet([ + ('DBSecurityGroupMemberships', DBSecurityGroup) + ]) + elif name == 'VpcSecurityGroupMemberships': + self.vpc_security_groups = ResultSet([ + ('VpcSecurityGroupMemberships', VpcSecurityGroup) + ]) + else: + return None + + def endElement(self, name, value, connection): + if name == 'OptionName': + self.name = value + elif name == 'OptionDescription': + self.description = value + elif name == 'Permanent': + if value.lower() == 'true': + self.permenant = True + else: + self.permenant = False + elif name == 'Persistent': + if value.lower() == 'true': + self.persistent = True + else: + self.persistent = False + elif name == 'Port': + self.port = int(value) + else: + setattr(self, name, value) + + +class OptionSetting(object): + """ + Describes a OptionSetting for use in an Option + + :ivar name: The name of the option that has settings that you can set. + :ivar description: The description of the option setting. + :ivar value: The current value of the option setting. + :ivar default_value: The default value of the option setting. + :ivar allowed_values: The allowed values of the option setting. + :ivar data_type: The data type of the option setting. + :ivar apply_type: The DB engine specific parameter type. + :ivar is_modifiable: A Boolean value that, when true, indicates the option + setting can be modified from the default. + :ivar is_collection: Indicates if the option setting is part of a + collection. 
+ """ + + def __init__(self, name=None, description=None, value=None, + default_value=False, allowed_values=None, data_type=None, + apply_type=None, is_modifiable=False, is_collection=False): + self.name = name + self.description = description + self.value = value + self.default_value = default_value + self.allowed_values = allowed_values + self.data_type = data_type + self.apply_type = apply_type + self.is_modifiable = is_modifiable + self.is_collection = is_collection + + def __repr__(self): + return 'OptionSetting:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'Description': + self.description = value + elif name == 'Value': + self.value = value + elif name == 'DefaultValue': + self.default_value = value + elif name == 'AllowedValues': + self.allowed_values = value + elif name == 'DataType': + self.data_type = value + elif name == 'ApplyType': + self.apply_type = value + elif name == 'IsModifiable': + if value.lower() == 'true': + self.is_modifiable = True + else: + self.is_modifiable = False + elif name == 'IsCollection': + if value.lower() == 'true': + self.is_collection = True + else: + self.is_collection = False + else: + setattr(self, name, value) + + +class VpcSecurityGroup(object): + """ + Describes a VPC security group for use in a OptionGroup + """ + def __init__(self, vpc_id=None, status=None): + self.vpc_id = vpc_id + self.status = status + + def __repr__(self): + return 'VpcSecurityGroup:%s' % self.vpc_id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'VpcSecurityGroupId': + self.vpc_id = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) + + +class OptionGroupOption(object): + """ + Describes a OptionGroupOption for use in an OptionGroup + + :ivar name: The name of the option + :ivar description: The description of the option. + :ivar engine_name: Engine name that this option can be applied to. + :ivar major_engine_version: Indicates the major engine version that the + option is available for. + :ivar min_minor_engine_version: The minimum required engine version for the + option to be applied. + :ivar permanent: Indicate if this option is permanent. + :ivar persistent: Indicate if this option is persistent. + :ivar port_required: Specifies whether the option requires a port. + :ivar default_port: If the option requires a port, specifies the default + port for the option. + :ivar settings: The option settings for this option. + :ivar depends_on: List of all options that are prerequisites for this + option. 
+ """ + def __init__(self, name=None, description=None, engine_name=None, + major_engine_version=None, min_minor_engine_version=None, + permanent=False, persistent=False, port_required=False, + default_port=None, settings=None, depends_on=None): + self.name = name + self.description = description + self.engine_name = engine_name + self.major_engine_version = major_engine_version + self.min_minor_engine_version = min_minor_engine_version + self.permanent = permanent + self.persistent = persistent + self.port_required = port_required + self.default_port = default_port + self.settings = settings + self.depends_on = depends_on + + if self.settings is None: + self.settings = [] + + if self.depends_on is None: + self.depends_on = [] + + def __repr__(self): + return 'OptionGroupOption:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'OptionGroupOptionSettings': + self.settings = ResultSet([ + ('OptionGroupOptionSettings', OptionGroupOptionSetting) + ]) + elif name == 'OptionsDependedOn': + self.depends_on = [] + else: + return None + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'Description': + self.description = value + elif name == 'EngineName': + self.engine_name = value + elif name == 'MajorEngineVersion': + self.major_engine_version = value + elif name == 'MinimumRequiredMinorEngineVersion': + self.min_minor_engine_version = value + elif name == 'Permanent': + if value.lower() == 'true': + self.permenant = True + else: + self.permenant = False + elif name == 'Persistent': + if value.lower() == 'true': + self.persistent = True + else: + self.persistent = False + elif name == 'PortRequired': + if value.lower() == 'true': + self.port_required = True + else: + self.port_required = False + elif name == 'DefaultPort': + self.default_port = int(value) + else: + setattr(self, name, value) + + +class OptionGroupOptionSetting(object): + """ + Describes a OptionGroupOptionSetting for use in an OptionGroupOption. + + :ivar name: The name of the option that has settings that you can set. + :ivar description: The description of the option setting. + :ivar value: The current value of the option setting. + :ivar default_value: The default value of the option setting. + :ivar allowed_values: The allowed values of the option setting. + :ivar data_type: The data type of the option setting. + :ivar apply_type: The DB engine specific parameter type. + :ivar is_modifiable: A Boolean value that, when true, indicates the option + setting can be modified from the default. + :ivar is_collection: Indicates if the option setting is part of a + collection. 
+ """ + + def __init__(self, name=None, description=None, default_value=False, + allowed_values=None, apply_type=None, is_modifiable=False): + self.name = name + self.description = description + self.default_value = default_value + self.allowed_values = allowed_values + self.apply_type = apply_type + self.is_modifiable = is_modifiable + + def __repr__(self): + return 'OptionGroupOptionSetting:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'SettingName': + self.name = value + elif name == 'SettingDescription': + self.description = value + elif name == 'DefaultValue': + self.default_value = value + elif name == 'AllowedValues': + self.allowed_values = value + elif name == 'ApplyType': + self.apply_type = value + elif name == 'IsModifiable': + if value.lower() == 'true': + self.is_modifiable = True + else: + self.is_modifiable = False + else: + setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/rds/parametergroup.py b/awx/lib/site-packages/boto/rds/parametergroup.py new file mode 100644 index 0000000000..e52890cf73 --- /dev/null +++ b/awx/lib/site-packages/boto/rds/parametergroup.py @@ -0,0 +1,201 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+class ParameterGroup(dict):
+
+    def __init__(self, connection=None):
+        dict.__init__(self)
+        self.connection = connection
+        self.name = None
+        self.description = None
+        self.engine = None
+        self._current_param = None
+
+    def __repr__(self):
+        return 'ParameterGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Parameter':
+            if self._current_param:
+                self[self._current_param.name] = self._current_param
+            self._current_param = Parameter(self)
+            return self._current_param
+
+    def endElement(self, name, value, connection):
+        if name == 'DBParameterGroupName':
+            self.name = value
+        elif name == 'Description':
+            self.description = value
+        elif name == 'Engine':
+            self.engine = value
+        else:
+            setattr(self, name, value)
+
+    def modifiable(self):
+        mod = []
+        for key in self:
+            p = self[key]
+            if p.is_modifiable:
+                mod.append(p)
+        return mod
+
+    def get_params(self):
+        pg = self.connection.get_all_dbparameters(self.name)
+        self.update(pg)
+
+    def add_param(self, name, value, apply_method):
+        param = Parameter()
+        param.name = name
+        param.value = value
+        param.apply_method = apply_method
+        self[name] = param
+
+class Parameter(object):
+    """
+    Represents an RDS Parameter
+    """
+
+    ValidTypes = {'integer': int,
+                  'string': str,
+                  'boolean': bool}
+    ValidSources = ['user', 'system', 'engine-default']
+    ValidApplyTypes = ['static', 'dynamic']
+    ValidApplyMethods = ['immediate', 'pending-reboot']
+
+    def __init__(self, group=None, name=None):
+        self.group = group
+        self.name = name
+        self._value = None
+        self.type = 'string'
+        self.source = None
+        self.is_modifiable = True
+        self.description = None
+        self.apply_method = None
+        self.allowed_values = None
+
+    def __repr__(self):
+        return 'Parameter:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'ParameterName':
+            self.name = value
+        elif name == 'ParameterValue':
+            self._value = value
+        elif name == 'DataType':
+            if value in self.ValidTypes:
+                self.type = value
+        elif name == 'Source':
+            if value in self.ValidSources:
+                self.source = value
+        elif name == 'IsModifiable':
+            if value.lower() == 'true':
+                self.is_modifiable = True
+            else:
+                self.is_modifiable = False
+        elif name == 'Description':
+            self.description = value
+        elif name == 'ApplyType':
+            if value in self.ValidApplyTypes:
+                self.apply_type = value
+        elif name == 'AllowedValues':
+            self.allowed_values = value
+        else:
+            setattr(self, name, value)
+
+    def merge(self, d, i):
+        prefix = 'Parameters.member.%d.' % i
+        if self.name:
+            d[prefix + 'ParameterName'] = self.name
+        if self._value is not None:
+            d[prefix + 'ParameterValue'] = self._value
+        if self.apply_method:
+            d[prefix + 'ApplyMethod'] = self.apply_method
+
+    def _set_string_value(self, value):
+        if not (isinstance(value, str) or isinstance(value, unicode)):
+            raise ValueError('value must be of type str')
+        if self.allowed_values:
+            choices = self.allowed_values.split(',')
+            if value not in choices:
+                raise ValueError('value must be in %s' % self.allowed_values)
+        self._value = value
+
+    def _set_integer_value(self, value):
+        if isinstance(value, str) or isinstance(value, unicode):
+            value = int(value)
+        if isinstance(value, int) or isinstance(value, long):
+            if self.allowed_values:
+                min, max = self.allowed_values.split('-')
+                if value < int(min) or value > int(max):
+                    raise ValueError('range is %s' % self.allowed_values)
+            self._value = value
+        else:
+            raise ValueError('value must be integer')
+
+    def _set_boolean_value(self, value):
+        if isinstance(value, bool):
+            self._value = value
+        elif isinstance(value, str) or isinstance(value, unicode):
+            if value.lower() == 'true':
+                self._value = True
+            else:
+                self._value = False
+        else:
+            raise ValueError('value must be boolean')
+
+    def set_value(self, value):
+        if self.type == 'string':
+            self._set_string_value(value)
+        elif self.type == 'integer':
+            self._set_integer_value(value)
+        elif self.type == 'boolean':
+            self._set_boolean_value(value)
+        else:
+            raise TypeError('unknown type (%s)' % self.type)
+
+    def get_value(self):
+        if self._value is None:
+            return self._value
+        if self.type == 'string':
+            return self._value
+        elif self.type == 'integer':
+            if not isinstance(self._value, int) and not isinstance(self._value, long):
+                self._set_integer_value(self._value)
+            return self._value
+        elif self.type == 'boolean':
+            if not isinstance(self._value, bool):
+                self._set_boolean_value(self._value)
+            return self._value
+        else:
+            raise TypeError('unknown type (%s)' % self.type)
+
+    value = property(get_value, set_value, 'The value of the parameter')
+
+    def apply(self, immediate=False):
+        if immediate:
+            self.apply_method = 'immediate'
+        else:
+            self.apply_method = 'pending-reboot'
+        self.group.connection.modify_parameter_group(self.group.name, [self])
+
diff --git a/awx/lib/site-packages/boto/rds/regioninfo.py b/awx/lib/site-packages/boto/rds/regioninfo.py
new file mode 100644
index 0000000000..7d186ae0cb
--- /dev/null
+++ b/awx/lib/site-packages/boto/rds/regioninfo.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.regioninfo import RegionInfo
+
+class RDSRegionInfo(RegionInfo):
+
+    def __init__(self, connection=None, name=None, endpoint=None):
+        from boto.rds import RDSConnection
+        RegionInfo.__init__(self, connection, name, endpoint,
+                            RDSConnection)
diff --git a/awx/lib/site-packages/boto/rds/statusinfo.py b/awx/lib/site-packages/boto/rds/statusinfo.py
new file mode 100644
index 0000000000..d4ff9b08de
--- /dev/null
+++ b/awx/lib/site-packages/boto/rds/statusinfo.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+class StatusInfo(object):
+    """
+    Describes a status message.
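+
+    :ivar status_type: The type of status being reported.
+    :ivar normal: Boolean; True when the reported status is a normal state.
+    :ivar status: The reported status value.
+    :ivar message: Additional detail about the status, if any.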
+ """ + + def __init__(self, status_type=None, normal=None, status=None, message=None): + self.status_type = status_type + self.normal = normal + self.status = status + self.message = message + + def __repr__(self): + return 'StatusInfo:%s' % self.message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'StatusType': + self.status_type = value + elif name == 'Normal': + if value.lower() == 'true': + self.normal = True + else: + self.normal = False + elif name == 'Status': + self.status = value + elif name == 'Message': + self.message = value + else: + setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/rds/vpcsecuritygroupmembership.py b/awx/lib/site-packages/boto/rds/vpcsecuritygroupmembership.py new file mode 100644 index 0000000000..e0092e9c2f --- /dev/null +++ b/awx/lib/site-packages/boto/rds/vpcsecuritygroupmembership.py @@ -0,0 +1,85 @@ +# Copyright (c) 2013 Anthony Tonns http://www.corsis.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a VPCSecurityGroupMembership +""" + + +class VPCSecurityGroupMembership(object): + """ + Represents VPC Security Group that this RDS database is a member of + + Properties reference available from the AWS documentation at + http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/\ + API_VpcSecurityGroupMembership.html + + Example:: + pri = "sg-abcdefgh" + sec = "sg-hgfedcba" + + # Create with list of str + db = c.create_dbinstance(... vpc_security_groups=[pri], ... ) + + # Modify with list of str + db.modify(... vpc_security_groups=[pri,sec], ... ) + + # Create with objects + memberships = [] + membership = VPCSecurityGroupMembership() + membership.vpc_group = pri + memberships.append(membership) + + db = c.create_dbinstance(... vpc_security_groups=memberships, ... ) + + # Modify with objects + memberships = d.vpc_security_groups + membership = VPCSecurityGroupMembership() + membership.vpc_group = sec + memberships.append(membership) + + db.modify(... vpc_security_groups=memberships, ... 
) + + :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the + current object + :ivar vpc_group: This id of the VPC security group + :ivar status: Status of the VPC security group membership + ` objects that this RDS Instance + is a member of + """ + def __init__(self, connection=None, status=None, vpc_group=None): + self.connection = connection + self.status = status + self.vpc_group = vpc_group + + def __repr__(self): + return 'VPCSecurityGroupMembership:%s' % self.vpc_group + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'VpcSecurityGroupId': + self.vpc_group = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/redshift/__init__.py b/awx/lib/site-packages/boto/redshift/__init__.py new file mode 100644 index 0000000000..fca2a790da --- /dev/null +++ b/awx/lib/site-packages/boto/redshift/__init__.py @@ -0,0 +1,55 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo + + +def regions(): + """ + Get all available regions for the AWS Redshift service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.redshift.layer1 import RedshiftConnection + cls = RedshiftConnection + return [ + RegionInfo(name='us-east-1', + endpoint='redshift.us-east-1.amazonaws.com', + connection_cls=cls), + RegionInfo(name='us-west-2', + endpoint='redshift.us-west-2.amazonaws.com', + connection_cls=cls), + RegionInfo(name='eu-west-1', + endpoint='redshift.eu-west-1.amazonaws.com', + connection_cls=cls), + RegionInfo(name='ap-northeast-1', + endpoint='redshift.ap-northeast-1.amazonaws.com', + connection_cls=cls), + ] + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/redshift/exceptions.py b/awx/lib/site-packages/boto/redshift/exceptions.py new file mode 100644 index 0000000000..b4f60dd804 --- /dev/null +++ b/awx/lib/site-packages/boto/redshift/exceptions.py @@ -0,0 +1,190 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class ClusterNotFoundFault(JSONResponseError): + pass + + +class InvalidClusterSnapshotStateFault(JSONResponseError): + pass + + +class ClusterSnapshotNotFoundFault(JSONResponseError): + pass + + +class ClusterNotFoundFault(JSONResponseError): + pass + + +class ClusterSecurityGroupQuotaExceededFault(JSONResponseError): + pass + + +class ReservedNodeOfferingNotFoundFault(JSONResponseError): + pass + + +class InvalidSubnet(JSONResponseError): + pass + + +class ClusterSubnetGroupQuotaExceededFault(JSONResponseError): + pass + + +class InvalidClusterStateFault(JSONResponseError): + pass + + +class InvalidClusterParameterGroupStateFault(JSONResponseError): + pass + + +class ClusterParameterGroupAlreadyExistsFault(JSONResponseError): + pass + + +class InvalidClusterSecurityGroupStateFault(JSONResponseError): + pass + + +class InvalidRestoreFault(JSONResponseError): + pass + + +class AuthorizationNotFoundFault(JSONResponseError): + pass + + +class ResizeNotFoundFault(JSONResponseError): + pass + + +class NumberOfNodesQuotaExceededFault(JSONResponseError): + pass + + +class ClusterSnapshotAlreadyExistsFault(JSONResponseError): + pass + + +class AuthorizationQuotaExceededFault(JSONResponseError): + pass + + +class AuthorizationAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterSnapshotQuotaExceededFault(JSONResponseError): + pass + + +class ReservedNodeNotFoundFault(JSONResponseError): + pass + + +class ReservedNodeAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterSecurityGroupAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterParameterGroupNotFoundFault(JSONResponseError): + pass + + +class ReservedNodeQuotaExceededFault(JSONResponseError): + pass + + +class ClusterQuotaExceededFault(JSONResponseError): + pass + + +class ClusterSubnetQuotaExceededFault(JSONResponseError): + pass + + +class UnsupportedOptionFault(JSONResponseError): + pass + + +class InvalidVPCNetworkStateFault(JSONResponseError): + pass + + +class ClusterSecurityGroupNotFoundFault(JSONResponseError): + pass + + +class InvalidClusterSubnetGroupStateFault(JSONResponseError): + pass + + +class ClusterSubnetGroupAlreadyExistsFault(JSONResponseError): + pass + + +class NumberOfNodesPerClusterLimitExceededFault(JSONResponseError): + pass + + +class ClusterSubnetGroupNotFoundFault(JSONResponseError): + pass + + +class ClusterParameterGroupQuotaExceededFault(JSONResponseError): + pass 
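The fault classes above and below are raised by the RedshiftConnection class (added in layer1.py later in this diff) whenever the service returns the matching error code. A minimal usage sketch, not part of the vendored sources: it assumes boto can find AWS credentials in the usual places, and the region and cluster identifier are illustrative placeholders.

    # Sketch only -- assumes AWS credentials are configured for boto.
    import boto.redshift
    from boto.redshift.exceptions import ClusterNotFoundFault

    # connect_to_region() is defined in boto/redshift/__init__.py above.
    conn = boto.redshift.connect_to_region('us-east-1')
    try:
        result = conn.describe_clusters(cluster_identifier='examplecluster')
    except ClusterNotFoundFault:
        result = None  # no cluster with that identifier in this region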
+ + +class ClusterAlreadyExistsFault(JSONResponseError): + pass + + +class InsufficientClusterCapacityFault(JSONResponseError): + pass + + +class InvalidClusterSubnetStateFault(JSONResponseError): + pass + + +class SubnetAlreadyInUse(JSONResponseError): + pass + + +class InvalidParameterCombinationFault(JSONResponseError): + pass + + +class AccessToSnapshotDeniedFault(JSONResponseError): + pass + + +class UnauthorizedOperationFault(JSONResponseError): + pass diff --git a/awx/lib/site-packages/boto/redshift/layer1.py b/awx/lib/site-packages/boto/redshift/layer1.py new file mode 100644 index 0000000000..6ba3fd3d00 --- /dev/null +++ b/awx/lib/site-packages/boto/redshift/layer1.py @@ -0,0 +1,2189 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import json +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.redshift import exceptions + + +class RedshiftConnection(AWSQueryConnection): + """ + Amazon Redshift **Overview** + This is the Amazon Redshift API Reference. This guide provides + descriptions and samples of the Amazon Redshift API. + + Amazon Redshift manages all the work of setting up, operating, and + scaling a data warehouse: provisioning capacity, monitoring and + backing up the cluster, and applying patches and upgrades to the + Amazon Redshift engine. You can focus on using your data to + acquire new insights for your business and customers. + **Are You a First-Time Amazon Redshift User?** + If you are a first-time user of Amazon Redshift, we recommend that + you begin by reading the following sections: + + + + + Service Highlights and Pricing - The `product detail page`_ + provides the Amazon Redshift value proposition, service highlights + and pricing. + + Getting Started - The `Getting Started Guide`_ includes an + example that walks you through the process of creating a cluster, + creating database tables, uploading data, and testing queries. + + + + After you complete the Getting Started Guide, we recommend that + you explore one of the following guides: + + + + Cluster Management - If you are responsible for managing Amazon + Redshift clusters, the `Cluster Management Guide`_ shows you how + to create and manage Amazon Redshift clusters. If you are an + application developer, you can use the Amazon Redshift Query API + to manage clusters programmatically. 
Additionally, the AWS SDK + libraries that wrap the underlying Amazon Redshift API simplify + your programming tasks. If you prefer a more interactive way of + managing clusters, you can use the Amazon Redshift console and the + AWS command line interface (AWS CLI). For information about the + API and CLI, go to the following manuals: + + + API Reference (this document) + + `CLI Reference`_ + + + Amazon Redshift Database Developer - If you are a + database developer, the Amazon Redshift `Database Developer + Guide`_ explains how to design, build, query, and maintain the + databases that make up your data warehouse. + + + For a list of supported AWS regions where you can provision a + cluster, go to the `Regions and Endpoints`_ section in the Amazon + Web Services Glossary . + """ + APIVersion = "2012-12-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "redshift.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "ClusterNotFound": exceptions.ClusterNotFoundFault, + "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetStateFault, + "InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupStateFault, + "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceededFault, + "InvalidClusterState": exceptions.InvalidClusterStateFault, + "InvalidRestore": exceptions.InvalidRestoreFault, + "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExistsFault, + "NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceededFault, + "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFoundFault, + "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacityFault, + "UnauthorizedOperation": exceptions.UnauthorizedOperationFault, + "ClusterQuotaExceeded": exceptions.ClusterQuotaExceededFault, + "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkStateFault, + "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFoundFault, + "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceededFault, + "InvalidSubnet": exceptions.InvalidSubnet, + "ResizeNotFound": exceptions.ResizeNotFoundFault, + "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFoundFault, + "ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceededFault, + "AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault, + "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupStateFault, + "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceededFault, + "ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceededFault, + "UnsupportedOption": exceptions.UnsupportedOptionFault, + "ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFoundFault, + "ClusterAlreadyExists": exceptions.ClusterAlreadyExistsFault, + "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExistsFault, + "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExistsFault, + "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceededFault, + "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFoundFault, + "AuthorizationNotFound": exceptions.AuthorizationNotFoundFault, + "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceededFault, + "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExistsFault, + "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotStateFault, + "ClusterParameterGroupQuotaExceeded":
exceptions.ClusterParameterGroupQuotaExceededFault, + "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExistsFault, + "ReservedNodeNotFound": exceptions.ReservedNodeNotFoundFault, + "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupStateFault, + "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExistsFault, + "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + kwargs['host'] = region.endpoint + AWSQueryConnection.__init__(self, **kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def authorize_cluster_security_group_ingress(self, + cluster_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_owner_id=None): + """ + Adds an inbound (ingress) rule to an Amazon Redshift security + group. Depending on whether the application accessing your + cluster is running on the Internet or an EC2 instance, you can + authorize inbound access to either a Classless Interdomain + Routing (CIDR) IP address range or an EC2 security group. You + can add as many as 20 ingress rules to an Amazon Redshift + security group. + The EC2 security group must be defined in the AWS region where + the cluster resides. + For an overview of CIDR blocks, see the Wikipedia article on + `Classless Inter-Domain Routing`_. + + You must also associate the security group with a cluster so + that clients running on these IP addresses or the EC2 instance + are authorized to connect to the cluster. For information + about managing security groups, go to `Working with Security + Groups`_ in the Amazon Redshift Management Guide . + + :type cluster_security_group_name: string + :param cluster_security_group_name: The name of the security group to + which the ingress rule is added. + + :type cidrip: string + :param cidrip: The IP range to be added to the Amazon Redshift security + group. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The EC2 security group to be added to + the Amazon Redshift security group. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS account number of the owner + of the security group specified by the EC2SecurityGroupName + parameter. The AWS Access Key ID is not an acceptable value. + Example: `111122223333` + + """ + params = { + 'ClusterSecurityGroupName': cluster_security_group_name, + } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='AuthorizeClusterSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def authorize_snapshot_access(self, snapshot_identifier, + account_with_restore_access, + snapshot_cluster_identifier=None): + """ + Authorizes the specified AWS customer account to restore the + specified snapshot. + + For more information about working with snapshots, go to + `Amazon Redshift Snapshots`_ in the Amazon Redshift Management + Guide .
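Continuing the sketch started earlier, here is how the two authorization calls just defined might be used; the security group name, CIDR block, snapshot identifier, and account number are placeholder values, not anything taken from this diff.

    # Sketch only -- placeholder identifiers throughout.
    conn.authorize_cluster_security_group_ingress('examplesecuritygroup',
                                                  cidrip='192.168.1.0/24')
    conn.authorize_snapshot_access('my-snapshot-id',
                                   account_with_restore_access='111122223333')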
+ + :type snapshot_identifier: string + :param snapshot_identifier: The identifier of the snapshot the account + is authorized to restore. + + :type snapshot_cluster_identifier: string + :param snapshot_cluster_identifier: + + :type account_with_restore_access: string + :param account_with_restore_access: The identifier of the AWS customer + account authorized to restore the specified snapshot. + + """ + params = { + 'SnapshotIdentifier': snapshot_identifier, + 'AccountWithRestoreAccess': account_with_restore_access, + } + if snapshot_cluster_identifier is not None: + params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier + return self._make_request( + action='AuthorizeSnapshotAccess', + verb='POST', + path='/', params=params) + + def copy_cluster_snapshot(self, source_snapshot_identifier, + target_snapshot_identifier, + source_snapshot_cluster_identifier=None): + """ + Copies the specified automated cluster snapshot to a new + manual cluster snapshot. The source must be an automated + snapshot and it must be in the available state. + + When you delete a cluster, Amazon Redshift deletes any + automated snapshots of the cluster. Also, when the retention + period of the snapshot expires, Amazon Redshift automatically + deletes it. If you want to keep an automated snapshot for a + longer period, you can make a manual copy of the snapshot. + Manual snapshots are retained until you delete them. + + For more information about working with snapshots, go to + `Amazon Redshift Snapshots`_ in the Amazon Redshift Management + Guide . + + :type source_snapshot_identifier: string + :param source_snapshot_identifier: + The identifier for the source snapshot. + + Constraints: + + + + Must be the identifier for a valid automated snapshot whose state is + "available". + + :type source_snapshot_cluster_identifier: string + :param source_snapshot_cluster_identifier: + + :type target_snapshot_identifier: string + :param target_snapshot_identifier: + The identifier given to the new manual snapshot. + + Constraints: + + + + Cannot be null, empty, or blank. + + Must contain from 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + Must be unique for the AWS account that is making the request. + + """ + params = { + 'SourceSnapshotIdentifier': source_snapshot_identifier, + 'TargetSnapshotIdentifier': target_snapshot_identifier, + } + if source_snapshot_cluster_identifier is not None: + params['SourceSnapshotClusterIdentifier'] = source_snapshot_cluster_identifier + return self._make_request( + action='CopyClusterSnapshot', + verb='POST', + path='/', params=params) + + def create_cluster(self, cluster_identifier, node_type, master_username, + master_user_password, db_name=None, cluster_type=None, + cluster_security_groups=None, + vpc_security_group_ids=None, + cluster_subnet_group_name=None, + availability_zone=None, + preferred_maintenance_window=None, + cluster_parameter_group_name=None, + automated_snapshot_retention_period=None, port=None, + cluster_version=None, allow_version_upgrade=None, + number_of_nodes=None, publicly_accessible=None, + encrypted=None): + """ + Creates a new cluster. To create the cluster in a virtual + private cloud (VPC), you must provide a cluster subnet group + name. If you don't provide a cluster subnet group name or the + cluster security group parameter, Amazon Redshift creates a + non-VPC cluster and associates the default cluster security + group with the cluster.
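As a rough illustration of the simplest call that satisfies the parameter constraints documented below, a single-node cluster needs only the four required arguments plus a cluster type; the identifier, user name, and password here are placeholders.

    # Sketch only -- 'Password9' meets the documented password rules
    # (8-64 chars, one uppercase, one lowercase, one number).
    cluster = conn.create_cluster('examplecluster', 'dw.hs1.xlarge',
                                  'adminuser', 'Password9',
                                  cluster_type='single-node')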
For more information about managing + clusters, go to `Amazon Redshift Clusters`_ in the Amazon + Redshift Management Guide . + + :type db_name: string + :param db_name: + The name of the first database to be created when the cluster is + created. + + To create additional databases after the cluster is created, connect to + the cluster with a SQL client and use SQL commands to create a + database. For more information, go to `Create a Database`_ in the + Amazon Redshift Developer Guide. + + Default: `dev` + + Constraints: + + + + Must contain 1 to 64 alphanumeric characters. + + Must contain only lowercase letters. + + Cannot be a word that is reserved by the service. A list of reserved + words can be found in `Reserved Words`_ in the Amazon Redshift + Developer Guide. + + :type cluster_identifier: string + :param cluster_identifier: A unique identifier for the cluster. You use + this identifier to refer to the cluster for any subsequent cluster + operations such as deleting or modifying. The identifier also + appears in the Amazon Redshift console. + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens. + + Alphabetic characters must be lowercase. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + Must be unique for all clusters within an AWS account. + + + Example: `myexamplecluster` + + :type cluster_type: string + :param cluster_type: The type of the cluster. When cluster type is + specified as + + + `single-node`, the **NumberOfNodes** parameter is not required. + + `multi-node`, the **NumberOfNodes** parameter is required. + + + Valid Values: `multi-node` | `single-node` + + Default: `multi-node` + + :type node_type: string + :param node_type: The node type to be provisioned for the cluster. For + information about node types, go to ` Working with Clusters`_ in + the Amazon Redshift Management Guide . + Valid Values: `dw.hs1.xlarge` | `dw.hs1.8xlarge`. + + :type master_username: string + :param master_username: + The user name associated with the master user account for the cluster + that is being created. + + Constraints: + + + + Must be 1 - 128 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word. A list of reserved words can be found in + `Reserved Words`_ in the Amazon Redshift Developer Guide. + + :type master_user_password: string + :param master_user_password: + The password associated with the master user account for the cluster + that is being created. + + Constraints: + + + + Must be between 8 and 64 characters in length. + + Must contain at least one uppercase letter. + + Must contain at least one lowercase letter. + + Must contain one number. + + Can be any printable ASCII character (ASCII code 33 to 126) except ' + (single quote), " (double quote), \, /, @, or space. + + :type cluster_security_groups: list + :param cluster_security_groups: A list of security groups to be + associated with this cluster. + Default: The default cluster security group for Amazon Redshift. + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of Virtual Private Cloud (VPC) + security groups to be associated with the cluster. + Default: The default VPC security group is associated with the cluster. + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name of a cluster subnet group to + be associated with this cluster. 
+ If this parameter is not provided the resulting cluster will be + deployed outside virtual private cloud (VPC). + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone (AZ) in which you + want Amazon Redshift to provision the cluster. For example, if you + have several EC2 instances running in a specific Availability Zone, + then you might want the cluster to be provisioned in the same zone + in order to decrease network latency. + Default: A random, system-chosen Availability Zone in the region that + is specified by the endpoint. + + Example: `us-east-1d` + + Constraint: The specified Availability Zone must be in the same region + as the current endpoint. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which automated cluster maintenance can occur. + Format: `ddd:hh24:mi-ddd:hh24:mi` + + Default: A 30-minute window selected at random from an 8-hour block of + time per region, occurring on a random day of the week. The + following list shows the time blocks for each region from which the + default maintenance windows are assigned. + + + + **US-East (Northern Virginia) Region:** 03:00-11:00 UTC + + **US-West (Oregon) Region** 06:00-14:00 UTC + + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Minimum 30-minute window. + + :type cluster_parameter_group_name: string + :param cluster_parameter_group_name: + The name of the parameter group to be associated with this cluster. + + Default: The default Amazon Redshift cluster parameter group. For + information about the default parameter group, go to `Working with + Amazon Redshift Parameter Groups`_ + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type automated_snapshot_retention_period: integer + :param automated_snapshot_retention_period: The number of days that + automated snapshots are retained. If the value is 0, automated + snapshots are disabled. Even if automated snapshots are disabled, + you can still create manual snapshots when you want with + CreateClusterSnapshot. + Default: `1` + + Constraints: Must be a value from 0 to 35. + + :type port: integer + :param port: The port number on which the cluster accepts incoming + connections. + The cluster is accessible only via the JDBC and ODBC connection + strings. Part of the connection string requires the port on which + the cluster will listen for incoming connections. + + Default: `5439` + + Valid Values: `1150-65535` + + :type cluster_version: string + :param cluster_version: The version of the Amazon Redshift engine + software that you want to deploy on the cluster. + The version selected runs on all the nodes in the cluster. + + Constraints: Only version 1.0 is currently available. + + Example: `1.0` + + :type allow_version_upgrade: boolean + :param allow_version_upgrade: If `True`, upgrades can be applied during + the maintenance window to the Amazon Redshift engine that is + running on the cluster. + When a new version of the Amazon Redshift engine is released, you can + request that the service automatically apply upgrades during the + maintenance window to the Amazon Redshift engine that is running on + your cluster. + + Default: `True` + + :type number_of_nodes: integer + :param number_of_nodes: The number of compute nodes in the cluster. 
+ This parameter is required when the **ClusterType** parameter is + specified as `multi-node`. + For information about determining how many nodes you need, go to ` + Working with Clusters`_ in the Amazon Redshift Management Guide . + + If you don't specify this parameter, you get a single-node cluster. + When requesting a multi-node cluster, you must specify the number + of nodes that you want in the cluster. + + Default: `1` + + Constraints: Value must be at least 1 and no more than 100. + + :type publicly_accessible: boolean + :param publicly_accessible: If `True`, the cluster can be accessed from + a public network. + + :type encrypted: boolean + :param encrypted: If `True`, the data in the cluster is encrypted at rest. + Default: false + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'NodeType': node_type, + 'MasterUsername': master_username, + 'MasterUserPassword': master_user_password, + } + if db_name is not None: + params['DBName'] = db_name + if cluster_type is not None: + params['ClusterType'] = cluster_type + if cluster_security_groups is not None: + self.build_list_params(params, + cluster_security_groups, + 'ClusterSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if cluster_subnet_group_name is not None: + params['ClusterSubnetGroupName'] = cluster_subnet_group_name + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if cluster_parameter_group_name is not None: + params['ClusterParameterGroupName'] = cluster_parameter_group_name + if automated_snapshot_retention_period is not None: + params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period + if port is not None: + params['Port'] = port + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if allow_version_upgrade is not None: + params['AllowVersionUpgrade'] = str( + allow_version_upgrade).lower() + if number_of_nodes is not None: + params['NumberOfNodes'] = number_of_nodes + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if encrypted is not None: + params['Encrypted'] = str( + encrypted).lower() + return self._make_request( + action='CreateCluster', + verb='POST', + path='/', params=params) + + def create_cluster_parameter_group(self, parameter_group_name, + parameter_group_family, description): + """ + Creates an Amazon Redshift parameter group. + + Creating parameter groups is independent of creating clusters. + You can associate a cluster with a parameter group when you + create the cluster. You can also associate an existing cluster + with a parameter group after the cluster is created by using + ModifyCluster. + + Parameters in the parameter group define specific behavior + that applies to the databases you create on the cluster. For + more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: + The name of the cluster parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + Must be unique within your AWS account.
+ + + This value is stored as a lower-case string. + + :type parameter_group_family: string + :param parameter_group_family: The Amazon Redshift engine version to + which the cluster parameter group applies. The cluster engine + version determines the set of parameters. + To get a list of valid parameter group family names, you can call + DescribeClusterParameterGroups. By default, Amazon Redshift returns + a list of all the parameter groups that are owned by your AWS + account, including the default parameter groups for each Amazon + Redshift engine version. The parameter group family names + associated with the default parameter groups provide you the valid + values. For example, a valid family name is "redshift-1.0". + + :type description: string + :param description: A description of the parameter group. + + """ + params = { + 'ParameterGroupName': parameter_group_name, + 'ParameterGroupFamily': parameter_group_family, + 'Description': description, + } + return self._make_request( + action='CreateClusterParameterGroup', + verb='POST', + path='/', params=params) + + def create_cluster_security_group(self, cluster_security_group_name, + description): + """ + Creates a new Amazon Redshift security group. You use security + groups to control access to non-VPC clusters. + + For information about managing security groups, go to `Amazon + Redshift Cluster Security Groups`_ in the Amazon Redshift + Management Guide . + + :type cluster_security_group_name: string + :param cluster_security_group_name: The name for the security group. + Amazon Redshift stores the value as a lowercase string. + Constraints: + + + + Must contain no more than 255 alphanumeric characters or hyphens. + + Must not be "Default". + + Must be unique for all security groups that are created by your AWS + account. + + + Example: `examplesecuritygroup` + + :type description: string + :param description: A description for the security group. + + """ + params = { + 'ClusterSecurityGroupName': cluster_security_group_name, + 'Description': description, + } + return self._make_request( + action='CreateClusterSecurityGroup', + verb='POST', + path='/', params=params) + + def create_cluster_snapshot(self, snapshot_identifier, + cluster_identifier): + """ + Creates a manual snapshot of the specified cluster. The + cluster must be in the "available" state. + + For more information about working with snapshots, go to + `Amazon Redshift Snapshots`_ in the Amazon Redshift Management + Guide . + + :type snapshot_identifier: string + :param snapshot_identifier: A unique identifier for the snapshot that + you are requesting. This identifier must be unique for all + snapshots within the AWS account. + Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-snapshot-id` + + :type cluster_identifier: string + :param cluster_identifier: The cluster identifier for which you want a + snapshot. + + """ + params = { + 'SnapshotIdentifier': snapshot_identifier, + 'ClusterIdentifier': cluster_identifier, + } + return self._make_request( + action='CreateClusterSnapshot', + verb='POST', + path='/', params=params) + + def create_cluster_subnet_group(self, cluster_subnet_group_name, + description, subnet_ids): + """ + Creates a new Amazon Redshift subnet group.
You must provide a + list of one or more subnets in your existing Amazon Virtual + Private Cloud (Amazon VPC) when creating an Amazon Redshift + subnet group. + + For information about subnet groups, go to `Amazon Redshift + Cluster Subnet Groups`_ in the Amazon Redshift Management + Guide . + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name for the subnet group. Amazon + Redshift stores the value as a lowercase string. + Constraints: + + + + Must contain no more than 255 alphanumeric characters or hyphens. + + Must not be "Default". + + Must be unique for all subnet groups that are created by your AWS + account. + + + Example: `examplesubnetgroup` + + :type description: string + :param description: A description for the subnet group. + + :type subnet_ids: list + :param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets + can be modified in a single request. + + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + 'Description': description, + } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + return self._make_request( + action='CreateClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def delete_cluster(self, cluster_identifier, + skip_final_cluster_snapshot=None, + final_cluster_snapshot_identifier=None): + """ + Deletes a previously provisioned cluster. A successful + response from the web service indicates that the request was + received correctly. If a final cluster snapshot is requested, + the status of the cluster will be "final-snapshot" while the + snapshot is being taken, then it's "deleting" once Amazon + Redshift begins deleting the cluster. Use DescribeClusters to + monitor the status of the deletion. The delete operation + cannot be canceled or reverted once submitted. For more + information about managing clusters, go to `Amazon Redshift + Clusters`_ in the Amazon Redshift Management Guide . + + :type cluster_identifier: string + :param cluster_identifier: + The identifier of the cluster to be deleted. + + Constraints: + + + + Must contain lowercase characters. + + Must contain from 1 to 63 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type skip_final_cluster_snapshot: boolean + :param skip_final_cluster_snapshot: Determines whether a final snapshot + of the cluster is created before Amazon Redshift deletes the + cluster. If `True`, a final cluster snapshot is not created. If + `False`, a final cluster snapshot is created before the cluster is + deleted. + The FinalClusterSnapshotIdentifier parameter must be specified if + SkipFinalClusterSnapshot is `False`. + + Default: `False` + + :type final_cluster_snapshot_identifier: string + :param final_cluster_snapshot_identifier: + The identifier of the final snapshot that is to be created immediately + before deleting the cluster. If this parameter is provided, + SkipFinalClusterSnapshot must be `False`. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens.
+ + """ + params = {'ClusterIdentifier': cluster_identifier, } + if skip_final_cluster_snapshot is not None: + params['SkipFinalClusterSnapshot'] = str( + skip_final_cluster_snapshot).lower() + if final_cluster_snapshot_identifier is not None: + params['FinalClusterSnapshotIdentifier'] = final_cluster_snapshot_identifier + return self._make_request( + action='DeleteCluster', + verb='POST', + path='/', params=params) + + def delete_cluster_parameter_group(self, parameter_group_name): + """ + Deletes a specified Amazon Redshift parameter group. You + cannot delete a parameter group if it is associated with a + cluster. + + :type parameter_group_name: string + :param parameter_group_name: + The name of the parameter group to be deleted. + + Constraints: + + + + Must be the name of an existing cluster parameter group. + + Cannot delete a default cluster parameter group. + + """ + params = {'ParameterGroupName': parameter_group_name, } + return self._make_request( + action='DeleteClusterParameterGroup', + verb='POST', + path='/', params=params) + + def delete_cluster_security_group(self, cluster_security_group_name): + """ + Deletes an Amazon Redshift security group. + You cannot delete a security group that is associated with any + clusters. You cannot delete the default security group. + For information about managing security groups, go to `Amazon + Redshift Cluster Security Groups`_ in the Amazon Redshift + Management Guide . + + :type cluster_security_group_name: string + :param cluster_security_group_name: The name of the cluster security + group to be deleted. + + """ + params = { + 'ClusterSecurityGroupName': cluster_security_group_name, + } + return self._make_request( + action='DeleteClusterSecurityGroup', + verb='POST', + path='/', params=params) + + def delete_cluster_snapshot(self, snapshot_identifier, + snapshot_cluster_identifier=None): + """ + Deletes the specified manual snapshot. The snapshot must be in + the "available" state, with no other users authorized to + access the snapshot. + + Unlike automated snapshots, manual snapshots are retained even + after you delete your cluster. Amazon Redshift does not delete + your manual snapshots. You must delete a manual snapshot + explicitly to avoid getting charged. If other accounts are + authorized to access the snapshot, you must revoke all of the + authorizations before you can delete the snapshot. + + :type snapshot_identifier: string + :param snapshot_identifier: The unique identifier of the manual + snapshot to be deleted. + Constraints: Must be the name of an existing snapshot that is in the + `available` state. + + :type snapshot_cluster_identifier: string + :param snapshot_cluster_identifier: + + """ + params = {'SnapshotIdentifier': snapshot_identifier, } + if snapshot_cluster_identifier is not None: + params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier + return self._make_request( + action='DeleteClusterSnapshot', + verb='POST', + path='/', params=params) + + def delete_cluster_subnet_group(self, cluster_subnet_group_name): + """ + Deletes the specified cluster subnet group. + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name of the cluster subnet group + to be deleted.
+ + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + } + return self._make_request( + action='DeleteClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def describe_cluster_parameter_groups(self, parameter_group_name=None, + max_records=None, marker=None): + """ + Returns a list of Amazon Redshift parameter groups, including + parameter groups you created and the default parameter group. + For each parameter group, the response includes the parameter + group name, description, and parameter group family name. You + can optionally specify a name to retrieve the description of a + specific parameter group. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: The name of a specific parameter group for + which to return details. By default, details about all parameter + groups and the default parameter group are returned. + + :type max_records: integer + :param max_records: The maximum number of parameter group records to + include in the response. If more records exist than the specified + `MaxRecords` value, the response includes a marker that you can use + in a subsequent DescribeClusterParameterGroups request to retrieve + the next set of records. + Default: `100` + + Constraints: Value must be at least 20 and no more than 100. + + :type marker: string + :param marker: An optional marker returned by a previous + DescribeClusterParameterGroups request to indicate the first + parameter group that the current request will return. + + """ + params = {} + if parameter_group_name is not None: + params['ParameterGroupName'] = parameter_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterParameterGroups', + verb='POST', + path='/', params=params) + + def describe_cluster_parameters(self, parameter_group_name, source=None, + max_records=None, marker=None): + """ + Returns a detailed list of parameters contained within the + specified Amazon Redshift parameter group. For each parameter + the response includes information such as parameter name, + description, data type, value, whether the parameter value is + modifiable, and so on. + + You can specify source filter to retrieve parameters of only + specific type. For example, to retrieve parameters that were + modified by a user action such as from + ModifyClusterParameterGroup, you can specify source equal to + user . + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: The name of a cluster parameter group for + which to return details. + + :type source: string + :param source: The parameter types to return. Specify `user` to show + parameters that are different form the default. Similarly, specify + `engine-default` to show parameters that are the same as the + default parameter group. + Default: All parameter types returned. + + Valid Values: `user` | `engine-default` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, response includes a marker that you can specify in your + subsequent request to retrieve remaining result. 
+ Default: `100` + + Constraints: Value must be at least 20 and no more than 100. + + :type marker: string + :param marker: An optional marker returned from a previous + **DescribeClusterParameters** request. If this parameter is + specified, the response includes only records beyond the specified + marker, up to the value specified by `MaxRecords`. + + """ + params = {'ParameterGroupName': parameter_group_name, } + if source is not None: + params['Source'] = source + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterParameters', + verb='POST', + path='/', params=params) + + def describe_cluster_security_groups(self, + cluster_security_group_name=None, + max_records=None, marker=None): + """ + Returns information about Amazon Redshift security groups. If + the name of a security group is specified, the response will + contain only information about only that security group. + + For information about managing security groups, go to`Amazon + Redshift Cluster Security Groups`_ in the Amazon Redshift + Management Guide . + + :type cluster_security_group_name: string + :param cluster_security_group_name: The name of a cluster security + group for which you are requesting details. You can specify either + the **Marker** parameter or a **ClusterSecurityGroupName** + parameter, but not both. + Example: `securitygroup1` + + :type max_records: integer + :param max_records: The maximum number of records to be included in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response, which you can use in a + subsequent DescribeClusterSecurityGroups request. + Default: `100` + + Constraints: Value must be at least 20 and no more than 100. + + :type marker: string + :param marker: An optional marker returned by a previous + DescribeClusterSecurityGroups request to indicate the first + security group that the current request will return. You can + specify either the **Marker** parameter or a + **ClusterSecurityGroupName** parameter, but not both. + + """ + params = {} + if cluster_security_group_name is not None: + params['ClusterSecurityGroupName'] = cluster_security_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterSecurityGroups', + verb='POST', + path='/', params=params) + + def describe_cluster_snapshots(self, cluster_identifier=None, + snapshot_identifier=None, + snapshot_type=None, start_time=None, + end_time=None, max_records=None, + marker=None, owner_account=None): + """ + Returns one or more snapshot objects, which contain metadata + about your cluster snapshots. By default, this operation + returns information about all snapshots of all clusters that + are owned by you AWS customer account. No information is + returned for snapshots owned by inactive AWS customer + accounts. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster for which + information about snapshots is requested. + + :type snapshot_identifier: string + :param snapshot_identifier: The snapshot identifier of the snapshot + about which to return information. + + :type snapshot_type: string + :param snapshot_type: The type of snapshots for which you are + requesting information. By default, snapshots of all types are + returned. 
+ Valid Values: `automated` | `manual` + + :type start_time: timestamp + :param start_time: A value that requests only snapshots created at or + after the specified time. The time value is specified in ISO 8601 + format. For more information about ISO 8601, go to the `ISO8601 + Wikipedia page.`_ + Example: `2012-07-16T18:00:00Z` + + :type end_time: timestamp + :param end_time: A time value that requests only snapshots created at + or before the specified time. The time value is specified in ISO + 8601 format. For more information about ISO 8601, go to the + `ISO8601 Wikipedia page.`_ + Example: `2012-07-16T18:00:00Z` + + :type max_records: integer + :param max_records: The maximum number of snapshot records to include + in the response. If more records exist than the specified + `MaxRecords` value, the response returns a marker that you can use + in a subsequent DescribeClusterSnapshots request in order to + retrieve the next set of snapshot records. + Default: `100` + + Constraints: Must be at least 20 and no more than 100. + + :type marker: string + :param marker: An optional marker returned by a previous + DescribeClusterSnapshots request to indicate the first snapshot + that the request will return. + + :type owner_account: string + :param owner_account: The AWS customer account used to create or copy + the snapshot. Use this field to filter the results to snapshots + owned by a particular account. To describe snapshots you own, + either specify your AWS customer account, or do not specify the + parameter. + + """ + params = {} + if cluster_identifier is not None: + params['ClusterIdentifier'] = cluster_identifier + if snapshot_identifier is not None: + params['SnapshotIdentifier'] = snapshot_identifier + if snapshot_type is not None: + params['SnapshotType'] = snapshot_type + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + if owner_account is not None: + params['OwnerAccount'] = owner_account + return self._make_request( + action='DescribeClusterSnapshots', + verb='POST', + path='/', params=params) + + def describe_cluster_subnet_groups(self, cluster_subnet_group_name=None, + max_records=None, marker=None): + """ + Returns one or more cluster subnet group objects, which + contain metadata about your cluster subnet groups. By default, + this operation returns information about all cluster subnet + groups that are defined in you AWS account. + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name of the cluster subnet group + for which information is requested. + + :type max_records: integer + :param max_records: The maximum number of cluster subnet group records + to include in the response. If more records exist than the + specified `MaxRecords` value, the response returns a marker that + you can use in a subsequent DescribeClusterSubnetGroups request in + order to retrieve the next set of cluster subnet group records. + Default: 100 + + Constraints: Must be at least 20 and no more than 100. + + :type marker: string + :param marker: An optional marker returned by a previous + DescribeClusterSubnetGroups request to indicate the first cluster + subnet group that the current request will return. 
+ + """ + params = {} + if cluster_subnet_group_name is not None: + params['ClusterSubnetGroupName'] = cluster_subnet_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_cluster_versions(self, cluster_version=None, + cluster_parameter_group_family=None, + max_records=None, marker=None): + """ + Returns descriptions of the available Amazon Redshift cluster + versions. You can call this operation even before creating any + clusters to learn more about the Amazon Redshift versions. For + more information about managing clusters, go to `Amazon + Redshift Clusters`_ in the Amazon Redshift Management Guide + + :type cluster_version: string + :param cluster_version: The specific cluster version to return. + Example: `1.0` + + :type cluster_parameter_group_family: string + :param cluster_parameter_group_family: + The name of a specific cluster parameter group family to return details + for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + marker is included in the response so that the following results + can be retrieved. + Default: `100` + + Constraints: Value must be at least 20 and no more than 100. + + :type marker: string + :param marker: The marker returned from a previous request. If this + parameter is specified, the response includes records beyond the + marker only, up to `MaxRecords`. + + """ + params = {} + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if cluster_parameter_group_family is not None: + params['ClusterParameterGroupFamily'] = cluster_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterVersions', + verb='POST', + path='/', params=params) + + def describe_clusters(self, cluster_identifier=None, max_records=None, + marker=None): + """ + Returns properties of provisioned clusters including general + cluster properties, cluster database properties, maintenance + and backup properties, and security and access properties. + This operation supports pagination. For more information about + managing clusters, go to `Amazon Redshift Clusters`_ in the + Amazon Redshift Management Guide . + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of a cluster whose + properties you are requesting. This parameter isn't case sensitive. + The default is that all clusters defined for an account are returned. + + :type max_records: integer + :param max_records: The maximum number of records that the response can + include. If more records exist than the specified `MaxRecords` + value, a `marker` is included in the response that can be used in a + new **DescribeClusters** request to continue listing results. + Default: `100` + + Constraints: Value must be at least 20 and no more than 100. + + :type marker: string + :param marker: An optional marker returned by a previous + **DescribeClusters** request to indicate the first cluster that the + current **DescribeClusters** request will return. 
+ You can specify either a **Marker** parameter or a + **ClusterIdentifier** parameter in a **DescribeClusters** request, + but not both. + + """ + params = {} + if cluster_identifier is not None: + params['ClusterIdentifier'] = cluster_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusters', + verb='POST', + path='/', params=params) + + def describe_default_cluster_parameters(self, parameter_group_family, + max_records=None, marker=None): + """ + Returns a list of parameter settings for the specified + parameter group family. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_family: string + :param parameter_group_family: The name of the cluster parameter group + family. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results may be retrieved. + Default: `100` + + Constraints: Value must be at least 20 and no more than 100. + + :type marker: string + :param marker: An optional marker returned from a previous + **DescribeDefaultClusterParameters** request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. + + """ + params = {'ParameterGroupFamily': parameter_group_family, } + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDefaultClusterParameters', + verb='POST', + path='/', params=params) + + def describe_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, duration=None, + max_records=None, marker=None): + """ + Returns events related to clusters, security groups, + snapshots, and parameter groups for the past 14 days. Events + specific to a particular cluster, security group, snapshot or + parameter group can be obtained by providing the name as a + parameter. By default, the past hour of events are returned. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source for which events will be returned. + If this parameter is not specified, then all sources are included + in the response. + + Constraints: + + If SourceIdentifier is supplied, SourceType must also be provided. + + + + Specify a cluster identifier when SourceType is `cluster`. + + Specify a cluster security group name when SourceType is `cluster- + security-group`. + + Specify a cluster parameter group name when SourceType is `cluster- + parameter-group`. + + Specify a cluster snapshot identifier when SourceType is `cluster- + snapshot`. + + :type source_type: string + :param source_type: + The event source to retrieve events for. If no value is specified, all + events are returned. + + Constraints: + + If SourceType is supplied, SourceIdentifier must also be provided. + + + + Specify `cluster` when SourceIdentifier is a cluster identifier. + + Specify `cluster-security-group` when SourceIdentifier is a cluster + security group name. + + Specify `cluster-parameter-group` when SourceIdentifier is a cluster + parameter group name. + + Specify `cluster-snapshot` when SourceIdentifier is a cluster + snapshot identifier. 
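A short sketch pairing source_type with a matching source_identifier, as the constraints just listed require; the cluster name is a placeholder, and duration is in minutes per the parameter docs that follow.

    # Sketch only -- the last two hours of events for one cluster.
    events = conn.describe_events(source_identifier='examplecluster',
                                  source_type='cluster',
                                  duration=120)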
+
+        :type start_time: timestamp
+        :param start_time: The beginning of the time interval to retrieve
+            events for, specified in ISO 8601 format. For more information
+            about ISO 8601, go to the `ISO8601 Wikipedia page.`_
+            Example: `2009-07-08T18:00Z`
+
+        :type end_time: timestamp
+        :param end_time: The end of the time interval for which to retrieve
+            events, specified in ISO 8601 format. For more information about
+            ISO 8601, go to the `ISO8601 Wikipedia page.`_
+            Example: `2009-07-08T18:00Z`
+
+        :type duration: integer
+        :param duration: The number of minutes prior to the time of the request
+            for which to retrieve events. For example, if the request is sent
+            at 18:00 and you specify a duration of 60, then only events which
+            have occurred after 17:00 will be returned.
+            Default: `60`
+
+        :type max_records: integer
+        :param max_records: The maximum number of records to include in the
+            response. If more records exist than the specified `MaxRecords`
+            value, a marker is included in the response so that the remaining
+            results may be retrieved.
+            Default: `100`
+
+            Constraints: Value must be at least 20 and no more than 100.
+
+        :type marker: string
+        :param marker: An optional marker returned from a previous
+            **DescribeEvents** request. If this parameter is specified, the
+            response includes only records beyond the marker, up to the value
+            specified by `MaxRecords`.
+
+        """
+        params = {}
+        if source_identifier is not None:
+            params['SourceIdentifier'] = source_identifier
+        if source_type is not None:
+            params['SourceType'] = source_type
+        if start_time is not None:
+            params['StartTime'] = start_time
+        if end_time is not None:
+            params['EndTime'] = end_time
+        if duration is not None:
+            params['Duration'] = duration
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeEvents',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_orderable_cluster_options(self, cluster_version=None,
+                                           node_type=None, max_records=None,
+                                           marker=None):
+        """
+        Returns a list of orderable cluster options. Before you create
+        a new cluster you can use this operation to find what options
+        are available, such as the EC2 Availability Zones (AZ) in the
+        specific AWS region that you can specify, and the node types
+        you can request. The node types differ by available storage,
+        memory, CPU and price. Given the costs involved, you might want
+        to obtain a list of cluster options in the specific region and
+        specify values when creating a cluster. For more information
+        about managing clusters, go to `Amazon Redshift Clusters`_ in
+        the Amazon Redshift Management Guide.
+
+        :type cluster_version: string
+        :param cluster_version: The version filter value. Specify this
+            parameter to show only the available offerings matching the
+            specified version.
+            Default: All versions.
+
+            Constraints: Must be one of the versions returned from
+            DescribeClusterVersions.
+
+        :type node_type: string
+        :param node_type: The node type filter value. Specify this parameter to
+            show only the available offerings matching the specified node type.
+
+        :type max_records: integer
+        :param max_records: The maximum number of records to include in the
+            response. If more records exist than the specified `MaxRecords`
+            value, a marker is included in the response so that the remaining
+            results may be retrieved.
+            Default: `100`
+
+            Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional marker returned from a previous
+            **DescribeOrderableClusterOptions** request. If this parameter is
+            specified, the response includes only records beyond the marker, up
+            to the value specified by `MaxRecords`.
+
+        """
+        params = {}
+        if cluster_version is not None:
+            params['ClusterVersion'] = cluster_version
+        if node_type is not None:
+            params['NodeType'] = node_type
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeOrderableClusterOptions',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_reserved_node_offerings(self,
+                                         reserved_node_offering_id=None,
+                                         max_records=None, marker=None):
+        """
+        Returns a list of the reserved node offerings available from
+        Amazon Redshift, with their descriptions, including the node
+        type, the fixed and recurring costs of reserving the node, and
+        the duration for which the node will be reserved. These
+        descriptions help you determine which reserved node offering
+        you want to purchase. You then use the unique offering ID in
+        your call to PurchaseReservedNodeOffering to reserve one or
+        more nodes for your Amazon Redshift cluster.
+
+        For more information about reserved nodes, go to `Purchasing
+        Reserved Nodes`_ in the Amazon Redshift Management Guide.
+
+        :type reserved_node_offering_id: string
+        :param reserved_node_offering_id: The unique identifier for the
+            offering.
+
+        :type max_records: integer
+        :param max_records: The maximum number of records to include in the
+            response. If more records exist than the specified `MaxRecords`
+            value, a marker is included in the response so that the remaining
+            results may be retrieved.
+            Default: `100`
+
+            Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional marker returned by a previous
+            DescribeReservedNodeOfferings request to indicate the first
+            offering that the request will return.
+
+        """
+        params = {}
+        if reserved_node_offering_id is not None:
+            params['ReservedNodeOfferingId'] = reserved_node_offering_id
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeReservedNodeOfferings',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_reserved_nodes(self, reserved_node_id=None,
+                                max_records=None, marker=None):
+        """
+        Returns the descriptions of the reserved nodes.
+
+        :type reserved_node_id: string
+        :param reserved_node_id: Identifier for the node reservation.
+
+        :type max_records: integer
+        :param max_records: The maximum number of records to include in the
+            response. If more records exist than the specified `MaxRecords`
+            value, a marker is included in the response so that the remaining
+            results may be retrieved.
+            Default: `100`
+
+            Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional marker returned by a previous
+            DescribeReservedNodes request to indicate the first reserved
+            node that the current request will return.
+
+        """
+        params = {}
+        if reserved_node_id is not None:
+            params['ReservedNodeId'] = reserved_node_id
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeReservedNodes',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_resize(self, cluster_identifier):
+        """
+        Returns information about the last resize operation for the
+        specified cluster. If no resize operation has ever been
+        initiated for the specified cluster, an `HTTP 404` error is
+        returned. If a resize operation was initiated and completed,
+        the status of the resize remains as `SUCCEEDED` until the next
+        resize.
+
+        A resize operation can be requested using ModifyCluster and
+        specifying a different number or type of nodes for the
+        cluster.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The unique identifier of a cluster whose
+            resize progress you are requesting. This parameter isn't
+            case-sensitive.
+            By default, resize operations for all clusters defined for an AWS
+            account are returned.
+
+        """
+        params = {'ClusterIdentifier': cluster_identifier, }
+        return self._make_request(
+            action='DescribeResize',
+            verb='POST',
+            path='/', params=params)
+
+    def modify_cluster(self, cluster_identifier, cluster_type=None,
+                       node_type=None, number_of_nodes=None,
+                       cluster_security_groups=None,
+                       vpc_security_group_ids=None,
+                       master_user_password=None,
+                       cluster_parameter_group_name=None,
+                       automated_snapshot_retention_period=None,
+                       preferred_maintenance_window=None,
+                       cluster_version=None, allow_version_upgrade=None):
+        """
+        Modifies the settings for a cluster. For example, you can add
+        another security or parameter group, update the preferred
+        maintenance window, or change the master user password.
+        Resetting a cluster password or modifying the security groups
+        associated with a cluster does not require a reboot. However,
+        modifying a parameter group requires a reboot for the
+        parameters to take effect. For more information about managing
+        clusters, go to `Amazon Redshift Clusters`_ in the Amazon
+        Redshift Management Guide.
+
+        You can also change the node type and the number of nodes to
+        scale the cluster up or down. When resizing a cluster, you must
+        specify both the number of nodes and the node type even if one
+        of the parameters does not change. If you specify the same
+        number of nodes and node type that are already configured for
+        the cluster, an error is returned.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The unique identifier of the cluster to be
+            modified.
+            Example: `examplecluster`
+
+        :type cluster_type: string
+        :param cluster_type: The new cluster type.
+            When you submit your cluster resize request, your existing cluster
+            goes into a read-only mode. After Amazon Redshift provisions a new
+            cluster based on your resize requirements, there will be an outage
+            for a period while the old cluster is deleted and your connection
+            is switched to the new cluster. You can use DescribeResize to
+            track the progress of the resize request.
+
+            Valid Values: `multi-node` | `single-node`
+
+        :type node_type: string
+        :param node_type: The new node type of the cluster. If you specify a
+            new node type, you must also specify the number of nodes
+            parameter.
+            When you submit your request to resize a cluster, Amazon Redshift
+            sets access permissions for the cluster to read-only. After Amazon
+            Redshift provisions a new cluster according to your resize
+            requirements, there will be a temporary outage while the old
+            cluster is deleted and your connection is switched to the new
+            cluster. When the new connection is complete, the original access
+            permissions for the cluster are restored. You can use
+            DescribeResize to track the progress of the resize request.
+
+            Valid Values: `dw.hs1.xlarge` | `dw.hs1.8xlarge`
+
+        :type number_of_nodes: integer
+        :param number_of_nodes: The new number of nodes of the cluster. If you
+            specify a new number of nodes, you must also specify the node type
+            parameter.
+            When you submit your request to resize a cluster, Amazon Redshift
+            sets access permissions for the cluster to read-only. After Amazon
+            Redshift provisions a new cluster according to your resize
+            requirements, there will be a temporary outage while the old
+            cluster is deleted and your connection is switched to the new
+            cluster. When the new connection is complete, the original access
+            permissions for the cluster are restored. You can use
+            DescribeResize to track the progress of the resize request.
+
+            Valid Values: Integer greater than `0`.
+
+        :type cluster_security_groups: list
+        :param cluster_security_groups:
+            A list of cluster security groups to be authorized on this cluster.
+            This change is asynchronously applied as soon as possible.
+
+            Security groups currently associated with the cluster and not in
+            the list of groups to apply will be revoked from the cluster.
+
+            Constraints:
+
+            Must be 1 to 255 alphanumeric characters or hyphens
+            First character must be a letter
+            Cannot end with a hyphen or contain two consecutive hyphens
+
+        :type vpc_security_group_ids: list
+        :param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
+            security groups to be associated with the cluster.
+
+        :type master_user_password: string
+        :param master_user_password:
+            The new password for the cluster master user. This change is
+            asynchronously applied as soon as possible. Between the time of the
+            request and the completion of the request, the `MasterUserPassword`
+            element exists in the `PendingModifiedValues` element of the
+            operation response.
+            Operations never return the password, so this operation provides a
+            way to regain access to the master user account for a cluster if
+            the password is lost.
+
+            Default: Uses existing setting.
+
+            Constraints:
+
+            Must be between 8 and 64 characters in length.
+            Must contain at least one uppercase letter.
+            Must contain at least one lowercase letter.
+            Must contain one number.
+            Can be any printable ASCII character (ASCII code 33 to 126)
+            except ' (single quote), " (double quote), \, /, @, or space.
+
+        :type cluster_parameter_group_name: string
+        :param cluster_parameter_group_name: The name of the cluster parameter
+            group to apply to this cluster. This change is applied only after
+            the cluster is rebooted. To reboot a cluster, use RebootCluster.
+            Default: Uses existing setting.
+
+            Constraints: The cluster parameter group must be in the same
+            parameter group family that matches the cluster version.
+
+        :type automated_snapshot_retention_period: integer
+        :param automated_snapshot_retention_period: The number of days that
+            automated snapshots are retained. If the value is 0, automated
+            snapshots are disabled. Even if automated snapshots are disabled,
+            you can still create manual snapshots when you want with
+            CreateClusterSnapshot.
+ If you decrease the automated snapshot retention period from its + current value, existing automated snapshots which fall outside of + the new retention period will be immediately deleted. + + Default: Uses existing setting. + + Constraints: Must be a value from 0 to 35. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur, if necessary. If system + maintenance is necessary during the window, it may result in an + outage. + This maintenance window change is made immediately. If the new + maintenance window indicates the current time, there must be at + least 120 minutes between the current time and end of the window in + order to ensure that pending changes are applied. + + Default: Uses existing setting. + + Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`. + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Must be at least 30 minutes. + + :type cluster_version: string + :param cluster_version: The new version number of the Amazon Redshift + engine to upgrade to. + For major version upgrades, if a non-default cluster parameter group is + currently in use, a new cluster parameter group in the cluster + parameter group family for the new version must be specified. The + new cluster parameter group can be the default for that cluster + parameter group family. For more information about managing + parameter groups, go to `Amazon Redshift Parameter Groups`_ in the + Amazon Redshift Management Guide . + + Example: `1.0` + + :type allow_version_upgrade: boolean + :param allow_version_upgrade: If `True`, upgrades will be applied + automatically to the cluster during the maintenance window. + Default: `False` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + if cluster_type is not None: + params['ClusterType'] = cluster_type + if node_type is not None: + params['NodeType'] = node_type + if number_of_nodes is not None: + params['NumberOfNodes'] = number_of_nodes + if cluster_security_groups is not None: + self.build_list_params(params, + cluster_security_groups, + 'ClusterSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if master_user_password is not None: + params['MasterUserPassword'] = master_user_password + if cluster_parameter_group_name is not None: + params['ClusterParameterGroupName'] = cluster_parameter_group_name + if automated_snapshot_retention_period is not None: + params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if allow_version_upgrade is not None: + params['AllowVersionUpgrade'] = str( + allow_version_upgrade).lower() + return self._make_request( + action='ModifyCluster', + verb='POST', + path='/', params=params) + + def modify_cluster_parameter_group(self, parameter_group_name, + parameters): + """ + Modifies the parameters of a parameter group. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: The name of the parameter group to be + modified. 
+ + :type parameters: list + :param parameters: An array of parameters to be modified. A maximum of + 20 parameters can be modified in a single request. + For each parameter to be modified, you must supply at least the + parameter name and parameter value; other name-value pairs of the + parameter are optional. + + """ + params = {'ParameterGroupName': parameter_group_name, } + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion')) + return self._make_request( + action='ModifyClusterParameterGroup', + verb='POST', + path='/', params=params) + + def modify_cluster_subnet_group(self, cluster_subnet_group_name, + subnet_ids, description=None): + """ + Modifies a cluster subnet group to include the specified list + of VPC subnets. The operation replaces the existing list of + subnets with the new list of subnets. + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name of the subnet group to be + modified. + + :type description: string + :param description: A text description of the subnet group to be + modified. + + :type subnet_ids: list + :param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets + can be modified in a single request. + + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + if description is not None: + params['Description'] = description + return self._make_request( + action='ModifyClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def purchase_reserved_node_offering(self, reserved_node_offering_id, + node_count=None): + """ + Allows you to purchase reserved nodes. Amazon Redshift offers + a predefined set of reserved node offerings. You can purchase + one of the offerings. You can call the + DescribeReservedNodeOfferings API to obtain the available + reserved node offerings. You can call this API by providing a + specific reserved node offering and the number of nodes you + want to reserve. + + For more information about managing parameter groups, go to + `Purchasing Reserved Nodes`_ in the Amazon Redshift Management + Guide . + + :type reserved_node_offering_id: string + :param reserved_node_offering_id: The unique identifier of the reserved + node offering you want to purchase. + + :type node_count: integer + :param node_count: The number of reserved nodes you want to purchase. + Default: `1` + + """ + params = { + 'ReservedNodeOfferingId': reserved_node_offering_id, + } + if node_count is not None: + params['NodeCount'] = node_count + return self._make_request( + action='PurchaseReservedNodeOffering', + verb='POST', + path='/', params=params) + + def reboot_cluster(self, cluster_identifier): + """ + Reboots a cluster. This action is taken as soon as possible. + It results in a momentary outage to the cluster, during which + the cluster status is set to `rebooting`. A cluster event is + created when the reboot is completed. Any pending cluster + modifications (see ModifyCluster) are applied at this reboot. + For more information about managing clusters, go to `Amazon + Redshift Clusters`_ in the Amazon Redshift Management Guide + + :type cluster_identifier: string + :param cluster_identifier: The cluster identifier. 
+
+        """
+        params = {'ClusterIdentifier': cluster_identifier, }
+        return self._make_request(
+            action='RebootCluster',
+            verb='POST',
+            path='/', params=params)
+
+    def reset_cluster_parameter_group(self, parameter_group_name,
+                                      reset_all_parameters=None,
+                                      parameters=None):
+        """
+        Sets one or more parameters of the specified parameter group
+        to their default values and sets the source values of the
+        parameters to "engine-default". To reset the entire parameter
+        group, specify the ResetAllParameters parameter. For parameter
+        changes to take effect you must reboot any associated
+        clusters.
+
+        :type parameter_group_name: string
+        :param parameter_group_name: The name of the cluster parameter group
+            to be reset.
+
+        :type reset_all_parameters: boolean
+        :param reset_all_parameters: If `True`, all parameters in the specified
+            parameter group will be reset to their default values.
+            Default: `True`
+
+        :type parameters: list
+        :param parameters: An array of names of parameters to be reset. If the
+            ResetAllParameters option is not used, then at least one parameter
+            name must be supplied.
+            Constraints: A maximum of 20 parameters can be reset in a single
+            request.
+
+        """
+        params = {'ParameterGroupName': parameter_group_name, }
+        if reset_all_parameters is not None:
+            params['ResetAllParameters'] = str(
+                reset_all_parameters).lower()
+        if parameters is not None:
+            self.build_complex_list_params(
+                params, parameters,
+                'Parameters.member',
+                ('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion'))
+        return self._make_request(
+            action='ResetClusterParameterGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def restore_from_cluster_snapshot(self, cluster_identifier,
+                                      snapshot_identifier,
+                                      snapshot_cluster_identifier=None,
+                                      port=None, availability_zone=None,
+                                      allow_version_upgrade=None,
+                                      cluster_subnet_group_name=None,
+                                      publicly_accessible=None,
+                                      owner_account=None):
+        """
+        Creates a new cluster from a snapshot. Amazon Redshift creates
+        the resulting cluster with the same configuration as the
+        original cluster from which the snapshot was created, except
+        that the new cluster is created with the default cluster
+        security and parameter group. After Amazon Redshift creates
+        the cluster, you can use the ModifyCluster API to associate a
+        different security group and different parameter group with
+        the restored cluster.
+
+        If a snapshot is taken of a cluster in a VPC, you can restore
+        it only in a VPC. In this case, you must provide a cluster
+        subnet group where you want the cluster restored. If a
+        snapshot is taken of a cluster outside a VPC, then you can
+        restore it only outside a VPC.
+
+        For more information about working with snapshots, go to
+        `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+        Guide.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The identifier of the cluster that will be
+            created from restoring the snapshot.
+
+            Constraints:
+
+            Must contain from 1 to 63 alphanumeric characters or hyphens.
+            Alphabetic characters must be lowercase.
+            First character must be a letter.
+            Cannot end with a hyphen or contain two consecutive hyphens.
+            Must be unique for all clusters within an AWS account.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The name of the snapshot from which to
+            create the new cluster. This parameter isn't case sensitive.
+            Example: `my-snapshot-id`
+
+        :type snapshot_cluster_identifier: string
+        :param snapshot_cluster_identifier:
+
+        :type port: integer
+        :param port: The port number on which the cluster accepts connections.
+            Default: The same port as the original cluster.
+
+            Constraints: Must be between `1115` and `65535`.
+
+        :type availability_zone: string
+        :param availability_zone: The Amazon EC2 Availability Zone in which to
+            restore the cluster.
+            Default: A random, system-chosen Availability Zone.
+
+            Example: `us-east-1a`
+
+        :type allow_version_upgrade: boolean
+        :param allow_version_upgrade: If `True`, upgrades can be applied during
+            the maintenance window to the Amazon Redshift engine that is
+            running on the cluster.
+            Default: `True`
+
+        :type cluster_subnet_group_name: string
+        :param cluster_subnet_group_name: The name of the subnet group where
+            you want the cluster restored.
+            A snapshot of a cluster in a VPC can be restored only in a VPC.
+            Therefore, you must provide the name of a subnet group where you
+            want the cluster restored.
+
+        :type publicly_accessible: boolean
+        :param publicly_accessible: If `True`, the cluster can be accessed from
+            a public network.
+
+        :type owner_account: string
+        :param owner_account: The AWS customer account used to create or copy
+            the snapshot. Required if you are restoring a snapshot you do not
+            own, optional if you own the snapshot.
+
+        """
+        params = {
+            'ClusterIdentifier': cluster_identifier,
+            'SnapshotIdentifier': snapshot_identifier,
+        }
+        if snapshot_cluster_identifier is not None:
+            params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
+        if port is not None:
+            params['Port'] = port
+        if availability_zone is not None:
+            params['AvailabilityZone'] = availability_zone
+        if allow_version_upgrade is not None:
+            params['AllowVersionUpgrade'] = str(
+                allow_version_upgrade).lower()
+        if cluster_subnet_group_name is not None:
+            params['ClusterSubnetGroupName'] = cluster_subnet_group_name
+        if publicly_accessible is not None:
+            params['PubliclyAccessible'] = str(
+                publicly_accessible).lower()
+        if owner_account is not None:
+            params['OwnerAccount'] = owner_account
+        return self._make_request(
+            action='RestoreFromClusterSnapshot',
+            verb='POST',
+            path='/', params=params)
+
+    def revoke_cluster_security_group_ingress(self,
+                                              cluster_security_group_name,
+                                              cidrip=None,
+                                              ec2_security_group_name=None,
+                                              ec2_security_group_owner_id=None):
+        """
+        Revokes an ingress rule in an Amazon Redshift security group
+        for a previously authorized IP range or Amazon EC2 security
+        group. To add an ingress rule, see
+        AuthorizeClusterSecurityGroupIngress. For information about
+        managing security groups, go to `Amazon Redshift Cluster
+        Security Groups`_ in the Amazon Redshift Management Guide.
+
+        :type cluster_security_group_name: string
+        :param cluster_security_group_name: The name of the security group
+            from which to revoke the ingress rule.
+
+        :type cidrip: string
+        :param cidrip: The IP range for which to revoke access. This range must
+            be a valid Classless Inter-Domain Routing (CIDR) block of IP
+            addresses. If `CIDRIP` is specified, `EC2SecurityGroupName` and
+            `EC2SecurityGroupOwnerId` cannot be provided.
+
+        :type ec2_security_group_name: string
+        :param ec2_security_group_name: The name of the EC2 Security Group
+            whose access is to be revoked. If `EC2SecurityGroupName` is
+            specified, `EC2SecurityGroupOwnerId` must also be provided and
+            `CIDRIP` cannot be provided.
+
+        :type ec2_security_group_owner_id: string
+        :param ec2_security_group_owner_id: The AWS account number of the owner
+            of the security group specified in the `EC2SecurityGroupName`
+            parameter. The AWS access key ID is not an acceptable value. If
+            `EC2SecurityGroupOwnerId` is specified, `EC2SecurityGroupName` must
+            also be provided, and `CIDRIP` cannot be provided.
+            Example: `111122223333`
+
+        """
+        params = {
+            'ClusterSecurityGroupName': cluster_security_group_name,
+        }
+        if cidrip is not None:
+            params['CIDRIP'] = cidrip
+        if ec2_security_group_name is not None:
+            params['EC2SecurityGroupName'] = ec2_security_group_name
+        if ec2_security_group_owner_id is not None:
+            params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
+        return self._make_request(
+            action='RevokeClusterSecurityGroupIngress',
+            verb='POST',
+            path='/', params=params)
+
+    def revoke_snapshot_access(self, snapshot_identifier,
+                               account_with_restore_access,
+                               snapshot_cluster_identifier=None):
+        """
+        Removes the ability of the specified AWS customer account to
+        restore the specified snapshot. If the account is currently
+        restoring the snapshot, the restore will run to completion.
+
+        For more information about working with snapshots, go to
+        `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+        Guide.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The identifier of the snapshot that the
+            account can no longer access.
+
+        :type snapshot_cluster_identifier: string
+        :param snapshot_cluster_identifier:
+
+        :type account_with_restore_access: string
+        :param account_with_restore_access: The identifier of the AWS customer
+            account that can no longer restore the specified snapshot.
+
+        """
+        params = {
+            'SnapshotIdentifier': snapshot_identifier,
+            'AccountWithRestoreAccess': account_with_restore_access,
+        }
+        if snapshot_cluster_identifier is not None:
+            params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
+        return self._make_request(
+            action='RevokeSnapshotAccess',
+            verb='POST',
+            path='/', params=params)
+
+    def _make_request(self, action, verb, path, params):
+        params['ContentType'] = 'JSON'
+        response = self.make_request(action=action, verb='POST',
+                                     path='/', params=params)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 200:
+            return json.loads(body)
+        else:
+            json_body = json.loads(body)
+            fault_name = json_body.get('Error', {}).get('Code', None)
+            exception_class = self._faults.get(fault_name, self.ResponseError)
+            raise exception_class(response.status, response.reason,
+                                  body=json_body)
diff --git a/awx/lib/site-packages/boto/regioninfo.py b/awx/lib/site-packages/boto/regioninfo.py
new file mode 100644
index 0000000000..6e936b3793
--- /dev/null
+++ b/awx/lib/site-packages/boto/regioninfo.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class RegionInfo(object):
+    """
+    Represents an AWS Region
+    """
+
+    def __init__(self, connection=None, name=None, endpoint=None,
+                 connection_cls=None):
+        self.connection = connection
+        self.name = name
+        self.endpoint = endpoint
+        self.connection_cls = connection_cls
+
+    def __repr__(self):
+        return 'RegionInfo:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'regionName':
+            self.name = value
+        elif name == 'regionEndpoint':
+            self.endpoint = value
+        else:
+            setattr(self, name, value)
+
+    def connect(self, **kw_params):
+        """
+        Connect to this Region's endpoint. Returns a connection
+        object pointing to the endpoint associated with this region.
+        You may pass any of the arguments accepted by the connection
+        class's constructor as keyword arguments and they will be
+        passed along to the connection object.
+
+        :rtype: Connection object
+        :return: The connection to this region's endpoint
+        """
+        if self.connection_cls:
+            return self.connection_cls(region=self, **kw_params)
diff --git a/awx/lib/site-packages/boto/resultset.py b/awx/lib/site-packages/boto/resultset.py
new file mode 100644
index 0000000000..f89ddbc032
--- /dev/null
+++ b/awx/lib/site-packages/boto/resultset.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.user import User
+
+class ResultSet(list):
+    """
+    The ResultSet is used to pass results back from the Amazon services
+    to the client. It is a light wrapper around Python's :py:class:`list`
+    class, with some additional methods for parsing XML results from AWS.
+    Because I don't really want any dependencies on external libraries,
+    I'm using the standard SAX parser that comes with Python. The good
+    news is that it's quite fast and efficient but it makes some things
+    rather difficult.
+
+    You can pass in, as the marker_elem parameter, a list of tuples.
+ Each tuple contains a string as the first element which represents + the XML element that the resultset needs to be on the lookout for + and a Python class as the second element of the tuple. Each time the + specified element is found in the XML, a new instance of the class + will be created and popped onto the stack. + + :ivar str next_token: A hash used to assist in paging through very long + result sets. In most cases, passing this value to certain methods + will give you another 'page' of results. + """ + def __init__(self, marker_elem=None): + list.__init__(self) + if isinstance(marker_elem, list): + self.markers = marker_elem + else: + self.markers = [] + self.marker = None + self.key_marker = None + self.next_marker = None # avail when delimiter used + self.next_key_marker = None + self.next_upload_id_marker = None + self.next_version_id_marker = None + self.next_generation_marker= None + self.version_id_marker = None + self.is_truncated = False + self.next_token = None + self.status = True + + def startElement(self, name, attrs, connection): + for t in self.markers: + if name == t[0]: + obj = t[1](connection) + self.append(obj) + return obj + if name == 'Owner': + # Makes owner available for get_service and + # perhaps other lists where not handled by + # another element. + self.owner = User() + return self.owner + return None + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + + def endElement(self, name, value, connection): + if name == 'IsTruncated': + self.is_truncated = self.to_boolean(value) + elif name == 'Marker': + self.marker = value + elif name == 'KeyMarker': + self.key_marker = value + elif name == 'NextMarker': + self.next_marker = value + elif name == 'NextKeyMarker': + self.next_key_marker = value + elif name == 'VersionIdMarker': + self.version_id_marker = value + elif name == 'NextVersionIdMarker': + self.next_version_id_marker = value + elif name == 'NextGenerationMarker': + self.next_generation_marker = value + elif name == 'UploadIdMarker': + self.upload_id_marker = value + elif name == 'NextUploadIdMarker': + self.next_upload_id_marker = value + elif name == 'Bucket': + self.bucket = value + elif name == 'MaxUploads': + self.max_uploads = int(value) + elif name == 'MaxItems': + self.max_items = int(value) + elif name == 'Prefix': + self.prefix = value + elif name == 'return': + self.status = self.to_boolean(value) + elif name == 'StatusCode': + self.status = self.to_boolean(value, 'Success') + elif name == 'ItemName': + self.append(value) + elif name == 'NextToken': + self.next_token = value + elif name == 'BoxUsage': + try: + connection.box_usage += float(value) + except: + pass + elif name == 'IsValid': + self.status = self.to_boolean(value, 'True') + else: + setattr(self, name, value) + +class BooleanResult(object): + + def __init__(self, marker_elem=None): + self.status = True + self.request_id = None + self.box_usage = None + + def __repr__(self): + if self.status: + return 'True' + else: + return 'False' + + def __nonzero__(self): + return self.status + + def startElement(self, name, attrs, connection): + return None + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + + def endElement(self, name, value, connection): + if name == 'return': + self.status = self.to_boolean(value) + elif name == 'StatusCode': + self.status = self.to_boolean(value, 'Success') + elif name == 'IsValid': + self.status = self.to_boolean(value, 
'True')
+        elif name == 'RequestId':
+            self.request_id = value
+        elif name == 'requestId':
+            self.request_id = value
+        elif name == 'BoxUsage':
+            self.box_usage = value
+        else:
+            setattr(self, name, value)
diff --git a/awx/lib/site-packages/boto/roboto/__init__.py b/awx/lib/site-packages/boto/roboto/__init__.py
new file mode 100644
index 0000000000..792d600548
--- /dev/null
+++ b/awx/lib/site-packages/boto/roboto/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/awx/lib/site-packages/boto/roboto/awsqueryrequest.py b/awx/lib/site-packages/boto/roboto/awsqueryrequest.py
new file mode 100644
index 0000000000..6d9507189e
--- /dev/null
+++ b/awx/lib/site-packages/boto/roboto/awsqueryrequest.py
@@ -0,0 +1,504 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
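+
+# A minimal usage sketch (hypothetical names, not part of boto): commands
+# built on this module subclass AWSQueryRequest, declare their parameters
+# with boto.roboto.param.Param, and let do_cli() handle option parsing,
+# value encoding, and the request/response cycle.
+#
+#     class DescribeWidgets(AWSQueryRequest):
+#         ServiceClass = WidgetService  # a hypothetical AWSQueryService
+#         Description = 'Describe the widgets in your account'
+#         Params = [Param(name='MaxRecords', ptype='integer', optional=True,
+#                         doc='maximum number of records to return')]
+#
+#     if __name__ == '__main__':
+#         DescribeWidgets().do_cli()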
+ +import sys +import os +import boto +import optparse +import copy +import boto.exception +import boto.roboto.awsqueryservice + +import bdb +import traceback +try: + import epdb as debugger +except ImportError: + import pdb as debugger + +def boto_except_hook(debugger_flag, debug_flag): + def excepthook(typ, value, tb): + if typ is bdb.BdbQuit: + sys.exit(1) + sys.excepthook = sys.__excepthook__ + + if debugger_flag and sys.stdout.isatty() and sys.stdin.isatty(): + if debugger.__name__ == 'epdb': + debugger.post_mortem(tb, typ, value) + else: + debugger.post_mortem(tb) + elif debug_flag: + print traceback.print_tb(tb) + sys.exit(1) + else: + print value + sys.exit(1) + + return excepthook + +class Line(object): + + def __init__(self, fmt, data, label): + self.fmt = fmt + self.data = data + self.label = label + self.line = '%s\t' % label + self.printed = False + + def append(self, datum): + self.line += '%s\t' % datum + + def print_it(self): + if not self.printed: + print self.line + self.printed = True + +class RequiredParamError(boto.exception.BotoClientError): + + def __init__(self, required): + self.required = required + s = 'Required parameters are missing: %s' % self.required + boto.exception.BotoClientError.__init__(self, s) + +class EncoderError(boto.exception.BotoClientError): + + def __init__(self, error_msg): + s = 'Error encoding value (%s)' % error_msg + boto.exception.BotoClientError.__init__(self, s) + +class FilterError(boto.exception.BotoClientError): + + def __init__(self, filters): + self.filters = filters + s = 'Unknown filters: %s' % self.filters + boto.exception.BotoClientError.__init__(self, s) + +class Encoder: + + @classmethod + def encode(cls, p, rp, v, label=None): + if p.name.startswith('_'): + return + try: + mthd = getattr(cls, 'encode_'+p.ptype) + mthd(p, rp, v, label) + except AttributeError: + raise EncoderError('Unknown type: %s' % p.ptype) + + @classmethod + def encode_string(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = v + + encode_file = encode_string + encode_enum = encode_string + + @classmethod + def encode_integer(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = '%d' % v + + @classmethod + def encode_boolean(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + if v: + v = 'true' + else: + v = 'false' + rp[label] = v + + @classmethod + def encode_datetime(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = v + + @classmethod + def encode_array(cls, p, rp, v, l): + v = boto.utils.mklist(v) + if l: + label = l + else: + label = p.name + label = label + '.%d' + for i, value in enumerate(v): + rp[label%(i+1)] = value + +class AWSQueryRequest(object): + + ServiceClass = None + + Description = '' + Params = [] + Args = [] + Filters = [] + Response = {} + + CLITypeMap = {'string' : 'string', + 'integer' : 'int', + 'int' : 'int', + 'enum' : 'choice', + 'datetime' : 'string', + 'dateTime' : 'string', + 'file' : 'string', + 'boolean' : None} + + @classmethod + def name(cls): + return cls.__name__ + + def __init__(self, **args): + self.args = args + self.parser = None + self.cli_options = None + self.cli_args = None + self.cli_output_format = None + self.connection = None + self.list_markers = [] + self.item_markers = [] + self.request_params = {} + self.connection_args = None + + def __repr__(self): + return self.name() + + def get_connection(self, **args): + if self.connection is None: + self.connection = self.ServiceClass(**args) + return self.connection + + 
@property + def status(self): + retval = None + if self.http_response is not None: + retval = self.http_response.status + return retval + + @property + def reason(self): + retval = None + if self.http_response is not None: + retval = self.http_response.reason + return retval + + @property + def request_id(self): + retval = None + if self.aws_response is not None: + retval = getattr(self.aws_response, 'requestId') + return retval + + def process_filters(self): + filters = self.args.get('filters', []) + filter_names = [f['name'] for f in self.Filters] + unknown_filters = [f for f in filters if f not in filter_names] + if unknown_filters: + raise FilterError('Unknown filters: %s' % unknown_filters) + for i, filter in enumerate(self.Filters): + name = filter['name'] + if name in filters: + self.request_params['Filter.%d.Name' % (i+1)] = name + for j, value in enumerate(boto.utils.mklist(filters[name])): + Encoder.encode(filter, self.request_params, value, + 'Filter.%d.Value.%d' % (i+1, j+1)) + + def process_args(self, **args): + """ + Responsible for walking through Params defined for the request and: + + * Matching them with keyword parameters passed to the request + constructor or via the command line. + * Checking to see if all required parameters have been specified + and raising an exception, if not. + * Encoding each value into the set of request parameters that will + be sent in the request to the AWS service. + """ + self.args.update(args) + self.connection_args = copy.copy(self.args) + if 'debug' in self.args and self.args['debug'] >= 2: + boto.set_stream_logger(self.name()) + required = [p.name for p in self.Params+self.Args if not p.optional] + for param in self.Params+self.Args: + if param.long_name: + python_name = param.long_name.replace('-', '_') + else: + python_name = boto.utils.pythonize_name(param.name, '_') + value = None + if python_name in self.args: + value = self.args[python_name] + if value is None: + value = param.default + if value is not None: + if param.name in required: + required.remove(param.name) + if param.request_param: + if param.encoder: + param.encoder(param, self.request_params, value) + else: + Encoder.encode(param, self.request_params, value) + if python_name in self.args: + del self.connection_args[python_name] + if required: + l = [] + for p in self.Params+self.Args: + if p.name in required: + if p.short_name and p.long_name: + l.append('(%s, %s)' % (p.optparse_short_name, + p.optparse_long_name)) + elif p.short_name: + l.append('(%s)' % p.optparse_short_name) + else: + l.append('(%s)' % p.optparse_long_name) + raise RequiredParamError(','.join(l)) + boto.log.debug('request_params: %s' % self.request_params) + self.process_markers(self.Response) + + def process_markers(self, fmt, prev_name=None): + if fmt and fmt['type'] == 'object': + for prop in fmt['properties']: + self.process_markers(prop, fmt['name']) + elif fmt and fmt['type'] == 'array': + self.list_markers.append(prev_name) + self.item_markers.append(fmt['name']) + + def send(self, verb='GET', **args): + self.process_args(**args) + self.process_filters() + conn = self.get_connection(**self.connection_args) + self.http_response = conn.make_request(self.name(), + self.request_params, + verb=verb) + self.body = self.http_response.read() + boto.log.debug(self.body) + if self.http_response.status == 200: + self.aws_response = boto.jsonresponse.Element(list_marker=self.list_markers, + item_marker=self.item_markers) + h = boto.jsonresponse.XmlHandler(self.aws_response, self) + h.parse(self.body) + 
return self.aws_response
+        else:
+            boto.log.error('%s %s' % (self.http_response.status,
+                                      self.http_response.reason))
+            boto.log.error('%s' % self.body)
+            raise conn.ResponseError(self.http_response.status,
+                                     self.http_response.reason,
+                                     self.body)
+
+    def add_standard_options(self):
+        group = optparse.OptionGroup(self.parser, 'Standard Options')
+        # add standard options that all commands get
+        group.add_option('-D', '--debug', action='store_true',
+                         help='Turn on all debugging output')
+        group.add_option('--debugger', action='store_true',
+                         default=False,
+                         help='Enable interactive debugger on error')
+        group.add_option('-U', '--url', action='store',
+                         help='Override service URL with value provided')
+        group.add_option('--region', action='store',
+                         help='Name of the region to connect to')
+        group.add_option('-I', '--access-key-id', action='store',
+                         help='Override access key value')
+        group.add_option('-S', '--secret-key', action='store',
+                         help='Override secret key value')
+        group.add_option('--version', action='store_true',
+                         help='Display version string')
+        if self.Filters:
+            group.add_option('--help-filters', action='store_true',
+                             help='Display list of available filters')
+            group.add_option('--filter', action='append',
+                             metavar=' name=value',
+                             help='A filter for limiting the results')
+        self.parser.add_option_group(group)
+
+    def process_standard_options(self, options, args, d):
+        if hasattr(options, 'help_filters') and options.help_filters:
+            print 'Available filters:'
+            for filter in self.Filters:
+                print '%s\t%s' % (filter.name, filter.doc)
+            sys.exit(0)
+        if options.debug:
+            self.args['debug'] = 2
+        if options.url:
+            self.args['url'] = options.url
+        if options.region:
+            self.args['region'] = options.region
+        if options.access_key_id:
+            self.args['aws_access_key_id'] = options.access_key_id
+        if options.secret_key:
+            self.args['aws_secret_access_key'] = options.secret_key
+        if options.version:
+            # TODO - Where should the version # come from?
+ print 'version x.xx' + exit(0) + sys.excepthook = boto_except_hook(options.debugger, + options.debug) + + def get_usage(self): + s = 'usage: %prog [options] ' + l = [ a.long_name for a in self.Args ] + s += ' '.join(l) + for a in self.Args: + if a.doc: + s += '\n\n\t%s - %s' % (a.long_name, a.doc) + return s + + def build_cli_parser(self): + self.parser = optparse.OptionParser(description=self.Description, + usage=self.get_usage()) + self.add_standard_options() + for param in self.Params: + ptype = action = choices = None + if param.ptype in self.CLITypeMap: + ptype = self.CLITypeMap[param.ptype] + action = 'store' + if param.ptype == 'boolean': + action = 'store_true' + elif param.ptype == 'array': + if len(param.items) == 1: + ptype = param.items[0]['type'] + action = 'append' + elif param.cardinality != 1: + action = 'append' + if ptype or action == 'store_true': + if param.short_name: + self.parser.add_option(param.optparse_short_name, + param.optparse_long_name, + action=action, type=ptype, + choices=param.choices, + help=param.doc) + elif param.long_name: + self.parser.add_option(param.optparse_long_name, + action=action, type=ptype, + choices=param.choices, + help=param.doc) + + def do_cli(self): + if not self.parser: + self.build_cli_parser() + self.cli_options, self.cli_args = self.parser.parse_args() + d = {} + self.process_standard_options(self.cli_options, self.cli_args, d) + for param in self.Params: + if param.long_name: + p_name = param.long_name.replace('-', '_') + else: + p_name = boto.utils.pythonize_name(param.name) + value = getattr(self.cli_options, p_name) + if param.ptype == 'file' and value: + if value == '-': + value = sys.stdin.read() + else: + path = os.path.expanduser(value) + path = os.path.expandvars(path) + if os.path.isfile(path): + fp = open(path) + value = fp.read() + fp.close() + else: + self.parser.error('Unable to read file: %s' % path) + d[p_name] = value + for arg in self.Args: + if arg.long_name: + p_name = arg.long_name.replace('-', '_') + else: + p_name = boto.utils.pythonize_name(arg.name) + value = None + if arg.cardinality == 1: + if len(self.cli_args) >= 1: + value = self.cli_args[0] + else: + value = self.cli_args + d[p_name] = value + self.args.update(d) + if hasattr(self.cli_options, 'filter') and self.cli_options.filter: + d = {} + for filter in self.cli_options.filter: + name, value = filter.split('=') + d[name] = value + if 'filters' in self.args: + self.args['filters'].update(d) + else: + self.args['filters'] = d + try: + response = self.main() + self.cli_formatter(response) + except RequiredParamError, e: + print e + sys.exit(1) + except self.ServiceClass.ResponseError, err: + print 'Error(%s): %s' % (err.error_code, err.error_message) + sys.exit(1) + except boto.roboto.awsqueryservice.NoCredentialsError, err: + print 'Unable to find credentials.' 
+            sys.exit(1)
+        except Exception, e:
+            print e
+            sys.exit(1)
+
+    def _generic_cli_formatter(self, fmt, data, label=''):
+        if fmt['type'] == 'object':
+            for prop in fmt['properties']:
+                if 'name' in fmt:
+                    if fmt['name'] in data:
+                        data = data[fmt['name']]
+                    if fmt['name'] in self.list_markers:
+                        label = fmt['name']
+                        if label[-1] == 's':
+                            label = label[0:-1]
+                        label = label.upper()
+                self._generic_cli_formatter(prop, data, label)
+        elif fmt['type'] == 'array':
+            for item in data:
+                line = Line(fmt, item, label)
+                if isinstance(item, dict):
+                    for field_name in item:
+                        line.append(item[field_name])
+                elif isinstance(item, basestring):
+                    line.append(item)
+                line.print_it()
+
+    def cli_formatter(self, data):
+        """
+        This method is responsible for formatting the output for the
+        command line interface. The default behavior is to call the
+        generic CLI formatter which attempts to print something
+        reasonable. If you want specific formatting, you should
+        override this method and do your own thing.
+
+        :type data: dict
+        :param data: The data returned by AWS.
+        """
+        if data:
+            self._generic_cli_formatter(self.Response, data)
+
+
diff --git a/awx/lib/site-packages/boto/roboto/awsqueryservice.py b/awx/lib/site-packages/boto/roboto/awsqueryservice.py
new file mode 100644
index 0000000000..0ca78c2d7f
--- /dev/null
+++ b/awx/lib/site-packages/boto/roboto/awsqueryservice.py
@@ -0,0 +1,121 @@
+import os
+import urlparse
+import boto
+import boto.connection
+import boto.jsonresponse
+import boto.exception
+import awsqueryrequest
+
+class NoCredentialsError(boto.exception.BotoClientError):
+
+    def __init__(self):
+        s = 'Unable to find credentials'
+        boto.exception.BotoClientError.__init__(self, s)
+
+class AWSQueryService(boto.connection.AWSQueryConnection):
+
+    Name = ''
+    Description = ''
+    APIVersion = ''
+    Authentication = 'sign-v2'
+    Path = '/'
+    Port = 443
+    Provider = 'aws'
+    EnvURL = 'AWS_URL'
+
+    Regions = []
+
+    def __init__(self, **args):
+        self.args = args
+        self.check_for_credential_file()
+        self.check_for_env_url()
+        if 'host' not in self.args:
+            if self.Regions:
+                region_name = self.args.get('region_name',
+                                            self.Regions[0]['name'])
+                for region in self.Regions:
+                    if region['name'] == region_name:
+                        self.args['host'] = region['endpoint']
+        if 'path' not in self.args:
+            self.args['path'] = self.Path
+        if 'port' not in self.args:
+            self.args['port'] = self.Port
+        try:
+            boto.connection.AWSQueryConnection.__init__(self, **self.args)
+            self.aws_response = None
+        except boto.exception.NoAuthHandlerFound:
+            raise NoCredentialsError()
+
+    def check_for_credential_file(self):
+        """
+        Checks for the existence of an AWS credential file. If the
+        environment variable AWS_CREDENTIAL_FILE is set and points to
+        a file, that file will be read and searched for credentials.
+        Note that if credentials have been explicitly passed into the
+        class constructor, those values always take precedence.
+ """ + if 'AWS_CREDENTIAL_FILE' in os.environ: + path = os.environ['AWS_CREDENTIAL_FILE'] + path = os.path.expanduser(path) + path = os.path.expandvars(path) + if os.path.isfile(path): + fp = open(path) + lines = fp.readlines() + fp.close() + for line in lines: + if line[0] != '#': + if '=' in line: + name, value = line.split('=', 1) + if name.strip() == 'AWSAccessKeyId': + if 'aws_access_key_id' not in self.args: + value = value.strip() + self.args['aws_access_key_id'] = value + elif name.strip() == 'AWSSecretKey': + if 'aws_secret_access_key' not in self.args: + value = value.strip() + self.args['aws_secret_access_key'] = value + else: + print 'Warning: unable to read AWS_CREDENTIAL_FILE' + + def check_for_env_url(self): + """ + First checks to see if a url argument was explicitly passed + in. If so, that will be used. If not, it checks for the + existence of the environment variable specified in ENV_URL. + If this is set, it should contain a fully qualified URL to the + service you want to use. + Note that any values passed explicitly to the class constructor + will take precedence. + """ + url = self.args.get('url', None) + if url: + del self.args['url'] + if not url and self.EnvURL in os.environ: + url = os.environ[self.EnvURL] + if url: + rslt = urlparse.urlparse(url) + if 'is_secure' not in self.args: + if rslt.scheme == 'https': + self.args['is_secure'] = True + else: + self.args['is_secure'] = False + + host = rslt.netloc + port = None + l = host.split(':') + if len(l) > 1: + host = l[0] + port = int(l[1]) + if 'host' not in self.args: + self.args['host'] = host + if port and 'port' not in self.args: + self.args['port'] = port + + if rslt.path and 'path' not in self.args: + self.args['path'] = rslt.path + + def _required_auth_capability(self): + return [self.Authentication] + diff --git a/awx/lib/site-packages/boto/roboto/param.py b/awx/lib/site-packages/boto/roboto/param.py new file mode 100644 index 0000000000..61364003a7 --- /dev/null +++ b/awx/lib/site-packages/boto/roboto/param.py @@ -0,0 +1,147 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import os + +class Converter(object): + + @classmethod + def convert_string(cls, param, value): + # TODO: could do length validation, etc. 
here + if not isinstance(value, basestring): + raise ValueError + return value + + @classmethod + def convert_integer(cls, param, value): + # TODO: could do range checking here + return int(value) + + @classmethod + def convert_boolean(cls, param, value): + """ + For command line arguments, just the presence + of the option means True so just return True + """ + return True + + @classmethod + def convert_file(cls, param, value): + if os.path.isfile(value): + return value + raise ValueError + + @classmethod + def convert_dir(cls, param, value): + if os.path.isdir(value): + return value + raise ValueError + + @classmethod + def convert(cls, param, value): + try: + if hasattr(cls, 'convert_'+param.ptype): + mthd = getattr(cls, 'convert_'+param.ptype) + else: + mthd = cls.convert_string + return mthd(param, value) + except: + raise ValidationException(param, '') + +class Param(object): + + def __init__(self, name=None, ptype='string', optional=True, + short_name=None, long_name=None, doc='', + metavar=None, cardinality=1, default=None, + choices=None, encoder=None, request_param=True): + self.name = name + self.ptype = ptype + self.optional = optional + self.short_name = short_name + self.long_name = long_name + self.doc = doc + self.metavar = metavar + self.cardinality = cardinality + self.default = default + self.choices = choices + self.encoder = encoder + self.request_param = request_param + + @property + def optparse_long_name(self): + ln = None + if self.long_name: + ln = '--%s' % self.long_name + return ln + + @property + def synopsis_long_name(self): + ln = None + if self.long_name: + ln = '--%s' % self.long_name + return ln + + @property + def getopt_long_name(self): + ln = None + if self.long_name: + ln = '%s' % self.long_name + if self.ptype != 'boolean': + ln += '=' + return ln + + @property + def optparse_short_name(self): + sn = None + if self.short_name: + sn = '-%s' % self.short_name + return sn + + @property + def synopsis_short_name(self): + sn = None + if self.short_name: + sn = '-%s' % self.short_name + return sn + + @property + def getopt_short_name(self): + sn = None + if self.short_name: + sn = '%s' % self.short_name + if self.ptype != 'boolean': + sn += ':' + return sn + + def convert(self, value): + """ + Convert a string value as received in the command line + tools and convert to the appropriate type of value. + Raise a ValidationError if the value can't be converted. + + :type value: str + :param value: The value to convert. This should always + be a string. + """ + return Converter.convert(self, value) + + diff --git a/awx/lib/site-packages/boto/route53/__init__.py b/awx/lib/site-packages/boto/route53/__init__.py new file mode 100644 index 0000000000..3546d25d36 --- /dev/null +++ b/awx/lib/site-packages/boto/route53/__init__.py @@ -0,0 +1,75 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+# this is here for backward compatibility
+# originally, the Route53Connection class was defined here
+from connection import Route53Connection
+from boto.regioninfo import RegionInfo
+
+
+class Route53RegionInfo(RegionInfo):
+
+    def connect(self, **kw_params):
+        """
+        Connect to this Region's endpoint. Returns a connection
+        object pointing to the endpoint associated with this region.
+        You may pass any of the arguments accepted by the connection
+        class's constructor as keyword arguments and they will be
+        passed along to the connection object.
+
+        :rtype: Connection object
+        :return: The connection to this region's endpoint
+        """
+        if self.connection_cls:
+            return self.connection_cls(host=self.endpoint, **kw_params)
+
+
+def regions():
+    """
+    Get all available regions for the Route53 service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo` instances
+    """
+    return [Route53RegionInfo(name='universal',
+                              endpoint='route53.amazonaws.com',
+                              connection_cls=Route53Connection)
+            ]
+
+
+def connect_to_region(region_name, **kw_params):
+    """
+    Given a valid region name, return a
+    :class:`boto.route53.connection.Route53Connection`.
+
+    :type: str
+    :param region_name: The name of the region to connect to.
+
+    :rtype: :class:`boto.route53.connection.Route53Connection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+             name is given
+    """
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
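For reference, the helpers above are used like boto's other per-service region functions. A minimal sketch (assumes AWS credentials are already configured through boto's standard mechanisms; Route53 is a global service, so only the 'universal' region is defined):

    import boto.route53

    conn = boto.route53.connect_to_region('universal')
    print [r.name for r in boto.route53.regions()]  # ['universal']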
diff --git a/awx/lib/site-packages/boto/route53/connection.py b/awx/lib/site-packages/boto/route53/connection.py
new file mode 100644
index 0000000000..221b29b297
--- /dev/null
+++ b/awx/lib/site-packages/boto/route53/connection.py
@@ -0,0 +1,403 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
+# www.bluepines.org
+# Copyright (c) 2012 42 Lines Inc., Jim Browne
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import xml.sax
+import uuid
+import urllib
+import boto
+from boto.connection import AWSAuthConnection
+from boto import handler
+from boto.route53.record import ResourceRecordSets
+from boto.route53.zone import Zone
+import boto.jsonresponse
+import exception
+
+HZXML = """<?xml version="1.0" encoding="UTF-8"?>
+<CreateHostedZoneRequest xmlns="%(xmlns)s">
+  <Name>%(name)s</Name>
+  <CallerReference>%(caller_ref)s</CallerReference>
+  <HostedZoneConfig>
+    <Comment>%(comment)s</Comment>
+  </HostedZoneConfig>
+</CreateHostedZoneRequest>"""
+
+#boto.set_stream_logger('dns')
+
+
+class Route53Connection(AWSAuthConnection):
+    DefaultHost = 'route53.amazonaws.com'
+    """The default Route53 API endpoint to connect to."""
+
+    Version = '2012-02-29'
+    """Route53 API version."""
+
+    XMLNameSpace = 'https://route53.amazonaws.com/doc/2012-02-29/'
+    """XML schema for this Route53 API version."""
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 port=None, proxy=None, proxy_port=None,
+                 host=DefaultHost, debug=0, security_token=None,
+                 validate_certs=True):
+        AWSAuthConnection.__init__(self, host,
+                                   aws_access_key_id, aws_secret_access_key,
+                                   True, port, proxy, proxy_port, debug=debug,
+                                   security_token=security_token,
+                                   validate_certs=validate_certs)
+
+    def _required_auth_capability(self):
+        return ['route53']
+
+    def make_request(self, action, path, headers=None, data='', params=None):
+        if params:
+            pairs = []
+            for key, val in params.iteritems():
+                if val is None:
+                    continue
+                pairs.append(key + '=' + urllib.quote(str(val)))
+            path += '?' + '&'.join(pairs)
+        return AWSAuthConnection.make_request(self, action, path,
+                                              headers, data)
+
+    # Hosted Zones
+
+    def get_all_hosted_zones(self, start_marker=None, zone_list=None):
+        """
+        Returns a Python data structure with information about all
+        Hosted Zones defined for the AWS account.
+
+        :param int start_marker: start marker to pass when fetching additional
+            results after a truncated list
+        :param list zone_list: a HostedZones list to prepend to results
+        """
+        params = {}
+        if start_marker:
+            params = {'marker': start_marker}
+        response = self.make_request('GET', '/%s/hostedzone' % self.Version,
+                                     params=params)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element(list_marker='HostedZones',
+                                      item_marker=('HostedZone',))
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        if zone_list:
+            e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
+        while 'NextMarker' in e['ListHostedZonesResponse']:
+            next_marker = e['ListHostedZonesResponse']['NextMarker']
+            zone_list = e['ListHostedZonesResponse']['HostedZones']
+            e = self.get_all_hosted_zones(next_marker, zone_list)
+        return e
+
+    def get_hosted_zone(self, hosted_zone_id):
+        """
+        Get detailed information about a particular Hosted Zone.
+
+        :type hosted_zone_id: str
+        :param hosted_zone_id: The unique identifier for the Hosted Zone
+
+        """
+        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
+        response = self.make_request('GET', uri)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element(list_marker='NameServers',
+                                      item_marker=('NameServer',))
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e
+
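+    # Usage sketch: get_all_hosted_zones() returns a nested Element keyed
+    # by the list/item markers above; e.g., given a Route53Connection
+    # instance ``conn`` (an assumed name):
+    #
+    #   zones = conn.get_all_hosted_zones()
+    #   for zone in zones['ListHostedZonesResponse']['HostedZones']:
+    #       print zone['Name'], zone['Id']
+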
+    def get_hosted_zone_by_name(self, hosted_zone_name):
+        """
+        Get detailed information about a particular Hosted Zone.
+
+        :type hosted_zone_name: str
+        :param hosted_zone_name: The fully qualified domain name for the
+            Hosted Zone
+
+        """
+        if hosted_zone_name[-1] != '.':
+            hosted_zone_name += '.'
+        all_hosted_zones = self.get_all_hosted_zones()
+        for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
+            # check that they gave us the FQDN for their zone
+            if zone['Name'] == hosted_zone_name:
+                return self.get_hosted_zone(zone['Id'].split('/')[-1])
+
+    def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
+        """
+        Create a new Hosted Zone.  Returns a Python data structure with
+        information about the newly created Hosted Zone.
+
+        :type domain_name: str
+        :param domain_name: The name of the domain. This should be a
+            fully-specified domain, and should end with a final period
+            as the last label indication.  If you omit the final period,
+            Amazon Route 53 assumes the domain is relative to the root.
+            This is the name you have registered with your DNS registrar.
+            It is also the name you will delegate from your registrar to
+            the Amazon Route 53 delegation servers returned in
+            response to this request.
+
+        :type caller_ref: str
+        :param caller_ref: A unique string that identifies the request
+            and that allows failed CreateHostedZone requests to be retried
+            without the risk of executing the operation twice.  If you don't
+            provide a value for this, boto will generate a Type 4 UUID and
+            use that.
+
+        :type comment: str
+        :param comment: Any comments you want to include about the hosted
+            zone.
+
+        """
+        if caller_ref is None:
+            caller_ref = str(uuid.uuid4())
+        params = {'name': domain_name,
+                  'caller_ref': caller_ref,
+                  'comment': comment,
+                  'xmlns': self.XMLNameSpace}
+        xml_body = HZXML % params
+        uri = '/%s/hostedzone' % self.Version
+        response = self.make_request('POST', uri,
+                                     {'Content-Type': 'text/xml'}, xml_body)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 201:
+            e = boto.jsonresponse.Element(list_marker='NameServers',
+                                          item_marker=('NameServer',))
+            h = boto.jsonresponse.XmlHandler(e, None)
+            h.parse(body)
+            return e
+        else:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+
+    def delete_hosted_zone(self, hosted_zone_id):
+        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
+        response = self.make_request('DELETE', uri)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status not in (200, 204):
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element()
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e
+
+    # Resource Record Sets
+
+    def get_all_rrsets(self, hosted_zone_id, type=None,
+                       name=None, identifier=None, maxitems=None):
+        """
+        Retrieve the Resource Record Sets defined for this Hosted Zone.
+        Returns the raw XML data returned by the Route53 call.
+
+        :type hosted_zone_id: str
+        :param hosted_zone_id: The unique identifier for the Hosted Zone
+
+        :type type: str
+        :param type: The type of resource record set to begin the record
+            listing from.
Valid choices are: + + * A + * AAAA + * CNAME + * MX + * NS + * PTR + * SOA + * SPF + * SRV + * TXT + + Valid values for weighted resource record sets: + + * A + * AAAA + * CNAME + * TXT + + Valid values for Zone Apex Aliases: + + * A + * AAAA + + :type name: str + :param name: The first name in the lexicographic ordering of domain + names to be retrieved + + :type identifier: str + :param identifier: In a hosted zone that includes weighted resource + record sets (multiple resource record sets with the same DNS + name and type that are differentiated only by SetIdentifier), + if results were truncated for a given DNS name and type, + the value of SetIdentifier for the next resource record + set that has the current DNS name and type + + :type maxitems: int + :param maxitems: The maximum number of records + + """ + params = {'type': type, 'name': name, + 'Identifier': identifier, 'maxitems': maxitems} + uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id) + response = self.make_request('GET', uri, params=params) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + + def change_rrsets(self, hosted_zone_id, xml_body): + """ + Create or change the authoritative DNS information for this + Hosted Zone. + Returns a Python data structure with information about the set of + changes, including the Change ID. + + :type hosted_zone_id: str + :param hosted_zone_id: The unique identifier for the Hosted Zone + + :type xml_body: str + :param xml_body: The list of changes to be made, defined in the + XML schema defined by the Route53 service. + + """ + uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id) + response = self.make_request('POST', uri, + {'Content-Type': 'text/xml'}, + xml_body) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def get_change(self, change_id): + """ + Get information about a proposed set of changes, as submitted + by the change_rrsets method. + Returns a Python data structure with status information about the + changes. + + :type change_id: str + :param change_id: The unique identifier for the set of changes. + This ID is returned in the response to the change_rrsets method. + + """ + uri = '/%s/change/%s' % (self.Version, change_id) + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def create_zone(self, name): + """ + Create a new Hosted Zone. Returns a Zone object for the newly + created Hosted Zone. + + :type name: str + :param name: The name of the domain. This should be a + fully-specified domain, and should end with a final period + as the last label indication. If you omit the final period, + Amazon Route 53 assumes the domain is relative to the root. + This is the name you have registered with your DNS registrar. 
+ It is also the name you will delegate from your registrar to + the Amazon Route 53 delegation servers returned in + response to this request. + """ + zone = self.create_hosted_zone(name) + return Zone(self, zone['CreateHostedZoneResponse']['HostedZone']) + + def get_zone(self, name): + """ + Returns a Zone object for the specified Hosted Zone. + + :param name: The name of the domain. This should be a + fully-specified domain, and should end with a final period + as the last label indication. + """ + name = self._make_qualified(name) + for zone in self.get_zones(): + if name == zone.name: + return zone + + def get_zones(self): + """ + Returns a list of Zone objects, one for each of the Hosted + Zones defined for the AWS account. + """ + zones = self.get_all_hosted_zones() + return [Zone(self, zone) for zone in + zones['ListHostedZonesResponse']['HostedZones']] + + def _make_qualified(self, value): + """ + Ensure passed domain names end in a period (.) character. + This will usually make a domain fully qualified. + """ + if type(value) in [list, tuple, set]: + new_list = [] + for record in value: + if record and not record[-1] == '.': + new_list.append("%s." % record) + else: + new_list.append(record) + return new_list + else: + value = value.strip() + if value and not value[-1] == '.': + value = "%s." % value + return value diff --git a/awx/lib/site-packages/boto/route53/exception.py b/awx/lib/site-packages/boto/route53/exception.py new file mode 100644 index 0000000000..ba41285abb --- /dev/null +++ b/awx/lib/site-packages/boto/route53/exception.py @@ -0,0 +1,27 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import BotoServerError + +class DNSServerError(BotoServerError): + + pass diff --git a/awx/lib/site-packages/boto/route53/hostedzone.py b/awx/lib/site-packages/boto/route53/hostedzone.py new file mode 100644 index 0000000000..66b79b8479 --- /dev/null +++ b/awx/lib/site-packages/boto/route53/hostedzone.py @@ -0,0 +1,56 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +class HostedZone(object): + + def __init__(self, id=None, name=None, owner=None, version=None, + caller_reference=None, config=None): + self.id = id + self.name = name + self.owner = owner + self.version = version + self.caller_reference = caller_reference + self.config = config + + def startElement(self, name, attrs, connection): + if name == 'Config': + self.config = Config() + return self.config + else: + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'Name': + self.name = value + elif name == 'Owner': + self.owner = value + elif name == 'Version': + self.version = value + elif name == 'CallerReference': + self.caller_reference = value + else: + setattr(self, name, value) + diff --git a/awx/lib/site-packages/boto/route53/record.py b/awx/lib/site-packages/boto/route53/record.py new file mode 100644 index 0000000000..d26ca11953 --- /dev/null +++ b/awx/lib/site-packages/boto/route53/record.py @@ -0,0 +1,313 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF'] + +from boto.resultset import ResultSet +class ResourceRecordSets(ResultSet): + """ + A list of resource records. 
+
+    :ivar hosted_zone_id: The ID of the hosted zone.
+    :ivar comment: A comment that will be stored with the change.
+    :ivar changes: A list of changes.
+    """
+
+    ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
+    <ChangeResourceRecordSetsRequest xmlns="http://route53.amazonaws.com/doc/2012-02-29/">
+            <ChangeBatch>
+                <Comment>%(comment)s</Comment>
+                <Changes>%(changes)s</Changes>
+            </ChangeBatch>
+        </ChangeResourceRecordSetsRequest>"""
+
+    ChangeXML = """<Change>
+        <Action>%(action)s</Action>
+        %(record)s
+    </Change>"""
+
+    def __init__(self, connection=None, hosted_zone_id=None, comment=None):
+        self.connection = connection
+        self.hosted_zone_id = hosted_zone_id
+        self.comment = comment
+        self.changes = []
+        self.next_record_name = None
+        self.next_record_type = None
+        ResultSet.__init__(self, [('ResourceRecordSet', Record)])
+
+    def __repr__(self):
+        if self.changes:
+            record_list = ','.join([c.__repr__() for c in self.changes])
+        else:
+            record_list = ','.join([record.__repr__() for record in self])
+        return '<ResourceRecordSets:%s [%s]' % (self.hosted_zone_id,
+                                                record_list)
+
+    def add_change(self, action, name, type, ttl=600,
+                   alias_hosted_zone_id=None, alias_dns_name=None,
+                   identifier=None, weight=None, region=None):
+        """
+        Add a change request ('CREATE' or 'DELETE') for the given record
+        to this set, returning the new Record so values can be appended.
+        """
+        change = Record(name, type, ttl,
+                        alias_hosted_zone_id=alias_hosted_zone_id,
+                        alias_dns_name=alias_dns_name, identifier=identifier,
+                        weight=weight, region=region)
+        self.changes.append([action, change])
+        return change
+
+    def add_change_record(self, action, change):
+        """Add an existing record to a change set with the specified action"""
+        self.changes.append([action, change])
+        return
+
+    def to_xml(self):
+        """Convert this ResourceRecordSet into XML
+        to be saved via the ChangeResourceRecordSetsRequest"""
+        changesXML = ""
+        for change in self.changes:
+            changeParams = {"action": change[0], "record": change[1].to_xml()}
+            changesXML += self.ChangeXML % changeParams
+        params = {"comment": self.comment, "changes": changesXML}
+        return self.ChangeResourceRecordSetsBody % params
+
+    def commit(self):
+        """Commit this change"""
+        if not self.connection:
+            import boto
+            self.connection = boto.connect_route53()
+        return self.connection.change_rrsets(self.hosted_zone_id,
+                                             self.to_xml())
+
+    def endElement(self, name, value, connection):
+        """Overwritten to also add the NextRecordName and
+        NextRecordType to the base object"""
+        if name == 'NextRecordName':
+            self.next_record_name = value
+        elif name == 'NextRecordType':
+            self.next_record_type = value
+        else:
+            return ResultSet.endElement(self, name, value, connection)
+
+    def __iter__(self):
+        """Override the next function to support paging"""
+        results = ResultSet.__iter__(self)
+        truncated = self.is_truncated
+        while results:
+            for obj in results:
+                yield obj
+            if self.is_truncated:
+                self.is_truncated = False
+                results = self.connection.get_all_rrsets(self.hosted_zone_id,
+                                                         name=self.next_record_name,
+                                                         type=self.next_record_type)
+            else:
+                results = None
+                self.is_truncated = truncated
+
+
+class Record(object):
+    """An individual ResourceRecordSet"""
+
+    XMLBody = """<ResourceRecordSet>
+        <Name>%(name)s</Name>
+        <Type>%(type)s</Type>
+        %(weight)s
+        %(body)s
+    </ResourceRecordSet>"""
+
+    WRRBody = """
+        <SetIdentifier>%(identifier)s</SetIdentifier>
+        <Weight>%(weight)s</Weight>
+    """
+
+    RRRBody = """
+        <SetIdentifier>%(identifier)s</SetIdentifier>
+        <Region>%(region)s</Region>
+    """
+
+    ResourceRecordsBody = """
+        <TTL>%(ttl)s</TTL>
+        <ResourceRecords>
+            %(records)s
+        </ResourceRecords>"""
+
+    ResourceRecordBody = """<ResourceRecord>
+        <Value>%s</Value>
+    </ResourceRecord>"""
+
+    AliasBody = """<AliasTarget>
+        <HostedZoneId>%s</HostedZoneId>
+        <DNSName>%s</DNSName>
+    </AliasTarget>"""
+
+    def __init__(self, name=None, type=None, ttl=600, resource_records=None,
+                 alias_hosted_zone_id=None, alias_dns_name=None,
+                 identifier=None, weight=None, region=None):
+        self.name = name
+        self.type = type
+        self.ttl = ttl
+        if resource_records == None:
+            resource_records = []
+        self.resource_records = resource_records
+        self.alias_hosted_zone_id = alias_hosted_zone_id
+        self.alias_dns_name = alias_dns_name
+        self.identifier = identifier
+        self.weight = weight
+        self.region = region
+
+    def __repr__(self):
+        return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
+
+    def add_value(self, value):
+        """Add a resource record value"""
+        self.resource_records.append(value)
+
+    def set_alias(self, alias_hosted_zone_id, alias_dns_name):
+        """Make this an alias resource record set"""
+        self.alias_hosted_zone_id = alias_hosted_zone_id
+        self.alias_dns_name = alias_dns_name
+
+    def to_xml(self):
+        """Spit this resource record set out as XML"""
+        if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
+            # Use alias
+            body = self.AliasBody % (self.alias_hosted_zone_id,
+                                     self.alias_dns_name)
+        else:
+            # Use resource record(s)
+            records = ""
+
+            for r in self.resource_records:
+                records += self.ResourceRecordBody % r
+
+            body = self.ResourceRecordsBody % {
+                "ttl": self.ttl,
+                "records": records,
+            }
+
+        weight = ""
+
+        if self.identifier != None and self.weight != None:
+            weight = self.WRRBody % {"identifier": self.identifier,
+                                     "weight": self.weight}
+        elif self.identifier != None and self.region != None:
+            weight = self.RRRBody % {"identifier": self.identifier,
+                                     "region": self.region}
+
+        params = {
+            "name": self.name,
+            "type": self.type,
+            "weight": weight,
+            "body": body,
+        }
+        return self.XMLBody % params
+
+    def to_print(self):
+        rr = ""
+        if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
+            # Show alias
+            rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
+        else:
+            # Show resource record(s)
+            rr = ",".join(self.resource_records)
+
+        if self.identifier != None and self.weight != None:
+            rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
+        elif self.identifier != None and self.region != None:
+            rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
+
+        return rr
+
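+    # Serialization sketch: a plain A record renders through
+    # ResourceRecordsBody/ResourceRecordBody, while a record configured
+    # with set_alias() renders through AliasBody instead, e.g.:
+    #
+    #   r = Record('www.example.com.', 'A', 300, ['192.0.2.1'])
+    #   print r.to_xml()  # <ResourceRecordSet> with one <Value> entry
+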
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'Type':
+            self.type = value
+        elif name == 'TTL':
+            self.ttl = value
+        elif name == 'Value':
+            self.resource_records.append(value)
+        elif name == 'HostedZoneId':
+            self.alias_hosted_zone_id = value
+        elif name == 'DNSName':
+            self.alias_dns_name = value
+        elif name == 'SetIdentifier':
+            self.identifier = value
+        elif name == 'Weight':
+            self.weight = value
+        elif name == 'Region':
+            self.region = value
+
+    def startElement(self, name, attrs, connection):
+        return None
diff --git a/awx/lib/site-packages/boto/route53/status.py b/awx/lib/site-packages/boto/route53/status.py
new file mode 100644
index 0000000000..782372a811
--- /dev/null
+++ b/awx/lib/site-packages/boto/route53/status.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
+# www.bluepines.org
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class Status(object):
+    def __init__(self, route53connection, change_dict):
+        self.route53connection = route53connection
+        for key in change_dict:
+            if key == 'Id':
+                self.__setattr__(key.lower(),
+                                 change_dict[key].replace('/change/', ''))
+            else:
+                self.__setattr__(key.lower(), change_dict[key])
+
+    def update(self):
+        """ Update the status of this request."""
+        status = self.route53connection.get_change(self.id)['GetChangeResponse']['ChangeInfo']['Status']
+        self.status = status
+        return status
+
+    def __repr__(self):
+        return '<Status:%s>' % self.status
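Together, ResourceRecordSets and Status implement the submit-then-poll pattern that the Zone class below wraps. A minimal sketch, assuming an existing Route53Connection ``conn`` and a hosted zone id ``zone_id`` ('PENDING'/'INSYNC' are the status values the Route53 GetChange API reports):

    from boto.route53.record import ResourceRecordSets
    from boto.route53.status import Status

    changes = ResourceRecordSets(conn, zone_id, comment='add www record')
    change = changes.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
    change.add_value('192.0.2.1')
    result = changes.commit()

    info = result['ChangeResourceRecordSetsResponse']['ChangeInfo']
    status = Status(conn, info)
    while status.update() == 'PENDING':
        pass  # real code should sleep between polls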
diff --git a/awx/lib/site-packages/boto/route53/zone.py b/awx/lib/site-packages/boto/route53/zone.py
new file mode 100644
index 0000000000..75cefd48ae
--- /dev/null
+++ b/awx/lib/site-packages/boto/route53/zone.py
@@ -0,0 +1,412 @@
+# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
+# www.bluepines.org
+# Copyright (c) 2012 42 Lines Inc., Jim Browne
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+default_ttl = 60
+
+import copy
+from boto.exception import TooManyRecordsException
+from boto.route53.record import ResourceRecordSets
+from boto.route53.status import Status
+
+
+class Zone(object):
+    """
+    A Route53 Zone.
+
+    :ivar Route53Connection route53connection
+    :ivar str Id: The ID of the hosted zone.
+    """
+    def __init__(self, route53connection, zone_dict):
+        self.route53connection = route53connection
+        for key in zone_dict:
+            if key == 'Id':
+                self.id = zone_dict['Id'].replace('/hostedzone/', '')
+            else:
+                self.__setattr__(key.lower(), zone_dict[key])
+
+    def __repr__(self):
+        return '<Zone:%s>' % self.name
+
+    def _commit(self, changes):
+        """
+        Commit a set of changes and return the ChangeInfo portion of
+        the response.
+
+        :type changes: ResourceRecordSets
+        :param changes: changes to be committed
+        """
+        response = changes.commit()
+        return response['ChangeResourceRecordSetsResponse']['ChangeInfo']
+
+    def _new_record(self, changes, resource_type, name, value, ttl, identifier,
+                    comment=""):
+        """
+        Add a CREATE change record to an existing ResourceRecordSets
+
+        :type changes: ResourceRecordSets
+        :param changes: change set to append to
+
+        :type name: str
+        :param name: The name of the resource record you want to
+            perform the action on.
+
+        :type resource_type: str
+        :param resource_type: The DNS record type
+
+        :param value: Appropriate value for resource_type
+
+        :type ttl: int
+        :param ttl: The resource record cache time to live (TTL), in seconds.
+
+        :type identifier: tuple
+        :param identifier: A tuple for setting WRR or LBR attributes.  Valid
+            forms are:
+
+            * (str, int): WRR record [e.g. ('foo',10)]
+            * (str, str): LBR record [e.g. ('foo','us-east-1')]
+
+        :type comment: str
+        :param comment: A comment that will be stored with the change.
+        """
+        weight = None
+        region = None
+        if identifier is not None:
+            try:
+                int(identifier[1])
+                weight = identifier[1]
+                identifier = identifier[0]
+            except:
+                region = identifier[1]
+                identifier = identifier[0]
+        change = changes.add_change("CREATE", name, resource_type, ttl,
+                                    identifier=identifier, weight=weight,
+                                    region=region)
+        if type(value) in [list, tuple, set]:
+            for record in value:
+                change.add_value(record)
+        else:
+            change.add_value(value)
+
+    def add_record(self, resource_type, name, value, ttl=60, identifier=None,
+                   comment=""):
+        """
+        Add a new record to this Zone.  See _new_record for parameter
+        documentation.  Returns a Status object.
+        """
+        changes = ResourceRecordSets(self.route53connection, self.id, comment)
+        self._new_record(changes, resource_type, name, value, ttl, identifier,
+                         comment)
+        return Status(self.route53connection, self._commit(changes))
+
+    def update_record(self, old_record, new_value, new_ttl=None,
+                      new_identifier=None, comment=""):
+        """
+        Update an existing record in this Zone.  Returns a Status object.
+
+        :type old_record: ResourceRecord
+        :param old_record: A ResourceRecord (e.g. returned by find_records)
+
+        See _new_record for additional parameter documentation.
+ """ + new_ttl = new_ttl or default_ttl + record = copy.copy(old_record) + changes = ResourceRecordSets(self.route53connection, self.id, comment) + changes.add_change_record("DELETE", record) + self._new_record(changes, record.type, record.name, + new_value, new_ttl, new_identifier, comment) + return Status(self.route53connection, self._commit(changes)) + + def delete_record(self, record, comment=""): + """ + Delete one or more records from this Zone. Returns a Status object. + + :param record: A ResourceRecord (e.g. returned by + find_records) or list, tuple, or set of ResourceRecords. + + :type comment: str + :param comment: A comment that will be stored with the change. + """ + changes = ResourceRecordSets(self.route53connection, self.id, comment) + if type(record) in [list, tuple, set]: + for r in record: + changes.add_change_record("DELETE", r) + else: + changes.add_change_record("DELETE", record) + return Status(self.route53connection, self._commit(changes)) + + def add_cname(self, name, value, ttl=None, identifier=None, comment=""): + """ + Add a new CNAME record to this Zone. See _new_record for + parameter documentation. Returns a Status object. + """ + ttl = ttl or default_ttl + name = self.route53connection._make_qualified(name) + value = self.route53connection._make_qualified(value) + return self.add_record(resource_type='CNAME', + name=name, + value=value, + ttl=ttl, + identifier=identifier, + comment=comment) + + def add_a(self, name, value, ttl=None, identifier=None, comment=""): + """ + Add a new A record to this Zone. See _new_record for + parameter documentation. Returns a Status object. + """ + ttl = ttl or default_ttl + name = self.route53connection._make_qualified(name) + return self.add_record(resource_type='A', + name=name, + value=value, + ttl=ttl, + identifier=identifier, + comment=comment) + + def add_mx(self, name, records, ttl=None, identifier=None, comment=""): + """ + Add a new MX record to this Zone. See _new_record for + parameter documentation. Returns a Status object. + """ + ttl = ttl or default_ttl + records = self.route53connection._make_qualified(records) + return self.add_record(resource_type='MX', + name=name, + value=records, + ttl=ttl, + identifier=identifier, + comment=comment) + + def find_records(self, name, type, desired=1, all=False, identifier=None): + """ + Search this Zone for records that match given parameters. + Returns None if no results, a ResourceRecord if one result, or + a ResourceRecordSets if more than one result. + + :type name: str + :param name: The name of the records should match this parameter + + :type type: str + :param type: The type of the records should match this parameter + + :type desired: int + :param desired: The number of desired results. If the number of + matching records in the Zone exceeds the value of this parameter, + throw TooManyRecordsException + + :type all: Boolean + :param all: If true return all records that match name, type, and + identifier parameters + + :type identifier: Tuple + :param identifier: A tuple specifying WRR or LBR attributes. Valid + forms are: + + * (str, int): WRR record [e.g. ('foo',10)] + * (str, str): LBR record [e.g. 
('foo','us-east-1')]
+
+        """
+        name = self.route53connection._make_qualified(name)
+        returned = self.route53connection.get_all_rrsets(self.id, name=name,
+                                                         type=type)
+
+        # name/type for get_all_rrsets sets the starting record; they
+        # are not a filter
+        results = [r for r in returned if r.name == name and r.type == type]
+
+        weight = None
+        region = None
+        if identifier is not None:
+            try:
+                int(identifier[1])
+                weight = identifier[1]
+            except:
+                region = identifier[1]
+
+        if weight is not None:
+            results = [r for r in results if (r.weight == weight and
+                                              r.identifier == identifier[0])]
+        if region is not None:
+            results = [r for r in results if (r.region == region and
+                                              r.identifier == identifier[0])]
+
+        if ((not all) and (len(results) > desired)):
+            message = "Search: name %s type %s" % (name, type)
+            message += "\nFound: "
+            message += ", ".join(["%s %s %s" % (r.name, r.type, r.to_print())
+                                  for r in results])
+            raise TooManyRecordsException(message)
+        elif len(results) > 1:
+            return results
+        elif len(results) == 1:
+            return results[0]
+        else:
+            return None
+
+    def get_cname(self, name, all=False):
+        """
+        Search this Zone for CNAME records that match name.
+
+        Returns a ResourceRecord.
+
+        If there is more than one match return all as a
+        ResourceRecordSets if all is True, otherwise throws
+        TooManyRecordsException.
+        """
+        return self.find_records(name, 'CNAME', all=all)
+
+    def get_a(self, name, all=False):
+        """
+        Search this Zone for A records that match name.
+
+        Returns a ResourceRecord.
+
+        If there is more than one match return all as a
+        ResourceRecordSets if all is True, otherwise throws
+        TooManyRecordsException.
+        """
+        return self.find_records(name, 'A', all=all)
+
+    def get_mx(self, name, all=False):
+        """
+        Search this Zone for MX records that match name.
+
+        Returns a ResourceRecord.
+
+        If there is more than one match return all as a
+        ResourceRecordSets if all is True, otherwise throws
+        TooManyRecordsException.
+        """
+        return self.find_records(name, 'MX', all=all)
+
+    def update_cname(self, name, value, ttl=None, identifier=None, comment=""):
+        """
+        Update the given CNAME record in this Zone to a new value, ttl,
+        and identifier.  Returns a Status object.
+
+        Will throw TooManyRecordsException if name and value do not match
+        a single record.
+        """
+        name = self.route53connection._make_qualified(name)
+        value = self.route53connection._make_qualified(value)
+        old_record = self.get_cname(name)
+        ttl = ttl or old_record.ttl
+        return self.update_record(old_record,
+                                  new_value=value,
+                                  new_ttl=ttl,
+                                  new_identifier=identifier,
+                                  comment=comment)
+
+    def update_a(self, name, value, ttl=None, identifier=None, comment=""):
+        """
+        Update the given A record in this Zone to a new value, ttl,
+        and identifier.  Returns a Status object.
+
+        Will throw TooManyRecordsException if name and value do not match
+        a single record.
+        """
+        name = self.route53connection._make_qualified(name)
+        old_record = self.get_a(name)
+        ttl = ttl or old_record.ttl
+        return self.update_record(old_record,
+                                  new_value=value,
+                                  new_ttl=ttl,
+                                  new_identifier=identifier,
+                                  comment=comment)
+
+    def update_mx(self, name, value, ttl=None, identifier=None, comment=""):
+        """
+        Update the given MX record in this Zone to a new value, ttl,
+        and identifier.  Returns a Status object.
+
+        Will throw TooManyRecordsException if name and value do not match
+        a single record.
+ """ + name = self.route53connection._make_qualified(name) + value = self.route53connection._make_qualified(value) + old_record = self.get_mx(name) + ttl = ttl or old_record.ttl + return self.update_record(old_record, + new_value=value, + new_ttl=ttl, + new_identifier=identifier, + comment=comment) + + def delete_cname(self, name, identifier=None, all=False): + """ + Delete a CNAME record matching name and identifier from + this Zone. Returns a Status object. + + If there is more than one match delete all matching records if + all is True, otherwise throws TooManyRecordsException. + """ + name = self.route53connection._make_qualified(name) + record = self.find_records(name, 'CNAME', identifier=identifier, + all=all) + return self.delete_record(record) + + def delete_a(self, name, identifier=None, all=False): + """ + Delete an A record matching name and identifier from this + Zone. Returns a Status object. + + If there is more than one match delete all matching records if + all is True, otherwise throws TooManyRecordsException. + """ + name = self.route53connection._make_qualified(name) + record = self.find_records(name, 'A', identifier=identifier, + all=all) + return self.delete_record(record) + + def delete_mx(self, name, identifier=None, all=False): + """ + Delete an MX record matching name and identifier from this + Zone. Returns a Status object. + + If there is more than one match delete all matching records if + all is True, otherwise throws TooManyRecordsException. + """ + name = self.route53connection._make_qualified(name) + record = self.find_records(name, 'MX', identifier=identifier, + all=all) + return self.delete_record(record) + + def get_records(self): + """ + Return a ResourceRecordsSets for all of the records in this zone. + """ + return self.route53connection.get_all_rrsets(self.id) + + def delete(self): + """ + Request that this zone be deleted by Amazon. + """ + self.route53connection.delete_hosted_zone(self.id) + + def get_nameservers(self): + """ Get the list of nameservers for this zone.""" + ns = self.find_records(self.name, 'NS') + if ns is not None: + ns = ns.resource_records + return ns diff --git a/awx/lib/site-packages/boto/s3/__init__.py b/awx/lib/site-packages/boto/s3/__init__.py new file mode 100644 index 0000000000..f7237157da --- /dev/null +++ b/awx/lib/site-packages/boto/s3/__init__.py @@ -0,0 +1,87 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.regioninfo import RegionInfo
+
+
+class S3RegionInfo(RegionInfo):
+
+    def connect(self, **kw_params):
+        """
+        Connect to this Region's endpoint. Returns a connection
+        object pointing to the endpoint associated with this region.
+        You may pass any of the arguments accepted by the connection
+        class's constructor as keyword arguments and they will be
+        passed along to the connection object.
+
+        :rtype: Connection object
+        :return: The connection to this region's endpoint
+        """
+        if self.connection_cls:
+            return self.connection_cls(host=self.endpoint, **kw_params)
+
+
+def regions():
+    """
+    Get all available regions for the Amazon S3 service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    from .connection import S3Connection
+    return [S3RegionInfo(name='us-east-1',
+                         endpoint='s3.amazonaws.com',
+                         connection_cls=S3Connection),
+            S3RegionInfo(name='us-gov-west-1',
+                         endpoint='s3-us-gov-west-1.amazonaws.com',
+                         connection_cls=S3Connection),
+            S3RegionInfo(name='us-west-1',
+                         endpoint='s3-us-west-1.amazonaws.com',
+                         connection_cls=S3Connection),
+            S3RegionInfo(name='us-west-2',
+                         endpoint='s3-us-west-2.amazonaws.com',
+                         connection_cls=S3Connection),
+            S3RegionInfo(name='ap-northeast-1',
+                         endpoint='s3-ap-northeast-1.amazonaws.com',
+                         connection_cls=S3Connection),
+            S3RegionInfo(name='ap-southeast-1',
+                         endpoint='s3-ap-southeast-1.amazonaws.com',
+                         connection_cls=S3Connection),
+            S3RegionInfo(name='ap-southeast-2',
+                         endpoint='s3-ap-southeast-2.amazonaws.com',
+                         connection_cls=S3Connection),
+            S3RegionInfo(name='eu-west-1',
+                         endpoint='s3-eu-west-1.amazonaws.com',
+                         connection_cls=S3Connection),
+            S3RegionInfo(name='sa-east-1',
+                         endpoint='s3-sa-east-1.amazonaws.com',
+                         connection_cls=S3Connection),
+            ]
+
+
+def connect_to_region(region_name, **kw_params):
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
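As with the Route53 module, connect_to_region() is the usual entry point here. A minimal sketch (assumes configured credentials; the bucket name is illustrative):

    import boto.s3

    conn = boto.s3.connect_to_region('us-west-2')
    bucket = conn.lookup('my-example-bucket')  # None if it does not exist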
diff --git a/awx/lib/site-packages/boto/s3/acl.py b/awx/lib/site-packages/boto/s3/acl.py
new file mode 100644
index 0000000000..a7bca8c97c
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/acl.py
@@ -0,0 +1,164 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.user import User
+
+
+CannedACLStrings = ['private', 'public-read',
+                    'public-read-write', 'authenticated-read',
+                    'bucket-owner-read', 'bucket-owner-full-control',
+                    'log-delivery-write']
+
+
+class Policy:
+
+    def __init__(self, parent=None):
+        self.parent = parent
+        self.acl = None
+
+    def __repr__(self):
+        grants = []
+        for g in self.acl.grants:
+            if g.id == self.owner.id:
+                grants.append("%s (owner) = %s" % (g.display_name, g.permission))
+            else:
+                if g.type == 'CanonicalUser':
+                    u = g.display_name
+                elif g.type == 'Group':
+                    u = g.uri
+                else:
+                    u = g.email_address
+                grants.append("%s = %s" % (u, g.permission))
+        return "<Policy: %s>" % ", ".join(grants)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Owner':
+            self.owner = User(self)
+            return self.owner
+        elif name == 'AccessControlList':
+            self.acl = ACL(self)
+            return self.acl
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Owner':
+            pass
+        elif name == 'AccessControlList':
+            pass
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<AccessControlPolicy>'
+        s += self.owner.to_xml()
+        s += self.acl.to_xml()
+        s += '</AccessControlPolicy>'
+        return s
+
+class ACL:
+
+    def __init__(self, policy=None):
+        self.policy = policy
+        self.grants = []
+
+    def add_grant(self, grant):
+        self.grants.append(grant)
+
+    def add_email_grant(self, permission, email_address):
+        grant = Grant(permission=permission, type='AmazonCustomerByEmail',
+                      email_address=email_address)
+        self.grants.append(grant)
+
+    def add_user_grant(self, permission, user_id, display_name=None):
+        grant = Grant(permission=permission, type='CanonicalUser', id=user_id,
+                      display_name=display_name)
+        self.grants.append(grant)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grant':
+            self.grants.append(Grant(self))
+            return self.grants[-1]
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Grant':
+            pass
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<AccessControlList>'
+        for grant in self.grants:
+            s += grant.to_xml()
+        s += '</AccessControlList>'
+        return s
+
+class Grant:
+
+    NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+
+    def __init__(self, permission=None, type=None, id=None,
+                 display_name=None, uri=None, email_address=None):
+        self.permission = permission
+        self.id = id
+        self.display_name = display_name
+        self.uri = uri
+        self.email_address = email_address
+        self.type = type
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grantee':
+            self.type = attrs['xsi:type']
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ID':
+            self.id = value
+        elif name == 'DisplayName':
+            self.display_name = value
+        elif name == 'URI':
+            self.uri = value
+        elif name == 'EmailAddress':
+            self.email_address = value
+        elif name == 'Grantee':
+            pass
+        elif name == 'Permission':
+            self.permission = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<Grant>'
+        s += '<Grantee %s xsi:type="%s">' % (self.NameSpace, self.type)
+        if self.type == 'CanonicalUser':
+            s += '<ID>%s</ID>' % self.id
+            s += '<DisplayName>%s</DisplayName>' % self.display_name
+        elif self.type == 'Group':
+            s += '<URI>%s</URI>' % self.uri
+        else:
+            s += '<EmailAddress>%s</EmailAddress>' % self.email_address
+        s += '</Grantee>'
+        s += '<Permission>%s</Permission>' % self.permission
+        s += '</Grant>'
+        return s
+
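The Policy/ACL/Grant classes above serialize to S3's AccessControlPolicy XML. A small sketch of composing an ACL by hand (the id, display name, and address are illustrative):

    from boto.s3.acl import ACL

    acl = ACL()
    acl.add_user_grant('READ', 'example-canonical-user-id',
                       display_name='example-user')
    acl.add_email_grant('FULL_CONTROL', 'user@example.com')
    print acl.to_xml()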
diff --git a/awx/lib/site-packages/boto/s3/bucket.py b/awx/lib/site-packages/boto/s3/bucket.py
new file mode 100644
index 0000000000..335e9faaed
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/bucket.py
@@ -0,0 +1,1669 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto import handler
+from boto.resultset import ResultSet
+from boto.exception import BotoClientError
+from boto.s3.acl import Policy, CannedACLStrings, Grant
+from boto.s3.key import Key
+from boto.s3.prefix import Prefix
+from boto.s3.deletemarker import DeleteMarker
+from boto.s3.multipart import MultiPartUpload
+from boto.s3.multipart import CompleteMultiPartUpload
+from boto.s3.multidelete import MultiDeleteResult
+from boto.s3.multidelete import Error
+from boto.s3.bucketlistresultset import BucketListResultSet
+from boto.s3.bucketlistresultset import VersionedBucketListResultSet
+from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
+from boto.s3.lifecycle import Lifecycle
+from boto.s3.tagging import Tags
+from boto.s3.cors import CORSConfiguration
+from boto.s3.bucketlogging import BucketLogging
+from boto.s3 import website
+import boto.jsonresponse
+import boto.utils
+import xml.sax
+import xml.sax.saxutils
+import StringIO
+import urllib
+import re
+import base64
+from collections import defaultdict
+
+# as per http://goo.gl/BDuud (02/19/2011)
+
+
+class S3WebsiteEndpointTranslate:
+
+    trans_region = defaultdict(lambda: 's3-website-us-east-1')
+    trans_region['eu-west-1'] = 's3-website-eu-west-1'
+    trans_region['us-west-1'] = 's3-website-us-west-1'
+    trans_region['us-west-2'] = 's3-website-us-west-2'
+    trans_region['sa-east-1'] = 's3-website-sa-east-1'
+    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
+    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
+
+    @classmethod
+    def translate_region(self, reg):
+        return self.trans_region[reg]
+
+S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
+
+
+class Bucket(object):
+
+    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
+
+    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
+       <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+         <Payer>%s</Payer>
+       </RequestPaymentConfiguration>"""
+
+    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
+       <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+         <Status>%s</Status>
+         <MfaDelete>%s</MfaDelete>
+       </VersioningConfiguration>"""
+
+    VersionRE = '<Status>([A-Za-z]+)</Status>'
+    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
+
+    def __init__(self, connection=None, name=None, key_class=Key):
+        self.name = name
+        self.connection = connection
+        self.key_class = key_class
+
+    def __repr__(self):
+        return '<Bucket: %s>' % self.name
+
+    def __iter__(self):
+        return iter(BucketListResultSet(self))
+
+    def __contains__(self, key_name):
+        return not (self.get_key(key_name) is None)
+
+    def startElement(self, name, attrs, connection):
+        return None
+
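+    # __iter__ and __contains__ above make buckets behave like collections,
+    # e.g. (sketch, key name illustrative):
+    #
+    #   if 'photos/cat.jpg' in bucket:
+    #       for key in bucket:
+    #           print key.name
+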
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'CreationDate':
+            self.creation_date = value
+        else:
+            setattr(self, name, value)
+
+    def set_key_class(self, key_class):
+        """
+        Set the Key class associated with this bucket.  By default, this
+        would be the boto.s3.key.Key class but if you want to subclass that
+        for some reason this allows you to associate your new class with a
+        bucket so that when you call bucket.new_key() or when you get a
+        listing of keys in the bucket you will get instances of your key
+        class rather than the default.
+
+        :type key_class: class
+        :param key_class: A subclass of Key that can be more specific
+        """
+        self.key_class = key_class
+
+    def lookup(self, key_name, headers=None):
+        """
+        Deprecated: Please use get_key method.
+
+        :type key_name: string
+        :param key_name: The name of the key to retrieve
+
+        :rtype: :class:`boto.s3.key.Key`
+        :returns: A Key object from this bucket.
+        """
+        return self.get_key(key_name, headers=headers)
+
+    def get_key(self, key_name, headers=None, version_id=None,
+                response_headers=None):
+        """
+        Check to see if a particular key exists within the bucket.  This
+        method uses a HEAD request to check for the existence of the key.
+        Returns: An instance of a Key object or None
+
+        :type key_name: string
+        :param key_name: The name of the key to retrieve
+
+        :type response_headers: dict
+        :param response_headers: A dictionary containing HTTP
+            headers/values that will override any headers associated
+            with the stored object in the response.  See
+            http://goo.gl/EWOPb for details.
+
+        :rtype: :class:`boto.s3.key.Key`
+        :returns: A Key object from this bucket.
+        """
+        query_args_l = []
+        if version_id:
+            query_args_l.append('versionId=%s' % version_id)
+        if response_headers:
+            for rk, rv in response_headers.iteritems():
+                query_args_l.append('%s=%s' % (rk, urllib.quote(rv)))
+
+        key, resp = self._get_key_internal(key_name, headers, query_args_l)
+        return key
+
+    def _get_key_internal(self, key_name, headers, query_args_l):
+        query_args = '&'.join(query_args_l) or None
+        response = self.connection.make_request('HEAD', self.name, key_name,
+                                                headers=headers,
+                                                query_args=query_args)
+        response.read()
+        # Allow any success status (2xx) - for example this lets us
+        # support Range gets, which return status 206:
+        if response.status / 100 == 2:
+            k = self.key_class(self)
+            provider = self.connection.provider
+            k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
+            k.etag = response.getheader('etag')
+            k.content_type = response.getheader('content-type')
+            k.content_encoding = response.getheader('content-encoding')
+            k.content_disposition = response.getheader('content-disposition')
+            k.content_language = response.getheader('content-language')
+            k.last_modified = response.getheader('last-modified')
+            # the following machinations are a workaround to the fact that
+            # apache/fastcgi omits the content-length header on HEAD
+            # requests when the content-length is zero.
+            # See http://goo.gl/0Tdax for more details.
+ clen = response.getheader('content-length') + if clen: + k.size = int(response.getheader('content-length')) + else: + k.size = 0 + k.cache_control = response.getheader('cache-control') + k.name = key_name + k.handle_version_headers(response) + k.handle_encryption_headers(response) + k.handle_restore_headers(response) + k.handle_addl_headers(response.getheaders()) + return k, response + else: + if response.status == 404: + return None, response + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, '') + + def list(self, prefix='', delimiter='', marker='', headers=None): + """ + List key objects within a bucket. This returns an instance of an + BucketListResultSet that automatically handles all of the result + paging, etc. from S3. You just need to keep iterating until + there are no more results. + + Called with no arguments, this will return an iterator object across + all keys within the bucket. + + The Key objects returned by the iterator are obtained by parsing + the results of a GET on the bucket, also known as the List Objects + request. The XML returned by this request contains only a subset + of the information about each key. Certain metadata fields such + as Content-Type and user metadata are not available in the XML. + Therefore, if you want these additional metadata fields you will + have to do a HEAD request on the Key in the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle through + the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See http://goo.gl/Xx63h for more details. + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return BucketListResultSet(self, prefix, delimiter, marker, headers) + + def list_versions(self, prefix='', delimiter='', key_marker='', + version_id_marker='', headers=None): + """ + List version objects within a bucket. This returns an + instance of an VersionedBucketListResultSet that automatically + handles all of the result paging, etc. from S3. You just need + to keep iterating until there are no more results. Called + with no arguments, this will return an iterator object across + all keys within the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle through + the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See: + + http://aws.amazon.com/releasenotes/Amazon-S3/213 + + for more details. 
+ + :type marker: string + :param marker: The "marker" of where you are in the result set + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return VersionedBucketListResultSet(self, prefix, delimiter, + key_marker, version_id_marker, + headers) + + def list_multipart_uploads(self, key_marker='', + upload_id_marker='', + headers=None): + """ + List multipart upload objects within a bucket. This returns an + instance of an MultiPartUploadListResultSet that automatically + handles all of the result paging, etc. from S3. You just need + to keep iterating until there are no more results. + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return MultiPartUploadListResultSet(self, key_marker, + upload_id_marker, + headers) + + def _get_all_query_args(self, params, initial_query_string=''): + pairs = [] + + if initial_query_string: + pairs.append(initial_query_string) + + for key, value in params.items(): + key = key.replace('_', '-') + if key == 'maxkeys': + key = 'max-keys' + if isinstance(value, unicode): + value = value.encode('utf-8') + if value is not None and value != '': + pairs.append('%s=%s' % ( + urllib.quote(key), + urllib.quote(str(value) + ))) + + return '&'.join(pairs) + + def _get_all(self, element_map, initial_query_string='', + headers=None, **params): + query_args = self._get_all_query_args( + params, + initial_query_string=initial_query_string + ) + response = self.connection.make_request('GET', self.name, + headers=headers, + query_args=query_args) + body = response.read() + boto.log.debug(body) + if response.status == 200: + rs = ResultSet(element_map) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_all_keys(self, headers=None, **params): + """ + A lower-level method for listing contents of a bucket. This + closely models the actual S3 API and requires you to manually + handle the paging of results. For a higher-level method that + handles the details of paging for you, you can use the list + method. + + :type max_keys: int + :param max_keys: The maximum number of keys to retrieve + + :type prefix: string + :param prefix: The prefix of the keys you want to retrieve + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :type delimiter: string + :param delimiter: If this optional, Unicode string parameter + is included with your request, then keys that contain the + same string between the prefix and the first occurrence of + the delimiter will be rolled up into a single result + element in the CommonPrefixes collection. These rolled-up + keys are not returned elsewhere in the response. + + :rtype: ResultSet + :return: The result from S3 listing the keys requested + + """ + return self._get_all([('Contents', self.key_class), + ('CommonPrefixes', Prefix)], + '', headers, **params) + + def get_all_versions(self, headers=None, **params): + """ + A lower-level, version-aware method for listing contents of a + bucket. This closely models the actual S3 API and requires + you to manually handle the paging of results. 
For a + higher-level method that handles the details of paging for + you, you can use the list method. + + :type max_keys: int + :param max_keys: The maximum number of keys to retrieve + + :type prefix: string + :param prefix: The prefix of the keys you want to retrieve + + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + with respect to keys. + + :type version_id_marker: string + :param version_id_marker: The "marker" of where you are in the result + set with respect to version-id's. + + :type delimiter: string + :param delimiter: If this optional, Unicode string parameter + is included with your request, then keys that contain the + same string between the prefix and the first occurrence of + the delimiter will be rolled up into a single result + element in the CommonPrefixes collection. These rolled-up + keys are not returned elsewhere in the response. + + :rtype: ResultSet + :return: The result from S3 listing the keys requested + """ + return self._get_all([('Version', self.key_class), + ('CommonPrefixes', Prefix), + ('DeleteMarker', DeleteMarker)], + 'versions', headers, **params) + + def get_all_multipart_uploads(self, headers=None, **params): + """ + A lower-level, version-aware method for listing active + MultiPart uploads for a bucket. This closely models the + actual S3 API and requires you to manually handle the paging + of results. For a higher-level method that handles the + details of paging for you, you can use the list method. + + :type max_uploads: int + :param max_uploads: The maximum number of uploads to retrieve. + Default value is 1000. + + :type key_marker: string + :param key_marker: Together with upload_id_marker, this + parameter specifies the multipart upload after which + listing should begin. If upload_id_marker is not + specified, only the keys lexicographically greater than + the specified key_marker will be included in the list. + + If upload_id_marker is specified, any multipart uploads + for a key equal to the key_marker might also be included, + provided those multipart uploads have upload IDs + lexicographically greater than the specified + upload_id_marker. + + :type upload_id_marker: string + :param upload_id_marker: Together with key-marker, specifies + the multipart upload after which listing should begin. If + key_marker is not specified, the upload_id_marker + parameter is ignored. Otherwise, any multipart uploads + for a key equal to the key_marker might be included in the + list only if they have an upload ID lexicographically + greater than the specified upload_id_marker. 
+
+        :rtype: ResultSet
+        :return: The result from S3 listing the uploads requested
+
+        """
+        return self._get_all([('Upload', MultiPartUpload),
+                              ('CommonPrefixes', Prefix)],
+                             'uploads', headers, **params)
+
+    def new_key(self, key_name=None):
+        """
+        Creates a new key
+
+        :type key_name: string
+        :param key_name: The name of the key to create
+
+        :rtype: :class:`boto.s3.key.Key` or subclass
+        :returns: An instance of the newly created key object
+        """
+        if not key_name:
+            raise ValueError('Empty key names are not allowed')
+        return self.key_class(self, key_name)
+
+    def generate_url(self, expires_in, method='GET', headers=None,
+                     force_http=False, response_headers=None,
+                     expires_in_absolute=False):
+        return self.connection.generate_url(expires_in, method, self.name,
+                                            headers=headers,
+                                            force_http=force_http,
+                                            response_headers=response_headers,
+                                            expires_in_absolute=expires_in_absolute)
+
+    def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
+        """
+        Deletes a set of keys using S3's Multi-object delete API. If a
+        VersionID is specified for that key then that version is removed.
+        Returns a MultiDeleteResult Object, which contains Deleted
+        and Error elements for each key you ask to delete.
+
+        :type keys: list
+        :param keys: A list of either key_names or (key_name, versionid) pairs
+            or a list of Key instances.
+
+        :type quiet: boolean
+        :param quiet: In quiet mode the response includes only keys
+            where the delete operation encountered an error. For a
+            successful deletion, the operation does not return any
+            information about the delete in the response body.
+
+        :type mfa_token: tuple or list of strings
+        :param mfa_token: A tuple or list consisting of the serial
+            number from the MFA device and the current value of the
+            six-digit token associated with the device. This value is
+            required anytime you are deleting versioned objects from a
+            bucket that has the MFADelete option on the bucket.
+
+        :returns: An instance of MultiDeleteResult
+        """
+        ikeys = iter(keys)
+        result = MultiDeleteResult(self)
+        provider = self.connection.provider
+        query_args = 'delete'
+
+        def delete_keys2(hdrs):
+            hdrs = hdrs or {}
+            data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
+            data += u"<Delete>"
+            if quiet:
+                data += u"<Quiet>true</Quiet>"
+            count = 0
+            while count < 1000:
+                try:
+                    key = ikeys.next()
+                except StopIteration:
+                    break
+                if isinstance(key, basestring):
+                    key_name = key
+                    version_id = None
+                elif isinstance(key, tuple) and len(key) == 2:
+                    key_name, version_id = key
+                elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
+                    key_name = key.name
+                    version_id = key.version_id
+                else:
+                    if isinstance(key, Prefix):
+                        key_name = key.name
+                        code = 'PrefixSkipped'  # Don't delete Prefix
+                    else:
+                        key_name = repr(key)  # try to get a string
+                        code = 'InvalidArgument'  # other unknown type
+                    message = 'Invalid. No delete action taken for this object.'
+                    error = Error(key_name, code=code, message=message)
+                    result.errors.append(error)
+                    continue
+                count += 1
+                data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
+                if version_id:
+                    data += u"<VersionId>%s</VersionId>" % version_id
+                data += u"</Object>"
+            data += u"</Delete>"
+            if count <= 0:
+                return False  # no more
+            data = data.encode('utf-8')
+            fp = StringIO.StringIO(data)
+            md5 = boto.utils.compute_md5(fp)
+            hdrs['Content-MD5'] = md5[1]
+            hdrs['Content-Type'] = 'text/xml'
+            if mfa_token:
+                hdrs[provider.mfa_header] = ' '.join(mfa_token)
+            response = self.connection.make_request('POST', self.name,
+                                                    headers=hdrs,
+                                                    query_args=query_args,
+                                                    data=data)
+            body = response.read()
+            if response.status == 200:
+                h = handler.XmlHandler(result, self)
+                xml.sax.parseString(body, h)
+                return count >= 1000  # more?
+            else:
+                raise provider.storage_response_error(response.status,
+                                                      response.reason,
+                                                      body)
+        while delete_keys2(headers):
+            pass
+        return result
+
+    def delete_key(self, key_name, headers=None, version_id=None,
+                   mfa_token=None):
+        """
+        Deletes a key from the bucket. If a version_id is provided,
+        only that version of the key will be deleted.
+
+        :type key_name: string
+        :param key_name: The key name to delete
+
+        :type version_id: string
+        :param version_id: The version ID (optional)
+
+        :type mfa_token: tuple or list of strings
+        :param mfa_token: A tuple or list consisting of the serial
+            number from the MFA device and the current value of the
+            six-digit token associated with the device. This value is
+            required anytime you are deleting versioned objects from a
+            bucket that has the MFADelete option on the bucket.
+
+        :rtype: :class:`boto.s3.key.Key` or subclass
+        :returns: A key object holding information on what was
+            deleted. The caller can see if a delete_marker was
+            created or removed and what version_id the delete created
+            or removed.
+        """
+        if not key_name:
+            raise ValueError('Empty key names are not allowed')
+        return self._delete_key_internal(key_name, headers=headers,
+                                         version_id=version_id,
+                                         mfa_token=mfa_token,
+                                         query_args_l=None)
+
+    def _delete_key_internal(self, key_name, headers=None, version_id=None,
+                             mfa_token=None, query_args_l=None):
+        query_args_l = query_args_l or []
+        provider = self.connection.provider
+        if version_id:
+            query_args_l.append('versionId=%s' % version_id)
+        query_args = '&'.join(query_args_l) or None
+        if mfa_token:
+            if not headers:
+                headers = {}
+            headers[provider.mfa_header] = ' '.join(mfa_token)
+        response = self.connection.make_request('DELETE', self.name, key_name,
+                                                headers=headers,
+                                                query_args=query_args)
+        body = response.read()
+        if response.status != 204:
+            raise provider.storage_response_error(response.status,
+                                                  response.reason, body)
+        else:
+            # return a key object with information on what was deleted.
+            k = self.key_class(self)
+            k.name = key_name
+            k.handle_version_headers(response)
+            k.handle_addl_headers(response.getheaders())
+            return k
+
+    def copy_key(self, new_key_name, src_bucket_name,
+                 src_key_name, metadata=None, src_version_id=None,
+                 storage_class='STANDARD', preserve_acl=False,
+                 encrypt_key=False, headers=None, query_args=None):
+        """
+        Create a new key in the bucket by copying another existing key.
+
+        :type new_key_name: string
+        :param new_key_name: The name of the new key
+
+        :type src_bucket_name: string
+        :param src_bucket_name: The name of the source bucket
+
+        :type src_key_name: string
+        :param src_key_name: The name of the source key
+
+        :type src_version_id: string
+        :param src_version_id: The version id for the key.
This param + is optional. If not specified, the newest version of the + key will be copied. + + :type metadata: dict + :param metadata: Metadata to be associated with new key. If + metadata is supplied, it will replace the metadata of the + source key being copied. If no metadata is supplied, the + source key's metadata will be copied to the new key. + + :type storage_class: string + :param storage_class: The storage class of the new key. By + default, the new key will use the standard storage class. + Possible values are: STANDARD | REDUCED_REDUNDANCY + + :type preserve_acl: bool + :param preserve_acl: If True, the ACL from the source key will + be copied to the destination key. If False, the + destination key will have the default ACL. Note that + preserving the ACL in the new key object will require two + additional API calls to S3, one to retrieve the current + ACL and one to set that ACL on the new object. If you + don't care about the ACL, a value of False will be + significantly more efficient. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type headers: dict + :param headers: A dictionary of header name/value pairs. + + :type query_args: string + :param query_args: A string of additional querystring arguments + to append to the request + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + headers = headers or {} + provider = self.connection.provider + src_key_name = boto.utils.get_utf8_value(src_key_name) + if preserve_acl: + if self.name == src_bucket_name: + src_bucket = self + else: + src_bucket = self.connection.get_bucket(src_bucket_name) + acl = src_bucket.get_xml_acl(src_key_name) + if encrypt_key: + headers[provider.server_side_encryption_header] = 'AES256' + src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name)) + if src_version_id: + src += '?versionId=%s' % src_version_id + headers[provider.copy_source_header] = str(src) + # make sure storage_class_header key exists before accessing it + if provider.storage_class_header and storage_class: + headers[provider.storage_class_header] = storage_class + if metadata is not None: + headers[provider.metadata_directive_header] = 'REPLACE' + headers = boto.utils.merge_meta(headers, metadata, provider) + elif not query_args: # Can't use this header with multi-part copy. 
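+            # 'COPY' tells S3 to carry the source key's metadata over
+            # unchanged; 'REPLACE' (above) substitutes the caller-supplied
+            # metadata instead.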
+ headers[provider.metadata_directive_header] = 'COPY' + response = self.connection.make_request('PUT', self.name, new_key_name, + headers=headers, + query_args=query_args) + body = response.read() + if response.status == 200: + key = self.new_key(new_key_name) + h = handler.XmlHandler(key, self) + xml.sax.parseString(body, h) + if hasattr(key, 'Error'): + raise provider.storage_copy_error(key.Code, key.Message, body) + key.handle_version_headers(response) + key.handle_addl_headers(response.getheaders()) + if preserve_acl: + self.set_xml_acl(acl, new_key_name) + return key + else: + raise provider.storage_response_error(response.status, + response.reason, body) + + def set_canned_acl(self, acl_str, key_name='', headers=None, + version_id=None): + assert acl_str in CannedACLStrings + + if headers: + headers[self.connection.provider.acl_header] = acl_str + else: + headers = {self.connection.provider.acl_header: acl_str} + + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('PUT', self.name, key_name, + headers=headers, query_args=query_args) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_xml_acl(self, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None, + query_args='acl'): + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('PUT', self.name, key_name, + data=acl_str.encode('UTF-8'), + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None): + if isinstance(acl_or_str, Policy): + self.set_xml_acl(acl_or_str.to_xml(), key_name, + headers, version_id) + else: + self.set_canned_acl(acl_or_str, key_name, + headers, version_id) + + def get_acl(self, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status == 200: + policy = Policy(self) + h = handler.XmlHandler(policy, self) + xml.sax.parseString(body, h) + return policy + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_subresource(self, subresource, value, key_name='', headers=None, + version_id=None): + """ + Set a subresource for a bucket or key. + + :type subresource: string + :param subresource: The subresource to set. + + :type value: string + :param value: The value of the subresource. + + :type key_name: string + :param key_name: The key to operate on, or None to operate on the + bucket. + + :type headers: dict + :param headers: Additional HTTP headers to include in the request. + + :type src_version_id: string + :param src_version_id: Optional. 
The version id of the key to + operate on. If not specified, operate on the newest + version. + """ + if not subresource: + raise TypeError('set_subresource called with subresource=None') + query_args = subresource + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('PUT', self.name, key_name, + data=value.encode('UTF-8'), + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_subresource(self, subresource, key_name='', headers=None, + version_id=None): + """ + Get a subresource for a bucket or key. + + :type subresource: string + :param subresource: The subresource to get. + + :type key_name: string + :param key_name: The key to operate on, or None to operate on the + bucket. + + :type headers: dict + :param headers: Additional HTTP headers to include in the request. + + :type src_version_id: string + :param src_version_id: Optional. The version id of the key to + operate on. If not specified, operate on the newest + version. + + :rtype: string + :returns: The value of the subresource. + """ + if not subresource: + raise TypeError('get_subresource called with subresource=None') + query_args = subresource + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def make_public(self, recursive=False, headers=None): + self.set_canned_acl('public-read', headers=headers) + if recursive: + for key in self: + self.set_canned_acl('public-read', key.name, headers=headers) + + def add_email_grant(self, permission, email_address, + recursive=False, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a bucket. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL + and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). + + :type email_address: string + :param email_address: The email address associated with the AWS + account your are granting the permission to. + + :type recursive: boolean + :param recursive: A boolean value to controls whether the + command will apply the grant to all keys within the bucket + or not. The default value is False. By passing a True + value, the call will iterate through all keys in the + bucket and apply the same grant to each key. CAUTION: If + you have a lot of keys, this could take a long time! + """ + if permission not in S3Permissions: + raise self.connection.provider.storage_permissions_error( + 'Unknown Permission: %s' % permission) + policy = self.get_acl(headers=headers) + policy.acl.add_email_grant(permission, email_address) + self.set_acl(policy, headers=headers) + if recursive: + for key in self: + key.add_email_grant(permission, email_address, headers=headers) + + def add_user_grant(self, permission, user_id, recursive=False, + headers=None, display_name=None): + """ + Convenience method that provides a quick way to add a canonical + user grant to a bucket. 
This method retrieves the current ACL, + creates a new grant based on the parameters passed in, adds that + grant to the ACL and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). + + :type user_id: string + :param user_id: The canonical user id associated with the AWS + account your are granting the permission to. + + :type recursive: boolean + :param recursive: A boolean value to controls whether the + command will apply the grant to all keys within the bucket + or not. The default value is False. By passing a True + value, the call will iterate through all keys in the + bucket and apply the same grant to each key. CAUTION: If + you have a lot of keys, this could take a long time! + + :type display_name: string + :param display_name: An option string containing the user's + Display Name. Only required on Walrus. + """ + if permission not in S3Permissions: + raise self.connection.provider.storage_permissions_error( + 'Unknown Permission: %s' % permission) + policy = self.get_acl(headers=headers) + policy.acl.add_user_grant(permission, user_id, + display_name=display_name) + self.set_acl(policy, headers=headers) + if recursive: + for key in self: + key.add_user_grant(permission, user_id, headers=headers, + display_name=display_name) + + def list_grants(self, headers=None): + policy = self.get_acl(headers=headers) + return policy.acl.grants + + def get_location(self): + """ + Returns the LocationConstraint for the bucket. + + :rtype: str + :return: The LocationConstraint for the bucket or the empty + string if no constraint was specified when bucket was created. + """ + response = self.connection.make_request('GET', self.name, + query_args='location') + body = response.read() + if response.status == 200: + rs = ResultSet(self) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs.LocationConstraint + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_xml_logging(self, logging_str, headers=None): + """ + Set logging on a bucket directly to the given xml string. + + :type logging_str: unicode string + :param logging_str: The XML for the bucketloggingstatus which + will be set. The string will be converted to utf-8 before + it is sent. Usually, you will obtain this XML from the + BucketLogging object. + + :rtype: bool + :return: True if ok or raises an exception. + """ + body = logging_str.encode('utf-8') + response = self.connection.make_request('PUT', self.name, data=body, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def enable_logging(self, target_bucket, target_prefix='', + grants=None, headers=None): + """ + Enable logging on a bucket. + + :type target_bucket: bucket or string + :param target_bucket: The bucket to log to. + + :type target_prefix: string + :param target_prefix: The prefix which should be prepended to the + generated log files written to the target_bucket. + + :type grants: list of Grant objects + :param grants: A list of extra permissions which will be granted on + the log files which are created. + + :rtype: bool + :return: True if ok or raises an exception. 
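+
+        A minimal usage sketch; ``conn`` and the bucket names here are
+        illustrative, not part of this API::
+
+            logs = conn.create_bucket('my-log-bucket')
+            logs.set_as_logging_target()
+            bucket.enable_logging(logs, target_prefix='access-logs/')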
+ """ + if isinstance(target_bucket, Bucket): + target_bucket = target_bucket.name + blogging = BucketLogging(target=target_bucket, prefix=target_prefix, + grants=grants) + return self.set_xml_logging(blogging.to_xml(), headers=headers) + + def disable_logging(self, headers=None): + """ + Disable logging on a bucket. + + :rtype: bool + :return: True if ok or raises an exception. + """ + blogging = BucketLogging() + return self.set_xml_logging(blogging.to_xml(), headers=headers) + + def get_logging_status(self, headers=None): + """ + Get the logging status for this bucket. + + :rtype: :class:`boto.s3.bucketlogging.BucketLogging` + :return: A BucketLogging object for this bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + blogging = BucketLogging() + h = handler.XmlHandler(blogging, self) + xml.sax.parseString(body, h) + return blogging + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_as_logging_target(self, headers=None): + """ + Setup the current bucket as a logging target by granting the necessary + permissions to the LogDelivery group to write log files to this bucket. + """ + policy = self.get_acl(headers=headers) + g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup) + g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup) + policy.acl.add_grant(g1) + policy.acl.add_grant(g2) + self.set_acl(policy, headers=headers) + + def get_request_payment(self, headers=None): + response = self.connection.make_request('GET', self.name, + query_args='requestPayment', headers=headers) + body = response.read() + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_request_payment(self, payer='BucketOwner', headers=None): + body = self.BucketPaymentBody % payer + response = self.connection.make_request('PUT', self.name, data=body, + query_args='requestPayment', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_versioning(self, versioning, mfa_delete=False, + mfa_token=None, headers=None): + """ + Configure versioning for this bucket. + + ..note:: This feature is currently in beta. + + :type versioning: bool + :param versioning: A boolean indicating whether version is + enabled (True) or disabled (False). + + :type mfa_delete: bool + :param mfa_delete: A boolean indicating whether the + Multi-Factor Authentication Delete feature is enabled + (True) or disabled (False). If mfa_delete is enabled then + all Delete operations will require the token from your MFA + device to be passed in the request. + + :type mfa_token: tuple or list of strings + :param mfa_token: A tuple or list consisting of the serial + number from the MFA device and the current value of the + six-digit token associated with the device. This value is + required when you are changing the status of the MfaDelete + property of the bucket. 
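+
+        A minimal usage sketch (the ``bucket`` object is assumed to exist)::
+
+            bucket.configure_versioning(True)
+            status = bucket.get_versioning_status()
+            # status.get('Versioning') should now be 'Enabled'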
+ """ + if versioning: + ver = 'Enabled' + else: + ver = 'Suspended' + if mfa_delete: + mfa = 'Enabled' + else: + mfa = 'Disabled' + body = self.VersioningBody % (ver, mfa) + if mfa_token: + if not headers: + headers = {} + provider = self.connection.provider + headers[provider.mfa_header] = ' '.join(mfa_token) + response = self.connection.make_request('PUT', self.name, data=body, + query_args='versioning', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_versioning_status(self, headers=None): + """ + Returns the current status of versioning on the bucket. + + :rtype: dict + :returns: A dictionary containing a key named 'Versioning' + that can have a value of either Enabled, Disabled, or + Suspended. Also, if MFADelete has ever been enabled on the + bucket, the dictionary will contain a key named + 'MFADelete' which will have a value of either Enabled or + Suspended. + """ + response = self.connection.make_request('GET', self.name, + query_args='versioning', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + d = {} + ver = re.search(self.VersionRE, body) + if ver: + d['Versioning'] = ver.group(1) + mfa = re.search(self.MFADeleteRE, body) + if mfa: + d['MfaDelete'] = mfa.group(1) + return d + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_lifecycle(self, lifecycle_config, headers=None): + """ + Configure lifecycle for this bucket. + + :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle` + :param lifecycle_config: The lifecycle configuration you want + to configure for this bucket. + """ + xml = lifecycle_config.to_xml() + xml = xml.encode('utf-8') + fp = StringIO.StringIO(xml) + md5 = boto.utils.compute_md5(fp) + if headers is None: + headers = {} + headers['Content-MD5'] = md5[1] + headers['Content-Type'] = 'text/xml' + response = self.connection.make_request('PUT', self.name, + data=fp.getvalue(), + query_args='lifecycle', + headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_lifecycle_config(self, headers=None): + """ + Returns the current lifecycle configuration on the bucket. + + :rtype: :class:`boto.s3.lifecycle.Lifecycle` + :returns: A LifecycleConfig object that describes all current + lifecycle rules in effect for the bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args='lifecycle', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + lifecycle = Lifecycle() + h = handler.XmlHandler(lifecycle, self) + xml.sax.parseString(body, h) + return lifecycle + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def delete_lifecycle_configuration(self, headers=None): + """ + Removes all lifecycle configuration from the bucket. 
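+
+        A round-trip sketch using the Lifecycle helper from this package;
+        the rule id, prefix, and 30-day expiration are illustrative::
+
+            from boto.s3.lifecycle import Lifecycle
+            lc = Lifecycle()
+            lc.add_rule('expire-tmp', 'tmp/', 'Enabled', 30)
+            bucket.configure_lifecycle(lc)
+            bucket.delete_lifecycle_configuration()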
+ """ + response = self.connection.make_request('DELETE', self.name, + query_args='lifecycle', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_website(self, suffix=None, error_key=None, + redirect_all_requests_to=None, + routing_rules=None, + headers=None): + """ + Configure this bucket to act as a website + + :type suffix: str + :param suffix: Suffix that is appended to a request that is for a + "directory" on the website endpoint (e.g. if the suffix is + index.html and you make a request to samplebucket/images/ + the data that is returned will be for the object with the + key name images/index.html). The suffix must not be empty + and must not include a slash character. + + :type error_key: str + :param error_key: The object key name to use when a 4XX class + error occurs. This is optional. + + :type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation` + :param redirect_all_requests_to: Describes the redirect behavior for + every request to this bucket's website endpoint. If this value is + non None, no other values are considered when configuring the + website configuration for the bucket. This is an instance of + ``RedirectLocation``. + + :type routing_rules: :class:`boto.s3.website.RoutingRules` + :param routing_rules: Object which specifies conditions + and redirects that apply when the conditions are met. + + """ + config = website.WebsiteConfiguration( + suffix, error_key, redirect_all_requests_to, + routing_rules) + return self.set_website_configuration(config, headers=headers) + + def set_website_configuration(self, config, headers=None): + """ + :type config: boto.s3.website.WebsiteConfiguration + :param config: Configuration data + """ + return self.set_website_configuration_xml(config.to_xml(), + headers=headers) + + + def set_website_configuration_xml(self, xml, headers=None): + """Upload xml website configuration""" + response = self.connection.make_request('PUT', self.name, data=xml, + query_args='website', + headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_website_configuration(self, headers=None): + """ + Returns the current status of website configuration on the bucket. + + :rtype: dict + :returns: A dictionary containing a Python representation + of the XML response from S3. The overall structure is: + + * WebsiteConfiguration + + * IndexDocument + + * Suffix : suffix that is appended to request that + is for a "directory" on the website endpoint + * ErrorDocument + + * Key : name of object to serve when an error occurs + """ + return self.get_website_configuration_with_xml(headers)[0] + + def get_website_configuration_obj(self, headers=None): + """Get the website configuration as a + :class:`boto.s3.website.WebsiteConfiguration` object. + """ + config_xml = self.get_website_configuration_xml(headers=headers) + config = website.WebsiteConfiguration() + h = handler.XmlHandler(config, self) + xml.sax.parseString(config_xml, h) + return config + + def get_website_configuration_with_xml(self, headers=None): + """ + Returns the current status of website configuration on the bucket as + unparsed XML. + + :rtype: 2-Tuple + :returns: 2-tuple containing: + 1) A dictionary containing a Python representation + of the XML response. 
The overall structure is: + * WebsiteConfiguration + * IndexDocument + * Suffix : suffix that is appended to request that + is for a "directory" on the website endpoint + * ErrorDocument + * Key : name of object to serve when an error occurs + 2) unparsed XML describing the bucket's website configuration. + """ + + body = self.get_website_configuration_xml(headers=headers) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e, body + + def get_website_configuration_xml(self, headers=None): + """Get raw website configuration xml""" + response = self.connection.make_request('GET', self.name, + query_args='website', headers=headers) + body = response.read() + boto.log.debug(body) + + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def delete_website_configuration(self, headers=None): + """ + Removes all website configuration from the bucket. + """ + response = self.connection.make_request('DELETE', self.name, + query_args='website', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_website_endpoint(self): + """ + Returns the fully qualified hostname to use is you want to access this + bucket as a website. This doesn't validate whether the bucket has + been correctly configured as a website or not. + """ + l = [self.name] + l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location())) + l.append('.'.join(self.connection.host.split('.')[-2:])) + return '.'.join(l) + + def get_policy(self, headers=None): + """ + Returns the JSON policy associated with the bucket. The policy + is returned as an uninterpreted JSON string. + """ + response = self.connection.make_request('GET', self.name, + query_args='policy', headers=headers) + body = response.read() + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_policy(self, policy, headers=None): + """ + Add or replace the JSON policy associated with the bucket. + + :type policy: str + :param policy: The JSON policy as a string. + """ + response = self.connection.make_request('PUT', self.name, + data=policy, + query_args='policy', + headers=headers) + body = response.read() + if response.status >= 200 and response.status <= 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def delete_policy(self, headers=None): + response = self.connection.make_request('DELETE', self.name, + data='/?policy', + query_args='policy', + headers=headers) + body = response.read() + if response.status >= 200 and response.status <= 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_cors_xml(self, cors_xml, headers=None): + """ + Set the CORS (Cross-Origin Resource Sharing) for a bucket. + + :type cors_xml: str + :param cors_xml: The XML document describing your desired + CORS configuration. See the S3 documentation for details + of the exact syntax required. 
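+
+        A sketch using the CORSConfiguration helper instead of raw XML;
+        the rule values are illustrative::
+
+            from boto.s3.cors import CORSConfiguration
+            cors = CORSConfiguration()
+            cors.add_rule(['GET'], ['*'], allowed_header=['*'],
+                          max_age_seconds=3000)
+            bucket.set_cors(cors)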
+ """ + fp = StringIO.StringIO(cors_xml) + md5 = boto.utils.compute_md5(fp) + if headers is None: + headers = {} + headers['Content-MD5'] = md5[1] + headers['Content-Type'] = 'text/xml' + response = self.connection.make_request('PUT', self.name, + data=fp.getvalue(), + query_args='cors', + headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_cors(self, cors_config, headers=None): + """ + Set the CORS for this bucket given a boto CORSConfiguration + object. + + :type cors_config: :class:`boto.s3.cors.CORSConfiguration` + :param cors_config: The CORS configuration you want + to configure for this bucket. + """ + return self.set_cors_xml(cors_config.to_xml()) + + def get_cors_xml(self, headers=None): + """ + Returns the current CORS configuration on the bucket as an + XML document. + """ + response = self.connection.make_request('GET', self.name, + query_args='cors', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_cors(self, headers=None): + """ + Returns the current CORS configuration on the bucket. + + :rtype: :class:`boto.s3.cors.CORSConfiguration` + :returns: A CORSConfiguration object that describes all current + CORS rules in effect for the bucket. + """ + body = self.get_cors_xml(headers) + cors = CORSConfiguration() + h = handler.XmlHandler(cors, self) + xml.sax.parseString(body, h) + return cors + + def delete_cors(self, headers=None): + """ + Removes all CORS configuration from the bucket. + """ + response = self.connection.make_request('DELETE', self.name, + query_args='cors', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def initiate_multipart_upload(self, key_name, headers=None, + reduced_redundancy=False, + metadata=None, encrypt_key=False, + policy=None): + """ + Start a multipart upload operation. + + :type key_name: string + :param key_name: The name of the key that will ultimately + result from this multipart upload operation. This will be + exactly as the key appears in the bucket after the upload + process has been completed. + + :type headers: dict + :param headers: Additional HTTP headers to send and store with the + resulting key in S3. + + :type reduced_redundancy: boolean + :param reduced_redundancy: In multipart uploads, the storage + class is specified when initiating the upload, not when + uploading individual parts. So if you want the resulting + key to use the reduced redundancy storage class set this + flag when you initiate the upload. + + :type metadata: dict + :param metadata: Any metadata that you would like to set on the key + that results from the multipart upload. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the + new key (once completed) in S3. 
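+
+        A minimal usage sketch; the key and file names are hypothetical::
+
+            mp = bucket.initiate_multipart_upload('big-object')
+            with open('part1.dat', 'rb') as fp:
+                mp.upload_part_from_file(fp, part_num=1)
+            mp.complete_upload()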
+ """ + query_args = 'uploads' + provider = self.connection.provider + headers = headers or {} + if policy: + headers[provider.acl_header] = policy + if reduced_redundancy: + storage_class_header = provider.storage_class_header + if storage_class_header: + headers[storage_class_header] = 'REDUCED_REDUNDANCY' + # TODO: what if the provider doesn't support reduced redundancy? + # (see boto.s3.key.Key.set_contents_from_file) + if encrypt_key: + headers[provider.server_side_encryption_header] = 'AES256' + if metadata is None: + metadata = {} + + headers = boto.utils.merge_meta(headers, metadata, + self.connection.provider) + response = self.connection.make_request('POST', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + resp = MultiPartUpload(self) + h = handler.XmlHandler(resp, self) + xml.sax.parseString(body, h) + return resp + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def complete_multipart_upload(self, key_name, upload_id, + xml_body, headers=None): + """ + Complete a multipart upload operation. + """ + query_args = 'uploadId=%s' % upload_id + if headers is None: + headers = {} + headers['Content-Type'] = 'text/xml' + response = self.connection.make_request('POST', self.name, key_name, + query_args=query_args, + headers=headers, data=xml_body) + contains_error = False + body = response.read() + # Some errors will be reported in the body of the response + # even though the HTTP response code is 200. This check + # does a quick and dirty peek in the body for an error element. + if body.find('') > 0: + contains_error = True + boto.log.debug(body) + if response.status == 200 and not contains_error: + resp = CompleteMultiPartUpload(self) + h = handler.XmlHandler(resp, self) + xml.sax.parseString(body, h) + # Use a dummy key to parse various response headers + # for versioning, encryption info and then explicitly + # set the completed MPU object values from key. 
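+            # The dummy key below is never stored in S3; it only reuses
+            # Key's header-parsing helpers for version/encryption values.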
+ k = self.key_class(self) + k.handle_version_headers(response) + k.handle_encryption_headers(response) + resp.version_id = k.version_id + resp.encrypted = k.encrypted + return resp + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def cancel_multipart_upload(self, key_name, upload_id, headers=None): + query_args = 'uploadId=%s' % upload_id + response = self.connection.make_request('DELETE', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status != 204: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def delete(self, headers=None): + return self.connection.delete_bucket(self.name, headers=headers) + + def get_tags(self): + response = self.get_xml_tags() + tags = Tags() + h = handler.XmlHandler(tags, self) + xml.sax.parseString(response, h) + return tags + + def get_xml_tags(self): + response = self.connection.make_request('GET', self.name, + query_args='tagging', + headers=None) + body = response.read() + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_xml_tags(self, tag_str, headers=None, query_args='tagging'): + if headers is None: + headers = {} + md5 = boto.utils.compute_md5(StringIO.StringIO(tag_str)) + headers['Content-MD5'] = md5[1] + headers['Content-Type'] = 'text/xml' + response = self.connection.make_request('PUT', self.name, + data=tag_str.encode('utf-8'), + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 204: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return True + + def set_tags(self, tags, headers=None): + return self.set_xml_tags(tags.to_xml(), headers=headers) + + def delete_tags(self, headers=None): + response = self.connection.make_request('DELETE', self.name, + query_args='tagging', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) diff --git a/awx/lib/site-packages/boto/s3/bucketlistresultset.py b/awx/lib/site-packages/boto/s3/bucketlistresultset.py new file mode 100644 index 0000000000..e11eb493a5 --- /dev/null +++ b/awx/lib/site-packages/boto/s3/bucketlistresultset.py @@ -0,0 +1,139 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None): + """ + A generator function for listing keys in a bucket. + """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_keys(prefix=prefix, marker=marker, + delimiter=delimiter, headers=headers) + for k in rs: + yield k + if k: + marker = rs.next_marker or k.name + more_results= rs.is_truncated + +class BucketListResultSet: + """ + A resultset for listing keys within a bucket. Uses the bucket_lister + generator function and implements the iterator interface. This + transparently handles the results paging from S3 so even if you have + many thousands of keys within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + + def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None): + self.bucket = bucket + self.prefix = prefix + self.delimiter = delimiter + self.marker = marker + self.headers = headers + + def __iter__(self): + return bucket_lister(self.bucket, prefix=self.prefix, + delimiter=self.delimiter, marker=self.marker, + headers=self.headers) + +def versioned_bucket_lister(bucket, prefix='', delimiter='', + key_marker='', version_id_marker='', headers=None): + """ + A generator function for listing versions in a bucket. + """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker, + version_id_marker=version_id_marker, + delimiter=delimiter, headers=headers, + max_keys=999) + for k in rs: + yield k + key_marker = rs.next_key_marker + version_id_marker = rs.next_version_id_marker + more_results= rs.is_truncated + +class VersionedBucketListResultSet: + """ + A resultset for listing versions within a bucket. Uses the bucket_lister + generator function and implements the iterator interface. This + transparently handles the results paging from S3 so even if you have + many thousands of keys within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + + def __init__(self, bucket=None, prefix='', delimiter='', key_marker='', + version_id_marker='', headers=None): + self.bucket = bucket + self.prefix = prefix + self.delimiter = delimiter + self.key_marker = key_marker + self.version_id_marker = version_id_marker + self.headers = headers + + def __iter__(self): + return versioned_bucket_lister(self.bucket, prefix=self.prefix, + delimiter=self.delimiter, + key_marker=self.key_marker, + version_id_marker=self.version_id_marker, + headers=self.headers) + +def multipart_upload_lister(bucket, key_marker='', + upload_id_marker='', + headers=None): + """ + A generator function for listing multipart uploads in a bucket. + """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_multipart_uploads(key_marker=key_marker, + upload_id_marker=upload_id_marker, + headers=headers) + for k in rs: + yield k + key_marker = rs.next_key_marker + upload_id_marker = rs.next_upload_id_marker + more_results= rs.is_truncated + +class MultiPartUploadListResultSet: + """ + A resultset for listing multipart uploads within a bucket. + Uses the multipart_upload_lister generator function and + implements the iterator interface. 
This + transparently handles the results paging from S3 so even if you have + many thousands of uploads within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + def __init__(self, bucket=None, key_marker='', + upload_id_marker='', headers=None): + self.bucket = bucket + self.key_marker = key_marker + self.upload_id_marker = upload_id_marker + self.headers = headers + + def __iter__(self): + return multipart_upload_lister(self.bucket, + key_marker=self.key_marker, + upload_id_marker=self.upload_id_marker, + headers=self.headers) diff --git a/awx/lib/site-packages/boto/s3/bucketlogging.py b/awx/lib/site-packages/boto/s3/bucketlogging.py new file mode 100644 index 0000000000..9e3c050d28 --- /dev/null +++ b/awx/lib/site-packages/boto/s3/bucketlogging.py @@ -0,0 +1,83 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
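+#
+# BucketLogging objects are normally obtained via Bucket.get_logging_status()
+# or built for you by Bucket.enable_logging(), rather than constructed
+# directly.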
+
+import xml.sax.saxutils
+from acl import Grant
+
+class BucketLogging:
+
+    def __init__(self, target=None, prefix=None, grants=None):
+        self.target = target
+        self.prefix = prefix
+        if grants is None:
+            self.grants = []
+        else:
+            self.grants = grants
+
+    def __repr__(self):
+        if self.target is None:
+            return "<BucketLoggingStatus: Disabled>"
+        grants = []
+        for g in self.grants:
+            if g.type == 'CanonicalUser':
+                u = g.display_name
+            elif g.type == 'Group':
+                u = g.uri
+            else:
+                u = g.email_address
+            grants.append("%s = %s" % (u, g.permission))
+        return "<BucketLoggingStatus: %s/%s (%s)>" % (self.target, self.prefix, ", ".join(grants))
+
+    def add_grant(self, grant):
+        self.grants.append(grant)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grant':
+            self.grants.append(Grant())
+            return self.grants[-1]
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'TargetBucket':
+            self.target = value
+        elif name == 'TargetPrefix':
+            self.prefix = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        # caller is responsible to encode to utf-8
+        s = u'<?xml version="1.0" encoding="UTF-8"?>'
+        s += u'<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">'
+        if self.target is not None:
+            s += u'<LoggingEnabled>'
+            s += u'<TargetBucket>%s</TargetBucket>' % self.target
+            prefix = self.prefix or ''
+            s += u'<TargetPrefix>%s</TargetPrefix>' % xml.sax.saxutils.escape(prefix)
+            if self.grants:
+                s += '<TargetGrants>'
+                for grant in self.grants:
+                    s += grant.to_xml()
+                s += '</TargetGrants>'
+            s += u'</LoggingEnabled>'
+        s += u'</BucketLoggingStatus>'
+        return s
diff --git a/awx/lib/site-packages/boto/s3/connection.py b/awx/lib/site-packages/boto/s3/connection.py
new file mode 100644
index 0000000000..583fa16832
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/connection.py
@@ -0,0 +1,548 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+import urllib
+import base64
+import time
+
+import boto.utils
+from boto.connection import AWSAuthConnection
+from boto import handler
+from boto.s3.bucket import Bucket
+from boto.s3.key import Key
+from boto.resultset import ResultSet
+from boto.exception import BotoClientError, S3ResponseError
+
+
+def check_lowercase_bucketname(n):
+    """
+    Bucket names must not contain uppercase characters. We check for
+    this by appending a lowercase character and testing with islower().
+    Note this also covers cases like numeric bucket names with dashes.
+
+    >>> check_lowercase_bucketname("Aaaa")
+    Traceback (most recent call last):
+    ...
+ BotoClientError: S3Error: Bucket names cannot contain upper-case + characters when using either the sub-domain or virtual hosting calling + format. + + >>> check_lowercase_bucketname("1234-5678-9123") + True + >>> check_lowercase_bucketname("abcdefg1234") + True + """ + if not (n + 'a').islower(): + raise BotoClientError("Bucket names cannot contain upper-case " \ + "characters when using either the sub-domain or virtual " \ + "hosting calling format.") + return True + + +def assert_case_insensitive(f): + def wrapper(*args, **kwargs): + if len(args) == 3 and check_lowercase_bucketname(args[2]): + pass + return f(*args, **kwargs) + return wrapper + + +class _CallingFormat(object): + + def get_bucket_server(self, server, bucket): + return '' + + def build_url_base(self, connection, protocol, server, bucket, key=''): + url_base = '%s://' % protocol + url_base += self.build_host(server, bucket) + url_base += connection.get_path(self.build_path_base(bucket, key)) + return url_base + + def build_host(self, server, bucket): + if bucket == '': + return server + else: + return self.get_bucket_server(server, bucket) + + def build_auth_path(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + path = '' + if bucket != '': + path = '/' + bucket + return path + '/%s' % urllib.quote(key) + + def build_path_base(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + return '/%s' % urllib.quote(key) + + +class SubdomainCallingFormat(_CallingFormat): + + @assert_case_insensitive + def get_bucket_server(self, server, bucket): + return '%s.%s' % (bucket, server) + + +class VHostCallingFormat(_CallingFormat): + + @assert_case_insensitive + def get_bucket_server(self, server, bucket): + return bucket + + +class OrdinaryCallingFormat(_CallingFormat): + + def get_bucket_server(self, server, bucket): + return server + + def build_path_base(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + path_base = '/' + if bucket: + path_base += "%s/" % bucket + return path_base + urllib.quote(key) + + +class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat): + + def build_url_base(self, connection, protocol, server, bucket, key=''): + url_base = '//' + url_base += self.build_host(server, bucket) + url_base += connection.get_path(self.build_path_base(bucket, key)) + return url_base + + +class Location: + + DEFAULT = '' # US Classic Region + EU = 'EU' + USWest = 'us-west-1' + USWest2 = 'us-west-2' + SAEast = 'sa-east-1' + APNortheast = 'ap-northeast-1' + APSoutheast = 'ap-southeast-1' + APSoutheast2 = 'ap-southeast-2' + + +class S3Connection(AWSAuthConnection): + + DefaultHost = boto.config.get('s3', 'host', 's3.amazonaws.com') + DefaultCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat') + QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, + host=DefaultHost, debug=0, https_connection_factory=None, + calling_format=DefaultCallingFormat, path='/', + provider='aws', bucket_class=Bucket, security_token=None, + suppress_consec_slashes=True, anon=False, + validate_certs=None): + if isinstance(calling_format, str): + calling_format=boto.utils.find_class(calling_format)() + self.calling_format = calling_format + self.bucket_class = bucket_class + self.anon = anon + AWSAuthConnection.__init__(self, host, + aws_access_key_id, aws_secret_access_key, + is_secure, port, 
proxy, proxy_port, proxy_user, proxy_pass,
+                                   debug=debug,
+                                   https_connection_factory=https_connection_factory,
+                                   path=path, provider=provider,
+                                   security_token=security_token,
+                                   suppress_consec_slashes=suppress_consec_slashes,
+                                   validate_certs=validate_certs)
+
+    def _required_auth_capability(self):
+        if self.anon:
+            return ['anon']
+        else:
+            return ['s3']
+
+    def __iter__(self):
+        for bucket in self.get_all_buckets():
+            yield bucket
+
+    def __contains__(self, bucket_name):
+        return not (self.lookup(bucket_name) is None)
+
+    def set_bucket_class(self, bucket_class):
+        """
+        Set the Bucket class associated with this connection. By default
+        this is the boto.s3.bucket.Bucket class, but if you want to
+        subclass that for some reason, this allows you to associate your
+        new class.
+
+        :type bucket_class: class
+        :param bucket_class: A subclass of Bucket that can be more specific
+        """
+        self.bucket_class = bucket_class
+
+    def build_post_policy(self, expiration_time, conditions):
+        """
+        Taken from the AWS book Python examples and modified for use
+        with boto.
+        """
+        assert isinstance(expiration_time, time.struct_time), \
+            'Policy document must include a valid expiration Time object'
+
+        # Convert conditions object mappings to condition statements
+
+        return '{"expiration": "%s",\n"conditions": [%s]}' % \
+            (time.strftime(boto.utils.ISO8601, expiration_time),
+             ",".join(conditions))
+
+    def build_post_form_args(self, bucket_name, key, expires_in=6000,
+                             acl=None, success_action_redirect=None,
+                             max_content_length=None,
+                             http_method='http', fields=None,
+                             conditions=None, storage_class='STANDARD',
+                             server_side_encryption=None):
+        """
+        Taken from the AWS book Python examples and modified for use with
+        boto. This only returns the arguments required for the post form,
+        not the actual form. It also does not return the file input field,
+        which needs to be added separately.
+
+        :type bucket_name: string
+        :param bucket_name: Bucket to submit to
+
+        :type key: string
+        :param key: Key name, optionally add ${filename} to the end to
+            attach the submitted filename
+
+        :type expires_in: integer
+        :param expires_in: Time (in seconds) before this expires, defaults
+            to 6000
+
+        :type acl: string
+        :param acl: A canned ACL. One of:
+            * private
+            * public-read
+            * public-read-write
+            * authenticated-read
+            * bucket-owner-read
+            * bucket-owner-full-control
+
+        :type success_action_redirect: string
+        :param success_action_redirect: URL to redirect to on success
+
+        :type max_content_length: integer
+        :param max_content_length: Maximum size for this file
+
+        :type http_method: string
+        :param http_method: HTTP Method to use, "http" or "https"
+
+        :type storage_class: string
+        :param storage_class: Storage class to use for storing the object.
+            Valid values: STANDARD | REDUCED_REDUNDANCY
+
+        :type server_side_encryption: string
+        :param server_side_encryption: Specifies server-side encryption
+            algorithm to use when Amazon S3 creates an object.
+            Valid values: None | AES256
+
+        :rtype: dict
+        :return: A dictionary containing field names/values as well as
+            a url to POST to
+
+        .. code-block:: python
+
+            {
+                "action": "https://<bucket>.s3.amazonaws.com/",
+                "fields": [
+                    {"name": "policy", "value": "<base64-encoded policy>"},
+                    {"name": "AWSAccessKeyId", "value": "<access key>"},
+                    {"name": "signature", "value": "<policy signature>"},
+                    {"name": "key", "value": "<key name>"}
+                ]
+            }
+
+        """
+        if fields is None:
+            fields = []
+        if conditions is None:
+            conditions = []
+        expiration = time.gmtime(int(time.time() + expires_in))
+
+        # Generate policy document
+        conditions.append('{"bucket": "%s"}' % bucket_name)
+        if key.endswith("${filename}"):
+            conditions.append('["starts-with", "$key", "%s"]' %
+                              key[:-len("${filename}")])
+        else:
+            conditions.append('{"key": "%s"}' % key)
+        if acl:
+            conditions.append('{"acl": "%s"}' % acl)
+            fields.append({"name": "acl", "value": acl})
+        if success_action_redirect:
+            conditions.append('{"success_action_redirect": "%s"}' %
+                              success_action_redirect)
+            fields.append({"name": "success_action_redirect",
+                           "value": success_action_redirect})
+        if max_content_length:
+            conditions.append('["content-length-range", 0, %i]' %
+                              max_content_length)
+
+        if self.provider.security_token:
+            fields.append({'name': 'x-amz-security-token',
+                           'value': self.provider.security_token})
+            conditions.append('{"x-amz-security-token": "%s"}' %
+                              self.provider.security_token)
+
+        if storage_class:
+            fields.append({'name': 'x-amz-storage-class',
+                           'value': storage_class})
+            conditions.append('{"x-amz-storage-class": "%s"}' %
+                              storage_class)
+
+        if server_side_encryption:
+            fields.append({'name': 'x-amz-server-side-encryption',
+                           'value': server_side_encryption})
+            conditions.append('{"x-amz-server-side-encryption": "%s"}' %
+                              server_side_encryption)
+
+        policy = self.build_post_policy(expiration, conditions)
+
+        # Add the base64-encoded policy document as the 'policy' field
+        policy_b64 = base64.b64encode(policy)
+        fields.append({"name": "policy", "value": policy_b64})
+
+        # Add the AWS access key as the 'AWSAccessKeyId' field
+        fields.append({"name": "AWSAccessKeyId",
+                       "value": self.aws_access_key_id})
+
+        # Add signature for encoded policy document as the
+        # 'signature' field
+        signature = self._auth_handler.sign_string(policy_b64)
+        fields.append({"name": "signature", "value": signature})
+        fields.append({"name": "key", "value": key})
+
+        # HTTPS protocol will be used if the secure HTTP option is enabled.
+        url = '%s://%s/' % (http_method,
+                            self.calling_format.build_host(
+                                self.server_name(), bucket_name))
+
+        return {"action": url, "fields": fields}
+
+    def generate_url(self, expires_in, method, bucket='', key='',
+                     headers=None, query_auth=True, force_http=False,
+                     response_headers=None, expires_in_absolute=False,
+                     version_id=None):
+        headers = headers or {}
+        if expires_in_absolute:
+            expires = int(expires_in)
+        else:
+            expires = int(time.time() + expires_in)
+        auth_path = self.calling_format.build_auth_path(bucket, key)
+        auth_path = self.get_path(auth_path)
+        # optional version_id and response_headers need to be added to
+        # the query param list.
+        extra_qp = []
+        if version_id is not None:
+            extra_qp.append("versionId=%s" % version_id)
+        if response_headers:
+            for k, v in response_headers.items():
+                extra_qp.append("%s=%s" % (k, urllib.quote(v)))
+        if self.provider.security_token:
+            headers['x-amz-security-token'] = self.provider.security_token
+        if extra_qp:
+            delimiter = '?' if '?' not in auth_path else '&'
+            auth_path += delimiter + '&'.join(extra_qp)
+        c_string = boto.utils.canonical_string(method, auth_path, headers,
+                                               expires, self.provider)
+        b64_hmac = self._auth_handler.sign_string(c_string)
+        encoded_canonical = urllib.quote(b64_hmac, safe='')
+        self.calling_format.build_path_base(bucket, key)
+        if query_auth:
+            query_part = '?' + self.QueryString % (encoded_canonical,
+                                                   expires,
+                                                   self.aws_access_key_id)
+        else:
+            query_part = ''
+        if headers:
+            hdr_prefix = self.provider.header_prefix
+            for k, v in headers.items():
+                if k.startswith(hdr_prefix):
+                    # headers used for sig generation must be
+                    # included in the url also.
+                    extra_qp.append("%s=%s" % (k, urllib.quote(v)))
+        if extra_qp:
+            delimiter = '?' if not query_part else '&'
+            query_part += delimiter + '&'.join(extra_qp)
+        if force_http:
+            protocol = 'http'
+            port = 80
+        else:
+            protocol = self.protocol
+            port = self.port
+        return self.calling_format.build_url_base(self, protocol,
+                                                  self.server_name(port),
+                                                  bucket, key) + query_part
+
+    def get_all_buckets(self, headers=None):
+        response = self.make_request('GET', headers=headers)
+        body = response.read()
+        if response.status > 300:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+        rs = ResultSet([('Bucket', self.bucket_class)])
+        h = handler.XmlHandler(rs, self)
+        xml.sax.parseString(body, h)
+        return rs
+
+    def get_canonical_user_id(self, headers=None):
+        """
+        Convenience method that returns the "CanonicalUserID" of the
+        user whose credentials are associated with the connection.
+        The only way to get this value is to do a GET request on the
+        service, which returns all buckets associated with the account.
+        As part of that response, the canonical user id is returned.
+        This method simply does all of that and then returns just the
+        user id.
+
+        :rtype: string
+        :return: A string containing the canonical user id.
+        """
+        rs = self.get_all_buckets(headers=headers)
+        return rs.owner.id
+
+    def get_bucket(self, bucket_name, validate=True, headers=None):
+        """
+        Retrieves a bucket by name.
+
+        If the bucket does not exist, an ``S3ResponseError`` will be
+        raised. If you are unsure if the bucket exists or not, you can
+        use the ``S3Connection.lookup`` method, which will either return
+        a valid bucket or ``None``.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request
+            to AWS.
+
+        :type validate: boolean
+        :param validate: If ``True``, it will try to fetch all keys within
+            the given bucket. (Default: ``True``)
+        """
+        bucket = self.bucket_class(self, bucket_name)
+        if validate:
+            bucket.get_all_keys(headers, maxkeys=0)
+        return bucket
+
+    def lookup(self, bucket_name, validate=True, headers=None):
+        """
+        Attempts to get a bucket from S3.
+
+        Works identically to ``S3Connection.get_bucket``, save for that it
+        will return ``None`` if the bucket does not exist instead of
+        throwing an exception.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request
+            to AWS.
+
+        :type validate: boolean
+        :param validate: If ``True``, it will try to fetch all keys within
+            the given bucket. (Default: ``True``)
+        """
+        try:
+            bucket = self.get_bucket(bucket_name, validate, headers=headers)
+        except:
+            bucket = None
+        return bucket
+
+    def create_bucket(self, bucket_name, headers=None,
+                      location=Location.DEFAULT, policy=None):
+        """
+        Creates a new bucket. By default it is created in the US Classic
+        region. You can pass Location.EU to create a European bucket (S3)
+        or European Union bucket (GCS).
+
+        :type bucket_name: string
+        :param bucket_name: The name of the new bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request
+            to AWS.
+
+        :type location: str
+        :param location: The location of the new bucket. You can use one
+            of the constants in :class:`boto.s3.connection.Location`
+            (e.g. Location.EU, Location.USWest, etc.).
+
+        :type policy: :class:`boto.s3.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the
+            new key in S3.
+
+        """
+        check_lowercase_bucketname(bucket_name)
+
+        if policy:
+            if headers:
+                headers[self.provider.acl_header] = policy
+            else:
+                headers = {self.provider.acl_header: policy}
+        if location == Location.DEFAULT:
+            data = ''
+        else:
+            data = '<CreateBucketConfiguration><LocationConstraint>' + \
+                location + '</LocationConstraint></CreateBucketConfiguration>'
+        response = self.make_request('PUT', bucket_name, headers=headers,
+                                     data=data)
+        body = response.read()
+        if response.status == 409:
+            raise self.provider.storage_create_error(
+                response.status, response.reason, body)
+        if response.status == 200:
+            return self.bucket_class(self, bucket_name)
+        else:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def delete_bucket(self, bucket, headers=None):
+        """
+        Removes an S3 bucket.
+
+        In order to remove the bucket, it must first be empty. If the
+        bucket is not empty, an ``S3ResponseError`` will be raised.
+
+        :type bucket: string
+        :param bucket: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request
+            to AWS.
+        """
+        response = self.make_request('DELETE', bucket, headers=headers)
+        body = response.read()
+        if response.status != 204:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def make_request(self, method, bucket='', key='', headers=None, data='',
+                     query_args=None, sender=None, override_num_retries=None,
+                     retry_handler=None):
+        if isinstance(bucket, self.bucket_class):
+            bucket = bucket.name
+        if isinstance(key, Key):
+            key = key.name
+        path = self.calling_format.build_path_base(bucket, key)
+        boto.log.debug('path=%s' % path)
+        auth_path = self.calling_format.build_auth_path(bucket, key)
+        boto.log.debug('auth_path=%s' % auth_path)
+        host = self.calling_format.build_host(self.server_name(), bucket)
+        if query_args:
+            path += '?' + query_args
+            boto.log.debug('path=%s' % path)
+            auth_path += '?' + query_args
+            boto.log.debug('auth_path=%s' % auth_path)
+        return AWSAuthConnection.make_request(
+            self, method, path, headers,
+            data, host, auth_path, sender,
+            override_num_retries=override_num_retries,
+            retry_handler=retry_handler
+        )
diff --git a/awx/lib/site-packages/boto/s3/cors.py b/awx/lib/site-packages/boto/s3/cors.py
new file mode 100644
index 0000000000..d97ee890aa
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/cors.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class CORSRule(object): + """ + CORS rule for a bucket. + + :ivar id: A unique identifier for the rule. The ID value can be + up to 255 characters long. The IDs help you find a rule in + the configuration. + + :ivar allowed_methods: An HTTP method that you want to allow the + origin to execute. Each CORSRule must identify at least one + origin and one method. Valid values are: + GET|PUT|HEAD|POST|DELETE + + :ivar allowed_origin: An origin that you want to allow cross-domain + requests from. This can contain at most one * wild character. + Each CORSRule must identify at least one origin and one method. + The origin value can include at most one '*' wild character. + For example, "http://*.example.com". You can also specify + only * as the origin value allowing all origins cross-domain access. + + :ivar allowed_header: Specifies which headers are allowed in a + pre-flight OPTIONS request via the + Access-Control-Request-Headers header. Each header name + specified in the Access-Control-Request-Headers header must + have a corresponding entry in the rule. Amazon S3 will send + only the allowed headers in a response that were requested. + This can contain at most one * wild character. + + :ivar max_age_seconds: The time in seconds that your browser is to + cache the preflight response for the specified resource. + + :ivar expose_header: One or more headers in the response that you + want customers to be able to access from their applications + (for example, from a JavaScript XMLHttpRequest object). You + add one ExposeHeader element in the rule for each header. 
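+
+    For example, a rule that allows any origin to issue GET requests
+    would carry allowed_method=['GET'] and allowed_origin=['*'], and
+    serializes via to_xml() as::
+
+        <CORSRule>
+        <AllowedMethod>GET</AllowedMethod>
+        <AllowedOrigin>*</AllowedOrigin>
+        </CORSRule>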
+ """ + + def __init__(self, allowed_method=None, allowed_origin=None, + id=None, allowed_header=None, max_age_seconds=None, + expose_header=None): + if allowed_method is None: + allowed_method = [] + self.allowed_method = allowed_method + if allowed_origin is None: + allowed_origin = [] + self.allowed_origin = allowed_origin + self.id = id + if allowed_header is None: + allowed_header = [] + self.allowed_header = allowed_header + self.max_age_seconds = max_age_seconds + if expose_header is None: + expose_header = [] + self.expose_header = expose_header + + def __repr__(self): + return '' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ID': + self.id = value + elif name == 'AllowedMethod': + self.allowed_method.append(value) + elif name == 'AllowedOrigin': + self.allowed_origin.append(value) + elif name == 'AllowedHeader': + self.allowed_header.append(value) + elif name == 'MaxAgeSeconds': + self.max_age_seconds = int(value) + elif name == 'ExposeHeader': + self.expose_header.append(value) + else: + setattr(self, name, value) + + def to_xml(self): + s = '' + for allowed_method in self.allowed_method: + s += '%s' % allowed_method + for allowed_origin in self.allowed_origin: + s += '%s' % allowed_origin + for allowed_header in self.allowed_header: + s += '%s' % allowed_header + for expose_header in self.expose_header: + s += '%s' % expose_header + if self.max_age_seconds: + s += '%d' % self.max_age_seconds + if self.id: + s += '%s' % self.id + s += '' + return s + + +class CORSConfiguration(list): + """ + A container for the rules associated with a CORS configuration. + """ + + def startElement(self, name, attrs, connection): + if name == 'CORSRule': + rule = CORSRule() + self.append(rule) + return rule + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + def to_xml(self): + """ + Returns a string containing the XML version of the Lifecycle + configuration as defined by S3. + """ + s = '' + for rule in self: + s += rule.to_xml() + s += '' + return s + + def add_rule(self, allowed_method, allowed_origin, + id=None, allowed_header=None, max_age_seconds=None, + expose_header=None): + """ + Add a rule to this CORS configuration. This only adds + the rule to the local copy. To install the new rule(s) on + the bucket, you need to pass this CORS config object + to the set_cors method of the Bucket object. + + :type allowed_methods: list of str + :param allowed_methods: An HTTP method that you want to allow the + origin to execute. Each CORSRule must identify at least one + origin and one method. Valid values are: + GET|PUT|HEAD|POST|DELETE + + :type allowed_origin: list of str + :param allowed_origin: An origin that you want to allow cross-domain + requests from. This can contain at most one * wild character. + Each CORSRule must identify at least one origin and one method. + The origin value can include at most one '*' wild character. + For example, "http://*.example.com". You can also specify + only * as the origin value allowing all origins + cross-domain access. + + :type id: str + :param id: A unique identifier for the rule. The ID value can be + up to 255 characters long. The IDs help you find a rule in + the configuration. + + :type allowed_header: list of str + :param allowed_header: Specifies which headers are allowed in a + pre-flight OPTIONS request via the + Access-Control-Request-Headers header. 
+            Each header name specified in the
+            Access-Control-Request-Headers header must have a
+            corresponding entry in the rule. Amazon S3 will send only
+            the allowed headers in a response that were requested.
+            This can contain at most one '*' wildcard character.
+
+        :type max_age_seconds: int
+        :param max_age_seconds: The time in seconds that your browser is
+            to cache the preflight response for the specified resource.
+
+        :type expose_header: list of str
+        :param expose_header: One or more headers in the response that you
+            want customers to be able to access from their applications
+            (for example, from a JavaScript XMLHttpRequest object). You
+            add one ExposeHeader element in the rule for each header.
+        """
+        if not isinstance(allowed_method, (list, tuple)):
+            allowed_method = [allowed_method]
+        if not isinstance(allowed_origin, (list, tuple)):
+            allowed_origin = [allowed_origin]
+        if not isinstance(allowed_header, (list, tuple)):
+            if allowed_header is None:
+                allowed_header = []
+            else:
+                allowed_header = [allowed_header]
+        if not isinstance(expose_header, (list, tuple)):
+            if expose_header is None:
+                expose_header = []
+            else:
+                expose_header = [expose_header]
+        rule = CORSRule(allowed_method, allowed_origin, id, allowed_header,
+                        max_age_seconds, expose_header)
+        self.append(rule)
diff --git a/awx/lib/site-packages/boto/s3/deletemarker.py b/awx/lib/site-packages/boto/s3/deletemarker.py
new file mode 100644
index 0000000000..5db4343a91
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/deletemarker.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
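+
+# A delete marker is the placeholder entry that S3 creates when a key is
+# deleted in a versioning-enabled bucket; instances of this class appear
+# alongside Key objects when listing all versions of a bucket.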
+ +from boto.s3.user import User + +class DeleteMarker: + def __init__(self, bucket=None, name=None): + self.bucket = bucket + self.name = name + self.version_id = None + self.is_latest = False + self.last_modified = None + self.owner = None + + def startElement(self, name, attrs, connection): + if name == 'Owner': + self.owner = User(self) + return self.owner + else: + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value + elif name == 'IsLatest': + if value == 'true': + self.is_latest = True + else: + self.is_latest = False + elif name == 'LastModified': + self.last_modified = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + else: + setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/s3/key.py b/awx/lib/site-packages/boto/s3/key.py new file mode 100644 index 0000000000..2b7ae73ae7 --- /dev/null +++ b/awx/lib/site-packages/boto/s3/key.py @@ -0,0 +1,1824 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Nexenta Systems Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from __future__ import with_statement +import errno +import mimetypes +import os +import re +import rfc822 +import StringIO +import base64 +import binascii +import math +import urllib +import boto.utils +from boto.exception import BotoClientError +from boto.exception import StorageDataError +from boto.exception import PleaseRetryException +from boto.provider import Provider +from boto.s3.keyfile import KeyFile +from boto.s3.user import User +from boto import UserAgent +from boto.utils import compute_md5 +from boto.utils import find_matching_headers +from boto.utils import merge_headers_by_name +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + + +class Key(object): + """ + Represents a key (object) in an S3 bucket. + + :ivar bucket: The parent :class:`boto.s3.bucket.Bucket`. + :ivar name: The name of this Key object. + :ivar metadata: A dictionary containing user metadata that you + wish to store with the object or that has been retrieved from + an existing object. + :ivar cache_control: The value of the `Cache-Control` HTTP header. + :ivar content_type: The value of the `Content-Type` HTTP header. + :ivar content_encoding: The value of the `Content-Encoding` HTTP header. + :ivar content_disposition: The value of the `Content-Disposition` HTTP + header. 
+    :ivar content_language: The value of the `Content-Language` HTTP header.
+    :ivar etag: The `etag` associated with this object.
+    :ivar last_modified: The string timestamp representing the last
+        time this object was modified in S3.
+    :ivar owner: The ID of the owner of this object.
+    :ivar storage_class: The storage class of the object. Currently, one of:
+        STANDARD | REDUCED_REDUNDANCY | GLACIER
+    :ivar md5: The MD5 hash of the contents of the object.
+    :ivar size: The size, in bytes, of the object.
+    :ivar version_id: The version ID of this object, if it is a versioned
+        object.
+    :ivar encrypted: Whether the object is encrypted while at rest on
+        the server.
+    """
+
+    DefaultContentType = 'application/octet-stream'
+
+    RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
+    <RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
+        <Days>%s</Days>
+    </RestoreRequest>"""
+
+    BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
+
+    # The object metadata fields a user can set, other than custom
+    # metadata fields (i.e., those beginning with a provider-specific
+    # prefix like x-amz-meta).
+    base_user_settable_fields = set(["cache-control", "content-disposition",
+                                     "content-encoding", "content-language",
+                                     "content-md5", "content-type"])
+    _underscore_base_user_settable_fields = set()
+    for f in base_user_settable_fields:
+        _underscore_base_user_settable_fields.add(f.replace('-', '_'))
+
+    def __init__(self, bucket=None, name=None):
+        self.bucket = bucket
+        self.name = name
+        self.metadata = {}
+        self.cache_control = None
+        self.content_type = self.DefaultContentType
+        self.content_encoding = None
+        self.content_disposition = None
+        self.content_language = None
+        self.filename = None
+        self.etag = None
+        self.is_latest = False
+        self.last_modified = None
+        self.owner = None
+        self.storage_class = 'STANDARD'
+        self.path = None
+        self.resp = None
+        self.mode = None
+        self.size = None
+        self.version_id = None
+        self.source_version_id = None
+        self.delete_marker = False
+        self.encrypted = None
+        # If the object is being restored, this attribute will be set to
+        # True. If the object is restored, it will be set to False.
+        # Otherwise this value will be None. If the restore is completed
+        # (ongoing_restore = False), the expiry_date will be populated
+        # with the expiry date of the restored object.
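+        # In other words:
+        #   ongoing_restore is None  -> no restore has ever been requested
+        #   ongoing_restore is True  -> a restore is currently in progress
+        #   ongoing_restore is False -> restore finished; see expiry_date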
+        self.ongoing_restore = None
+        self.expiry_date = None
+        self.local_hashes = {}
+
+    def __repr__(self):
+        if self.bucket:
+            return '<Key: %s,%s>' % (self.bucket.name, self.name)
+        else:
+            return '<Key: None,%s>' % self.name
+
+    def __iter__(self):
+        return self
+
+    @property
+    def provider(self):
+        provider = None
+        if self.bucket and self.bucket.connection:
+            provider = self.bucket.connection.provider
+        return provider
+
+    def _get_key(self):
+        return self.name
+
+    def _set_key(self, value):
+        self.name = value
+
+    key = property(_get_key, _set_key)
+
+    def _get_md5(self):
+        if 'md5' in self.local_hashes and self.local_hashes['md5']:
+            return binascii.b2a_hex(self.local_hashes['md5'])
+
+    def _set_md5(self, value):
+        if value:
+            self.local_hashes['md5'] = binascii.a2b_hex(value)
+        elif 'md5' in self.local_hashes:
+            self.local_hashes.pop('md5', None)
+
+    md5 = property(_get_md5, _set_md5)
+
+    def _get_base64md5(self):
+        if 'md5' in self.local_hashes and self.local_hashes['md5']:
+            return binascii.b2a_base64(self.local_hashes['md5']).rstrip('\n')
+
+    def _set_base64md5(self, value):
+        if value:
+            self.local_hashes['md5'] = binascii.a2b_base64(value)
+        elif 'md5' in self.local_hashes:
+            del self.local_hashes['md5']
+
+    base64md5 = property(_get_base64md5, _set_base64md5)
+
+    def get_md5_from_hexdigest(self, md5_hexdigest):
+        """
+        A utility function to create the 2-tuple (md5hexdigest, base64md5)
+        from just having a precalculated md5_hexdigest.
+        """
+        digest = binascii.unhexlify(md5_hexdigest)
+        base64md5 = base64.encodestring(digest)
+        if base64md5[-1] == '\n':
+            base64md5 = base64md5[0:-1]
+        return (md5_hexdigest, base64md5)
+
+    def handle_encryption_headers(self, resp):
+        provider = self.bucket.connection.provider
+        if provider.server_side_encryption_header:
+            self.encrypted = resp.getheader(
+                provider.server_side_encryption_header, None)
+        else:
+            self.encrypted = None
+
+    def handle_version_headers(self, resp, force=False):
+        provider = self.bucket.connection.provider
+        # If the Key object already has a version_id attribute value, it
+        # means that it represents an explicit version and the user is
+        # doing a get_contents_*(version_id=<id>) to retrieve another
+        # version of the Key. In that case, we don't really want to
+        # overwrite the version_id in this Key object.
+        if self.version_id is None or force:
+            self.version_id = resp.getheader(provider.version_id, None)
+        self.source_version_id = resp.getheader(
+            provider.copy_source_version_id, None)
+        if resp.getheader(provider.delete_marker, 'false') == 'true':
+            self.delete_marker = True
+        else:
+            self.delete_marker = False
+
+    def handle_restore_headers(self, response):
+        header = response.getheader('x-amz-restore')
+        if header is None:
+            return
+        parts = header.split(',', 1)
+        for part in parts:
+            key, val = [i.strip() for i in part.split('=')]
+            val = val.replace('"', '')
+            if key == 'ongoing-request':
+                self.ongoing_restore = True if val.lower() == 'true' else False
+            elif key == 'expiry-date':
+                self.expiry_date = val
+
+    def handle_addl_headers(self, headers):
+        """
+        Used by Key subclasses to do additional, provider-specific
+        processing of response headers. No-op for this base class.
+ """ + pass + + def open_read(self, headers=None, query_args='', + override_num_retries=None, response_headers=None): + """ + Open this key for reading + + :type headers: dict + :param headers: Headers to pass in the web request + + :type query_args: string + :param query_args: Arguments to pass in the query string + (ie, 'torrent') + + :type override_num_retries: int + :param override_num_retries: If not None will override configured + num_retries parameter for underlying GET. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + """ + if self.resp == None: + self.mode = 'r' + + provider = self.bucket.connection.provider + self.resp = self.bucket.connection.make_request( + 'GET', self.bucket.name, self.name, headers, + query_args=query_args, + override_num_retries=override_num_retries) + if self.resp.status < 199 or self.resp.status > 299: + body = self.resp.read() + raise provider.storage_response_error(self.resp.status, + self.resp.reason, body) + response_headers = self.resp.msg + self.metadata = boto.utils.get_aws_metadata(response_headers, + provider) + for name, value in response_headers.items(): + # To get correct size for Range GETs, use Content-Range + # header if one was returned. If not, use Content-Length + # header. + if (name.lower() == 'content-length' and + 'Content-Range' not in response_headers): + self.size = int(value) + elif name.lower() == 'content-range': + end_range = re.sub('.*/(.*)', '\\1', value) + self.size = int(end_range) + elif name.lower() == 'etag': + self.etag = value + elif name.lower() == 'content-type': + self.content_type = value + elif name.lower() == 'content-encoding': + self.content_encoding = value + elif name.lower() == 'content-language': + self.content_language = value + elif name.lower() == 'last-modified': + self.last_modified = value + elif name.lower() == 'cache-control': + self.cache_control = value + elif name.lower() == 'content-disposition': + self.content_disposition = value + self.handle_version_headers(self.resp) + self.handle_encryption_headers(self.resp) + self.handle_addl_headers(self.resp.getheaders()) + + def open_write(self, headers=None, override_num_retries=None): + """ + Open this key for writing. + Not yet implemented + + :type headers: dict + :param headers: Headers to pass in the write request + + :type override_num_retries: int + :param override_num_retries: If not None will override configured + num_retries parameter for underlying PUT. + """ + raise BotoClientError('Not Implemented') + + def open(self, mode='r', headers=None, query_args=None, + override_num_retries=None): + if mode == 'r': + self.mode = 'r' + self.open_read(headers=headers, query_args=query_args, + override_num_retries=override_num_retries) + elif mode == 'w': + self.mode = 'w' + self.open_write(headers=headers, + override_num_retries=override_num_retries) + else: + raise BotoClientError('Invalid mode: %s' % mode) + + closed = False + + def close(self, fast=False): + """ + Close this key. + + :type fast: bool + :param fast: True if you want the connection to be closed without first + reading the content. This should only be used in cases where subsequent + calls don't need to return the content from the open HTTP connection. 
+        Note: As explained at
+        http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
+        callers must read the whole response before sending a new
+        request to the server. Calling Key.close(fast=True) and making
+        a subsequent request to the server will work, because boto will
+        get an httplib exception and close/reopen the connection.
+        """
+        if self.resp and not fast:
+            self.resp.read()
+        self.resp = None
+        self.mode = None
+        self.closed = True
+
+    def next(self):
+        """
+        By providing a next method, the key object supports use as an
+        iterator. For example, you can now say:
+
+        for bytes in key:
+            write bytes to a file or whatever
+
+        All of the HTTP connection stuff is handled for you.
+        """
+        self.open_read()
+        data = self.resp.read(self.BufferSize)
+        if not data:
+            self.close()
+            raise StopIteration
+        return data
+
+    def read(self, size=0):
+        self.open_read()
+        if size == 0:
+            data = self.resp.read()
+        else:
+            data = self.resp.read(size)
+        if not data:
+            self.close()
+        return data
+
+    def change_storage_class(self, new_storage_class, dst_bucket=None,
+                             validate_dst_bucket=True):
+        """
+        Change the storage class of an existing key.
+        Depending on whether a different destination bucket is supplied
+        or not, this will either move the item within the bucket,
+        preserving all metadata and ACL info while changing the storage
+        class, or it will copy the item to the provided destination
+        bucket, also preserving metadata and ACL info.
+
+        :type new_storage_class: string
+        :param new_storage_class: The new storage class for the Key.
+            Possible values are:
+            * STANDARD
+            * REDUCED_REDUNDANCY
+
+        :type dst_bucket: string
+        :param dst_bucket: The name of a destination bucket. If not
+            provided the current bucket of the key will be used.
+
+        :type validate_dst_bucket: bool
+        :param validate_dst_bucket: If True, will validate the dst_bucket
+            by using an extra list request.
+        """
+        if new_storage_class == 'STANDARD':
+            return self.copy(self.bucket.name, self.name,
+                             reduced_redundancy=False, preserve_acl=True,
+                             validate_dst_bucket=validate_dst_bucket)
+        elif new_storage_class == 'REDUCED_REDUNDANCY':
+            return self.copy(self.bucket.name, self.name,
+                             reduced_redundancy=True, preserve_acl=True,
+                             validate_dst_bucket=validate_dst_bucket)
+        else:
+            raise BotoClientError('Invalid storage class: %s' %
+                                  new_storage_class)
+
+    def copy(self, dst_bucket, dst_key, metadata=None,
+             reduced_redundancy=False, preserve_acl=False,
+             encrypt_key=False, validate_dst_bucket=True):
+        """
+        Copy this Key to another bucket.
+
+        :type dst_bucket: string
+        :param dst_bucket: The name of the destination bucket
+
+        :type dst_key: string
+        :param dst_key: The name of the destination key
+
+        :type metadata: dict
+        :param metadata: Metadata to be associated with new key. If
+            metadata is supplied, it will replace the metadata of the
+            source key being copied. If no metadata is supplied, the
+            source key's metadata will be copied to the new key.
+
+        :type reduced_redundancy: bool
+        :param reduced_redundancy: If True, this will force the
+            storage class of the new Key to be REDUCED_REDUNDANCY
+            regardless of the storage class of the key being copied.
+            The Reduced Redundancy Storage (RRS) feature of S3 provides
+            lower redundancy at lower storage cost.
+
+        :type preserve_acl: bool
+        :param preserve_acl: If True, the ACL from the source key will
+            be copied to the destination key. If False, the
+            destination key will have the default ACL.
Note that + preserving the ACL in the new key object will require two + additional API calls to S3, one to retrieve the current + ACL and one to set that ACL on the new object. If you + don't care about the ACL, a value of False will be + significantly more efficient. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type validate_dst_bucket: bool + :param validate_dst_bucket: If True, will validate the dst_bucket + by using an extra list request. + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + dst_bucket = self.bucket.connection.lookup(dst_bucket, + validate_dst_bucket) + if reduced_redundancy: + storage_class = 'REDUCED_REDUNDANCY' + else: + storage_class = self.storage_class + return dst_bucket.copy_key(dst_key, self.bucket.name, + self.name, metadata, + storage_class=storage_class, + preserve_acl=preserve_acl, + encrypt_key=encrypt_key) + + def startElement(self, name, attrs, connection): + if name == 'Owner': + self.owner = User(self) + return self.owner + else: + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value + elif name == 'ETag': + self.etag = value + elif name == 'IsLatest': + if value == 'true': + self.is_latest = True + else: + self.is_latest = False + elif name == 'LastModified': + self.last_modified = value + elif name == 'Size': + self.size = int(value) + elif name == 'StorageClass': + self.storage_class = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + else: + setattr(self, name, value) + + def exists(self, headers=None): + """ + Returns True if the key exists + + :rtype: bool + :return: Whether the key exists on S3 + """ + return bool(self.bucket.lookup(self.name, headers=headers)) + + def delete(self, headers=None): + """ + Delete this key from S3 + """ + return self.bucket.delete_key(self.name, version_id=self.version_id, + headers=headers) + + def get_metadata(self, name): + return self.metadata.get(name) + + def set_metadata(self, name, value): + # Ensure that metadata that is vital to signing is in the correct + # case. Applies to ``Content-Type`` & ``Content-MD5``. + if name.lower() == 'content-type': + self.metadata['Content-Type'] = value + elif name.lower() == 'content-md5': + self.metadata['Content-MD5'] = value + else: + self.metadata[name] = value + + def update_metadata(self, d): + self.metadata.update(d) + + # convenience methods for setting/getting ACL + def set_acl(self, acl_str, headers=None): + if self.bucket != None: + self.bucket.set_acl(acl_str, self.name, headers=headers) + + def get_acl(self, headers=None): + if self.bucket != None: + return self.bucket.get_acl(self.name, headers=headers) + + def get_xml_acl(self, headers=None): + if self.bucket != None: + return self.bucket.get_xml_acl(self.name, headers=headers) + + def set_xml_acl(self, acl_str, headers=None): + if self.bucket != None: + return self.bucket.set_xml_acl(acl_str, self.name, headers=headers) + + def set_canned_acl(self, acl_str, headers=None): + return self.bucket.set_canned_acl(acl_str, self.name, headers) + + def get_redirect(self): + """Return the redirect location configured for this key. + + If no redirect is configured (via set_redirect), then None + will be returned. 
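+        The value is read from the x-amz-website-redirect-location
+        header of a HEAD response for this key.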
+ + """ + response = self.bucket.connection.make_request( + 'HEAD', self.bucket.name, self.name) + if response.status == 200: + return response.getheader('x-amz-website-redirect-location') + else: + raise self.provider.storage_response_error( + response.status, response.reason, response.read()) + + def set_redirect(self, redirect_location, headers=None): + """Configure this key to redirect to another location. + + When the bucket associated with this key is accessed from the website + endpoint, a 301 redirect will be issued to the specified + `redirect_location`. + + :type redirect_location: string + :param redirect_location: The location to redirect. + + """ + if headers is None: + headers = {} + else: + headers = headers.copy() + + headers['x-amz-website-redirect-location'] = redirect_location + response = self.bucket.connection.make_request('PUT', self.bucket.name, + self.name, headers) + if response.status == 200: + return True + else: + raise self.provider.storage_response_error( + response.status, response.reason, response.read()) + + def make_public(self, headers=None): + return self.bucket.set_canned_acl('public-read', self.name, headers) + + def generate_url(self, expires_in, method='GET', headers=None, + query_auth=True, force_http=False, response_headers=None, + expires_in_absolute=False, version_id=None, + policy=None, reduced_redundancy=False, encrypt_key=False): + """ + Generate a URL to access this key. + + :type expires_in: int + :param expires_in: How long the url is valid for, in seconds + + :type method: string + :param method: The method to use for retrieving the file + (default is GET) + + :type headers: dict + :param headers: Any headers to pass along in the request + + :type query_auth: bool + :param query_auth: + + :type force_http: bool + :param force_http: If True, http will be used instead of https. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + + :type expires_in_absolute: bool + :param expires_in_absolute: + + :type version_id: string + :param version_id: The version_id of the object to GET. If specified + this overrides any value in the key. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the + new key in S3. + + :type reduced_redundancy: bool + :param reduced_redundancy: If True, this will set the storage + class of the new Key to be REDUCED_REDUNDANCY. The Reduced + Redundancy Storage (RRS) feature of S3, provides lower + redundancy at lower storage cost. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. 
+
+        :rtype: string
+        :return: The URL to access the key
+        """
+        provider = self.bucket.connection.provider
+        version_id = version_id or self.version_id
+        if headers is None:
+            headers = {}
+        else:
+            headers = headers.copy()
+
+        # add headers accordingly (usually PUT case)
+        if policy:
+            headers[provider.acl_header] = policy
+        if reduced_redundancy:
+            self.storage_class = 'REDUCED_REDUNDANCY'
+            if provider.storage_class_header:
+                headers[provider.storage_class_header] = self.storage_class
+        if encrypt_key:
+            headers[provider.server_side_encryption_header] = 'AES256'
+        headers = boto.utils.merge_meta(headers, self.metadata, provider)
+
+        return self.bucket.connection.generate_url(expires_in, method,
+                                                   self.bucket.name,
+                                                   self.name, headers,
+                                                   query_auth, force_http,
+                                                   response_headers,
+                                                   expires_in_absolute,
+                                                   version_id)
+
+    def send_file(self, fp, headers=None, cb=None, num_cb=10,
+                  query_args=None, chunked_transfer=False, size=None):
+        """
+        Upload a file to a key into a bucket on S3.
+
+        :type fp: file
+        :param fp: The file pointer to upload. The file pointer must
+            point at the offset from which you wish to upload,
+            i.e. if uploading the full file, it should point at the
+            start of the file. Normally when a file is opened for
+            reading, the fp will point at the first byte. See the
+            size parameter below for more info.
+
+        :type headers: dict
+        :param headers: The headers to pass along with the PUT request
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file
+            transfer. Providing a negative integer will cause your
+            callback to be called with each buffer read.
+
+        :type query_args: string
+        :param query_args: (optional) Arguments to pass in the query
+            string.
+
+        :type chunked_transfer: boolean
+        :param chunked_transfer: (optional) If true, we use chunked
+            Transfer-Encoding.
+
+        :type size: int
+        :param size: (optional) The Maximum number of bytes to read
+            from the file pointer (fp). This is useful when uploading
+            a file in multiple parts where you are splitting the file
+            up into different ranges to be uploaded. If not specified,
+            the default behaviour is to read all bytes from the file
+            pointer. Fewer bytes may be available.
+        """
+        self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+                                 query_args=query_args,
+                                 chunked_transfer=chunked_transfer,
+                                 size=size)
+
+    def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
+                            query_args=None, chunked_transfer=False,
+                            size=None, hash_algs=None):
+        provider = self.bucket.connection.provider
+        try:
+            spos = fp.tell()
+        except IOError:
+            spos = None
+            self.read_from_stream = False
+
+        # If hash_algs is unset and the MD5 hasn't already been computed,
+        # default to an MD5 hash_alg to hash the data on-the-fly.
+        if hash_algs is None and not self.md5:
+            hash_algs = {'md5': md5}
+        digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
+
+        def sender(http_conn, method, path, data, headers):
+            # This function is called repeatedly for temporary retries
+            # so we must be sure the file pointer is pointing at the
+            # start of the data.
+            if spos is not None and spos != fp.tell():
+                fp.seek(spos)
+            elif spos is None and self.read_from_stream:
+                # if seek is not supported, and we've read from this
+                # stream already, then we need to abort retries to
+                # avoid setting bad data.
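+                # (Retrying would replay only part of the stream and
+                # store truncated data, so fail the upload instead.)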
+ raise provider.storage_data_error( + 'Cannot retry failed request. fp does not support seeking.') + + # If the caller explicitly specified host header, tell putrequest + # not to add a second host header. Similarly for accept-encoding. + skips = {} + if boto.utils.find_matching_headers('host', headers): + skips['skip_host'] = 1 + if boto.utils.find_matching_headers('accept-encoding', headers): + skips['skip_accept_encoding'] = 1 + http_conn.putrequest(method, path, **skips) + for key in headers: + http_conn.putheader(key, headers[key]) + http_conn.endheaders() + + save_debug = self.bucket.connection.debug + self.bucket.connection.debug = 0 + # If the debuglevel < 4 we don't want to show connection + # payload, so turn off HTTP connection-level debug output (to + # be restored below). + # Use the getattr approach to allow this to work in AppEngine. + if getattr(http_conn, 'debuglevel', 0) < 4: + http_conn.set_debuglevel(0) + + data_len = 0 + if cb: + if size: + cb_size = size + elif self.size: + cb_size = self.size + else: + cb_size = 0 + if chunked_transfer and cb_size == 0: + # For chunked Transfer, we call the cb for every 1MB + # of data transferred, except when we know size. + cb_count = (1024 * 1024) / self.BufferSize + elif num_cb > 1: + cb_count = int( + math.ceil(cb_size / self.BufferSize / (num_cb - 1.0))) + elif num_cb < 0: + cb_count = -1 + else: + cb_count = 0 + i = 0 + cb(data_len, cb_size) + + bytes_togo = size + if bytes_togo and bytes_togo < self.BufferSize: + chunk = fp.read(bytes_togo) + else: + chunk = fp.read(self.BufferSize) + if spos is None: + # read at least something from a non-seekable fp. + self.read_from_stream = True + while chunk: + chunk_len = len(chunk) + data_len += chunk_len + if chunked_transfer: + http_conn.send('%x;\r\n' % chunk_len) + http_conn.send(chunk) + http_conn.send('\r\n') + else: + http_conn.send(chunk) + for alg in digesters: + digesters[alg].update(chunk) + if bytes_togo: + bytes_togo -= chunk_len + if bytes_togo <= 0: + break + if cb: + i += 1 + if i == cb_count or cb_count == -1: + cb(data_len, cb_size) + i = 0 + if bytes_togo and bytes_togo < self.BufferSize: + chunk = fp.read(bytes_togo) + else: + chunk = fp.read(self.BufferSize) + + self.size = data_len + + for alg in digesters: + self.local_hashes[alg] = digesters[alg].digest() + + if chunked_transfer: + http_conn.send('0\r\n') + # http_conn.send("Content-MD5: %s\r\n" % self.base64md5) + http_conn.send('\r\n') + + if cb and (cb_count <= 1 or i > 0) and data_len > 0: + cb(data_len, cb_size) + + http_conn.set_debuglevel(save_debug) + self.bucket.connection.debug = save_debug + response = http_conn.getresponse() + body = response.read() + + if not self.should_retry(response, chunked_transfer): + raise provider.storage_response_error( + response.status, response.reason, body) + + return response + + if not headers: + headers = {} + else: + headers = headers.copy() + # Overwrite user-supplied user-agent. 
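+        # Any caller-supplied User-Agent headers are removed here and
+        # replaced with boto's own UserAgent string.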
+ for header in find_matching_headers('User-Agent', headers): + del headers[header] + headers['User-Agent'] = UserAgent + if self.storage_class != 'STANDARD': + headers[provider.storage_class_header] = self.storage_class + if find_matching_headers('Content-Encoding', headers): + self.content_encoding = merge_headers_by_name( + 'Content-Encoding', headers) + if find_matching_headers('Content-Language', headers): + self.content_language = merge_headers_by_name( + 'Content-Language', headers) + content_type_headers = find_matching_headers('Content-Type', headers) + if content_type_headers: + # Some use cases need to suppress sending of the Content-Type + # header and depend on the receiving server to set the content + # type. This can be achieved by setting headers['Content-Type'] + # to None when calling this method. + if (len(content_type_headers) == 1 and + headers[content_type_headers[0]] is None): + # Delete null Content-Type value to skip sending that header. + del headers[content_type_headers[0]] + else: + self.content_type = merge_headers_by_name( + 'Content-Type', headers) + elif self.path: + self.content_type = mimetypes.guess_type(self.path)[0] + if self.content_type == None: + self.content_type = self.DefaultContentType + headers['Content-Type'] = self.content_type + else: + headers['Content-Type'] = self.content_type + if self.base64md5: + headers['Content-MD5'] = self.base64md5 + if chunked_transfer: + headers['Transfer-Encoding'] = 'chunked' + #if not self.base64md5: + # headers['Trailer'] = "Content-MD5" + else: + headers['Content-Length'] = str(self.size) + headers['Expect'] = '100-Continue' + headers = boto.utils.merge_meta(headers, self.metadata, provider) + resp = self.bucket.connection.make_request( + 'PUT', + self.bucket.name, + self.name, + headers, + sender=sender, + query_args=query_args + ) + self.handle_version_headers(resp, force=True) + self.handle_addl_headers(resp.getheaders()) + + def should_retry(self, response, chunked_transfer=False): + provider = self.bucket.connection.provider + + if not chunked_transfer: + if response.status in [500, 503]: + # 500 & 503 can be plain retries. + return True + + if response.getheader('location'): + # If there's a redirect, plain retry. + return True + + if 200 <= response.status <= 299: + self.etag = response.getheader('etag') + + if self.etag != '"%s"' % self.md5: + raise provider.storage_data_error( + 'ETag from S3 did not match computed MD5') + + return True + + if response.status == 400: + # The 400 must be trapped so the retry handler can check to + # see if it was a timeout. + # If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb + # out. + body = response.read() + err = provider.storage_response_error( + response.status, + response.reason, + body + ) + + if err.error_code in ['RequestTimeout']: + raise PleaseRetryException( + "Saw %s, retrying" % err.error_code, + response=response + ) + + return False + + def compute_md5(self, fp, size=None): + """ + :type fp: file + :param fp: File pointer to the file to MD5 hash. The file + pointer will be reset to the same position before the + method returns. + + :type size: int + :param size: (optional) The Maximum number of bytes to read + from the file pointer (fp). This is useful when uploading + a file in multiple parts where the file is being split + in place into different parts. Less bytes may be available. + """ + hex_digest, b64_digest, data_size = compute_md5(fp, size=size) + # Returned values are MD5 hash, base64 encoded MD5 hash, and data size. 
+ # The internal implementation of compute_md5() needs to return the
+ # data size, but we don't want to return that value to the external
+ # caller because it changes the class interface (i.e. it might
+ # break some code), so we consume the third tuple value here and
+ # return the remainder of the tuple to the caller, thereby preserving
+ # the existing interface.
+ self.size = data_size
+ return (hex_digest, b64_digest)
+
+ def set_contents_from_stream(self, fp, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None,
+ reduced_redundancy=False, query_args=None,
+ size=None):
+ """
+ Store an object using the name of the Key object as the key in
+ the cloud and the contents of the data stream pointed to by 'fp'
+ as the contents.
+
+ The stream object is not seekable and the total size is not known.
+ This has the implication that we can't specify the
+ Content-Length and Content-MD5 headers. For huge uploads this
+ avoids the delay of calculating the MD5, at the cost of being
+ unable to verify the integrity of the uploaded data.
+
+ :type fp: file
+ :param fp: the file whose contents are to be uploaded
+
+ :type headers: dict
+ :param headers: additional HTTP headers to be sent with the
+ PUT request.
+
+ :type replace: bool
+ :param replace: If this parameter is False, the method will first check
+ to see if an object exists in the bucket with the same key. If it
+ does, it won't overwrite it. The default value is True, which will
+ overwrite the object.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted to GS and the second representing the
+ total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter, this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type policy: :class:`boto.gs.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the new key
+ in GS.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read from
+ the file pointer (fp). This is useful when uploading a
+ file in multiple parts where you are splitting the file up
+ into different ranges to be uploaded. If not specified,
+ the default behaviour is to read all bytes from the file
+ pointer. Fewer bytes may be available.
+ """
+
+ provider = self.bucket.connection.provider
+ if not provider.supports_chunked_transfer():
+ raise BotoClientError('%s does not support chunked transfer'
+ % provider.get_provider_name())
+
+ # Name of the Object should be specified explicitly for Streams.
+ if not self.name or self.name == '': + raise BotoClientError('Cannot determine the destination ' + 'object name for the given stream') + + if headers is None: + headers = {} + if policy: + headers[provider.acl_header] = policy + + if reduced_redundancy: + self.storage_class = 'REDUCED_REDUNDANCY' + if provider.storage_class_header: + headers[provider.storage_class_header] = self.storage_class + + if self.bucket != None: + if not replace: + if self.bucket.lookup(self.name): + return + self.send_file(fp, headers, cb, num_cb, query_args, + chunked_transfer=True, size=size) + + def set_contents_from_file(self, fp, headers=None, replace=True, + cb=None, num_cb=10, policy=None, md5=None, + reduced_redundancy=False, query_args=None, + encrypt_key=False, size=None, rewind=False): + """ + Store an object in S3 using the name of the Key object as the + key in S3 and the contents of the file pointed to by 'fp' as the + contents. The data is read from 'fp' from its current position until + 'size' bytes have been read or EOF. + + :type fp: file + :param fp: the file whose contents to upload + + :type headers: dict + :param headers: Additional HTTP headers that will be sent with + the PUT request. + + :type replace: bool + :param replace: If this parameter is False, the method will + first check to see if an object exists in the bucket with + the same key. If it does, it won't overwrite it. The + default value is True which will overwrite the object. + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two + integer parameters, the first representing the number of + bytes that have been successfully transmitted to S3 and + the second representing the size of the to be transmitted + object. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the + new key in S3. + + :type md5: A tuple containing the hexdigest version of the MD5 + checksum of the file as the first element and the + Base64-encoded version of the plain checksum as the second + element. This is the same format returned by the + compute_md5 method. + :param md5: If you need to compute the MD5 for any reason + prior to upload, it's silly to have to do it twice so this + param, if present, will be used as the MD5 values of the + file. Otherwise, the checksum will be computed. + + :type reduced_redundancy: bool + :param reduced_redundancy: If True, this will set the storage + class of the new Key to be REDUCED_REDUNDANCY. The Reduced + Redundancy Storage (RRS) feature of S3, provides lower + redundancy at lower storage cost. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type size: int + :param size: (optional) The Maximum number of bytes to read + from the file pointer (fp). This is useful when uploading + a file in multiple parts where you are splitting the file + up into different ranges to be uploaded. If not specified, + the default behaviour is to read all bytes from the file + pointer. Less bytes may be available. 
+
+ :type rewind: bool
+ :param rewind: (optional) If True, the file pointer (fp) will
+ be rewound to the start before any bytes are read from
+ it. The default behaviour is False, which reads from the
+ current position of the file pointer (fp).
+
+ :rtype: int
+ :return: The number of bytes written to the key.
+ """
+ provider = self.bucket.connection.provider
+ headers = headers or {}
+ if policy:
+ headers[provider.acl_header] = policy
+ if encrypt_key:
+ headers[provider.server_side_encryption_header] = 'AES256'
+
+ if rewind:
+ # caller requests reading from beginning of fp.
+ fp.seek(0, os.SEEK_SET)
+ else:
+ # The following seek/tell/seek logic is intended
+ # to detect applications using the older interface to
+ # set_contents_from_file(), which automatically rewound the
+ # file each time the Key was reused. This changed with commit
+ # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
+ # split into multiple parts and uploaded in parallel, and at
+ # the time of that commit this check was added because otherwise
+ # older programs would get a success status and upload an empty
+ # object. Unfortunately, it's very inefficient for fp's implemented
+ # by KeyFile (used, for example, by gsutil when copying between
+ # providers). So, we skip the check for the KeyFile case.
+ # TODO: At some point consider removing this seek/tell/seek
+ # logic, after enough time has passed that it's unlikely any
+ # programs remain that assume the older auto-rewind interface.
+ if not isinstance(fp, KeyFile):
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ if fp.tell() == spos:
+ fp.seek(0, os.SEEK_SET)
+ if fp.tell() != spos:
+ # Raise an exception as this is likely a programming
+ # error whereby there is data before the fp but nothing
+ # after it.
+ fp.seek(spos)
+ raise AttributeError('fp is at EOF. Use rewind option '
+ 'or seek() to data start.')
+ # seek back to the correct position.
+ fp.seek(spos)
+
+ if reduced_redundancy:
+ self.storage_class = 'REDUCED_REDUNDANCY'
+ if provider.storage_class_header:
+ headers[provider.storage_class_header] = self.storage_class
+ # TODO - What if the provider doesn't support reduced redundancy?
+ # What if different providers provide different classes?
+ if hasattr(fp, 'name'):
+ self.path = fp.name
+ if self.bucket != None:
+ if not md5 and provider.supports_chunked_transfer():
+ # defer md5 calculation to be done on the fly; we don't
+ # know anything about the size yet.
+ chunked_transfer = True
+ self.size = None
+ else:
+ chunked_transfer = False
+ if isinstance(fp, KeyFile):
+ # Avoid EOF seek for KeyFile case as it's very inefficient.
+ key = fp.getkey()
+ size = key.size - fp.tell()
+ self.size = size
+ # At present both GCS and S3 use MD5 for the etag for
+ # non-multipart-uploaded objects. If the etag is 32 hex
+ # chars, use it as an MD5, to avoid having to read the file
+ # twice while transferring.
+ if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
+ etag = key.etag.strip('"')
+ md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
+ if not md5:
+ # compute_md5() also sets self.size to the actual number
+ # of bytes read while computing the md5.
+ md5 = self.compute_md5(fp, size)
+ # adjust size if required
+ size = self.size
+ elif size:
+ self.size = size
+ else:
+ # If md5 is provided, we still need the size, so
+ # calculate it based on the bytes to the end of content.
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ self.size = fp.tell() - spos
+ fp.seek(spos)
+ size = self.size
+ self.md5 = md5[0]
+ self.base64md5 = md5[1]
+
+ if self.name == None:
+ self.name = self.md5
+ if not replace:
+ if self.bucket.lookup(self.name):
+ return
+
+ self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
+ query_args=query_args,
+ chunked_transfer=chunked_transfer, size=size)
+ # return number of bytes written.
+ return self.size
+
+ def set_contents_from_filename(self, filename, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False,
+ encrypt_key=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the contents of the file named by 'filename'.
+ See the set_contents_from_file method for details about the
+ parameters.
+
+ :type filename: string
+ :param filename: The name of the file that you want to put onto S3
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the
+ request to AWS.
+
+ :type replace: bool
+ :param replace: If True, replaces the contents of the file
+ if it already exists.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the object to be
+ transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter, this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the
+ new key in S3.
+
+ :type md5: A tuple containing the hexdigest version of the MD5
+ checksum of the file as the first element and the
+ Base64-encoded version of the plain checksum as the second
+ element. This is the same format returned by the
+ compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice, so this
+ param, if present, will be used as the MD5 values of the
+ file. Otherwise, the checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object
+ will be encrypted on the server-side by S3 and will be
+ stored in an encrypted form while at rest in S3.
+
+ :rtype: int
+ :return: The number of bytes written to the key.
+ """
+ with open(filename, 'rb') as fp:
+ return self.set_contents_from_file(fp, headers, replace, cb,
+ num_cb, policy, md5,
+ reduced_redundancy,
+ encrypt_key=encrypt_key)
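To make the upload path above concrete, here is a minimal usage sketch against these Key methods; the bucket name, key name, and local path are hypothetical, and boto.connect_s3() is assumed to pick up credentials from the environment or boto config:

    import boto

    def progress(transmitted, total):
        # Invoked up to num_cb times over the course of the transfer.
        print('%d of %d bytes transmitted' % (transmitted, total))

    conn = boto.connect_s3()
    bucket = conn.get_bucket('example-bucket')    # hypothetical bucket
    key = bucket.new_key('backups/data.bin')      # hypothetical key name
    key.set_contents_from_filename('/tmp/data.bin', cb=progress, num_cb=10,
                                   encrypt_key=True)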
+
+ def set_contents_from_string(self, s, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False,
+ encrypt_key=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the string 's' as the contents.
+ See the set_contents_from_file method for details about the
+ parameters.
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the
+ request to AWS.
+
+ :type replace: bool
+ :param replace: If True, replaces the contents of the file if
+ it already exists.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the object to be
+ transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter, this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the
+ new key in S3.
+
+ :type md5: A tuple containing the hexdigest version of the MD5
+ checksum of the file as the first element and the
+ Base64-encoded version of the plain checksum as the second
+ element. This is the same format returned by the
+ compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice, so this
+ param, if present, will be used as the MD5 values of the
+ file. Otherwise, the checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object will
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+ """
+ if isinstance(s, unicode):
+ s = s.encode("utf-8")
+ fp = StringIO.StringIO(s)
+ r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+ policy, md5, reduced_redundancy,
+ encrypt_key=encrypt_key)
+ fp.close()
+ return r
+
+ def get_file(self, fp, headers=None, cb=None, num_cb=10,
+ torrent=False, version_id=None, override_num_retries=None,
+ response_headers=None):
+ """
+ Retrieves a file from an S3 Key.
+
+ :type fp: file
+ :param fp: File pointer to put the data into
+
+ :type headers: dict
+ :param headers: headers to send when retrieving the file
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the download. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted from S3 and
+ the second representing the size of the object to be
+ transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter, this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: Flag for whether to get a torrent for the file
+
+ :type override_num_retries: int
+ :param override_num_retries: If not None, will override the configured
+ num_retries parameter for the underlying GET.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+ """
+ self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+ torrent=torrent, version_id=version_id,
+ override_num_retries=override_num_retries,
+ response_headers=response_headers,
+ hash_algs=None,
+ query_args=None)
+
+ def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
+ torrent=False, version_id=None, override_num_retries=None,
+ response_headers=None, hash_algs=None, query_args=None):
+ if headers is None:
+ headers = {}
+ save_debug = self.bucket.connection.debug
+ if self.bucket.connection.debug == 1:
+ self.bucket.connection.debug = 0
+
+ query_args = query_args or []
+ if torrent:
+ query_args.append('torrent')
+
+ if hash_algs is None and not torrent:
+ hash_algs = {'md5': md5}
+ digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
+
+ # If a version_id is passed in, use that. If not, check to see
+ # if the Key object has an explicit version_id and, if so, use that.
+ # Otherwise, don't pass a version_id query param.
+ if version_id is None:
+ version_id = self.version_id
+ if version_id:
+ query_args.append('versionId=%s' % version_id)
+ if response_headers:
+ for key in response_headers:
+ query_args.append('%s=%s' % (
+ key, urllib.quote(response_headers[key])))
+ query_args = '&'.join(query_args)
+ self.open('r', headers, query_args=query_args,
+ override_num_retries=override_num_retries)
+
+ data_len = 0
+ if cb:
+ if self.size is None:
+ cb_size = 0
+ else:
+ cb_size = self.size
+ if self.size is None and num_cb != -1:
+ # If the size is not available, due to a chunked transfer for
+ # example, we'll call the cb for every 1MB of data transferred.
+ cb_count = (1024 * 1024) / self.BufferSize
+ elif num_cb > 1:
+ cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
+ elif num_cb < 0:
+ cb_count = -1
+ else:
+ cb_count = 0
+ i = 0
+ cb(data_len, cb_size)
+ try:
+ for bytes in self:
+ fp.write(bytes)
+ data_len += len(bytes)
+ for alg in digesters:
+ digesters[alg].update(bytes)
+ if cb:
+ if cb_size > 0 and data_len >= cb_size:
+ break
+ i += 1
+ if i == cb_count or cb_count == -1:
+ cb(data_len, cb_size)
+ i = 0
+ except IOError, e:
+ if e.errno == errno.ENOSPC:
+ raise StorageDataError('Out of space for destination file '
+ '%s' % fp.name)
+ raise
+ if cb and (cb_count <= 1 or i > 0) and data_len > 0:
+ cb(data_len, cb_size)
+ for alg in digesters:
+ self.local_hashes[alg] = digesters[alg].digest()
+ if self.size is None and not torrent and "Range" not in headers:
+ self.size = data_len
+ self.close()
+ self.bucket.connection.debug = save_debug
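The download path mirrors this; a short sketch reusing the hypothetical names from the upload example above (the 'response-content-type' entry uses the S3 response-header overrides that the response_headers docstrings point to):

    key = bucket.get_key('backups/data.bin')
    key.get_contents_to_filename(
        '/tmp/restored.bin', cb=progress, num_cb=10,
        response_headers={'response-content-type': 'application/octet-stream'})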
+ + """ + return self.get_file(fp, headers, cb, num_cb, torrent=True) + + def get_contents_to_file(self, fp, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None, + res_download_handler=None, + response_headers=None): + """ + Retrieve an object from S3 using the name of the Key object as the + key in S3. Write the contents of the object to the file pointed + to by 'fp'. + + :type fp: File -like object + :param fp: + + :type headers: dict + :param headers: additional HTTP headers that will be sent with + the GET request. + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two + integer parameters, the first representing the number of + bytes that have been successfully transmitted to S3 and + the second representing the size of the to be transmitted + object. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type torrent: bool + :param torrent: If True, returns the contents of a torrent + file as a string. + + :type res_upload_handler: ResumableDownloadHandler + :param res_download_handler: If provided, this handler will + perform the download. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + """ + if self.bucket != None: + if res_download_handler: + res_download_handler.get_file(self, fp, headers, cb, num_cb, + torrent=torrent, + version_id=version_id) + else: + self.get_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id, + response_headers=response_headers) + + def get_contents_to_filename(self, filename, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None, + res_download_handler=None, + response_headers=None): + """ + Retrieve an object from S3 using the name of the Key object as the + key in S3. Store contents of the object to a file named by 'filename'. + See get_contents_to_file method for details about the + parameters. + + :type filename: string + :param filename: The filename of where to put the file contents + + :type headers: dict + :param headers: Any additional headers to send in the request + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two + integer parameters, the first representing the number of + bytes that have been successfully transmitted to S3 and + the second representing the size of the to be transmitted + object. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type torrent: bool + :param torrent: If True, returns the contents of a torrent file + as a string. + + :type res_upload_handler: ResumableDownloadHandler + :param res_download_handler: If provided, this handler will + perform the download. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. 
+ """ + fp = open(filename, 'wb') + try: + self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id, + res_download_handler=res_download_handler, + response_headers=response_headers) + except Exception: + os.remove(filename) + raise + finally: + fp.close() + # if last_modified date was sent from s3, try to set file's timestamp + if self.last_modified != None: + try: + modified_tuple = rfc822.parsedate_tz(self.last_modified) + modified_stamp = int(rfc822.mktime_tz(modified_tuple)) + os.utime(fp.name, (modified_stamp, modified_stamp)) + except Exception: + pass + + def get_contents_as_string(self, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None, + response_headers=None): + """ + Retrieve an object from S3 using the name of the Key object as the + key in S3. Return the contents of the object as a string. + See get_contents_to_file method for details about the + parameters. + + :type headers: dict + :param headers: Any additional headers to send in the request + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two + integer parameters, the first representing the number of + bytes that have been successfully transmitted to S3 and + the second representing the size of the to be transmitted + object. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type torrent: bool + :param torrent: If True, returns the contents of a torrent file + as a string. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + + :rtype: string + :returns: The contents of the file as a string + """ + fp = StringIO.StringIO() + self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id, + response_headers=response_headers) + return fp.getvalue() + + def add_email_grant(self, permission, email_address, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a key. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL + and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). + + :type email_address: string + :param email_address: The email address associated with the AWS + account your are granting the permission to. + + :type recursive: boolean + :param recursive: A boolean value to controls whether the + command will apply the grant to all keys within the bucket + or not. The default value is False. By passing a True + value, the call will iterate through all keys in the + bucket and apply the same grant to each key. CAUTION: If + you have a lot of keys, this could take a long time! + """ + policy = self.get_acl(headers=headers) + policy.acl.add_email_grant(permission, email_address) + self.set_acl(policy, headers=headers) + + def add_user_grant(self, permission, user_id, headers=None, + display_name=None): + """ + Convenience method that provides a quick way to add a canonical + user grant to a key. 
This method retrieves the current ACL,
+ creates a new grant based on the parameters passed in, adds that
+ grant to the ACL and then PUTs the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+ :type user_id: string
+ :param user_id: The canonical user id associated with the AWS
+ account you are granting the permission to.
+
+ :type display_name: string
+ :param display_name: An optional string containing the user's
+ Display Name. Only required on Walrus.
+ """
+ policy = self.get_acl(headers=headers)
+ policy.acl.add_user_grant(permission, user_id,
+ display_name=display_name)
+ self.set_acl(policy, headers=headers)
+
+ def _normalize_metadata(self, metadata):
+ if type(metadata) == set:
+ norm_metadata = set()
+ for k in metadata:
+ norm_metadata.add(k.lower())
+ else:
+ norm_metadata = {}
+ for k in metadata:
+ norm_metadata[k.lower()] = metadata[k]
+ return norm_metadata
+
+ def _get_remote_metadata(self, headers=None):
+ """
+ Extracts metadata from an existing URI into a dict, so we can
+ overwrite/delete from it to form the new set of metadata to apply to a
+ key.
+ """
+ metadata = {}
+ for underscore_name in self._underscore_base_user_settable_fields:
+ if hasattr(self, underscore_name):
+ value = getattr(self, underscore_name)
+ if value:
+ # Generate HTTP field name corresponding to "_" named field.
+ field_name = underscore_name.replace('_', '-')
+ metadata[field_name.lower()] = value
+ # self.metadata contains custom metadata, which are all user-settable.
+ prefix = self.provider.metadata_prefix
+ for underscore_name in self.metadata:
+ field_name = underscore_name.replace('_', '-')
+ metadata['%s%s' % (prefix, field_name.lower())] = (
+ self.metadata[underscore_name])
+ return metadata
+
+ def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
+ headers=None):
+ metadata_plus = self._normalize_metadata(metadata_plus)
+ metadata_minus = self._normalize_metadata(metadata_minus)
+ metadata = self._get_remote_metadata()
+ metadata.update(metadata_plus)
+ for h in metadata_minus:
+ if h in metadata:
+ del metadata[h]
+ src_bucket = self.bucket
+ # Boto prepends the meta prefix when adding headers, so strip the prefix
+ # from the metadata before sending it back in to the copy_key() call.
+ rewritten_metadata = {}
+ for h in metadata:
+ if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
+ rewritten_h = (h.replace('x-goog-meta-', '')
+ .replace('x-amz-meta-', ''))
+ else:
+ rewritten_h = h
+ rewritten_metadata[rewritten_h] = metadata[h]
+ metadata = rewritten_metadata
+ src_bucket.copy_key(self.name, self.bucket.name, self.name,
+ metadata=metadata, preserve_acl=preserve_acl,
+ headers=headers)
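Both grant helpers above follow the same read-modify-write pattern on the key's ACL; a minimal sketch (the email address and canonical user id are placeholders):

    key = bucket.get_key('backups/data.bin')
    key.add_email_grant('READ', 'colleague@example.com')          # placeholder address
    key.add_user_grant('FULL_CONTROL', 'canonical-user-id-here')  # placeholder id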
+ + """ + response = self.bucket.connection.make_request( + 'POST', self.bucket.name, self.name, + data=self.RestoreBody % days, + headers=headers, query_args='restore') + if response.status not in (200, 202): + provider = self.bucket.connection.provider + raise provider.storage_response_error(response.status, + response.reason, + response.read()) diff --git a/awx/lib/site-packages/boto/s3/keyfile.py b/awx/lib/site-packages/boto/s3/keyfile.py new file mode 100644 index 0000000000..84858a2ba2 --- /dev/null +++ b/awx/lib/site-packages/boto/s3/keyfile.py @@ -0,0 +1,134 @@ +# Copyright 2013 Google Inc. +# Copyright 2011, Nexenta Systems Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Wrapper class to expose a Key being read via a partial implementaiton of the +Python file interface. The only functions supported are those needed for seeking +in a Key open for reading. +""" + +import os +from boto.exception import StorageResponseError + +class KeyFile(): + + def __init__(self, key): + self.key = key + self.key.open_read() + self.location = 0 + self.closed = False + self.softspace = -1 # Not implemented. + self.mode = 'r' + self.encoding = 'Undefined in KeyFile' + self.errors = 'Undefined in KeyFile' + self.newlines = 'Undefined in KeyFile' + self.name = key.name + + def tell(self): + if self.location is None: + raise ValueError("I/O operation on closed file") + return self.location + + def seek(self, pos, whence=os.SEEK_SET): + self.key.close(fast=True) + if whence == os.SEEK_END: + # We need special handling for this case because sending an HTTP range GET + # with EOF for the range start would cause an invalid range error. Instead + # we position to one before EOF (plus pos) and then read one byte to + # position at EOF. + if self.key.size == 0: + # Don't try to seek with an empty key. + return + pos = self.key.size + pos - 1 + if pos < 0: + raise IOError("Invalid argument") + self.key.open_read(headers={"Range": "bytes=%d-" % pos}) + self.key.read(1) + self.location = pos + 1 + return + + if whence == os.SEEK_SET: + if pos < 0: + raise IOError("Invalid argument") + elif whence == os.SEEK_CUR: + pos += self.location + else: + raise IOError('Invalid whence param (%d) passed to seek' % whence) + try: + self.key.open_read(headers={"Range": "bytes=%d-" % pos}) + except StorageResponseError, e: + # 416 Invalid Range means that the given starting byte was past the end + # of file. We catch this because the Python file interface allows silently + # seeking past the end of the file. 
+ if e.status != 416:
+ raise
+
+ self.location = pos
+
+ def read(self, size):
+ self.location += size
+ return self.key.read(size)
+
+ def close(self):
+ self.key.close()
+ self.location = None
+ self.closed = True
+
+ def isatty(self):
+ return False
+
+ # Non-file interface, useful for code that wants to dig into underlying Key
+ # state.
+ def getkey(self):
+ return self.key
+
+ # Unimplemented interfaces below here.
+
+ def write(self, buf):
+ raise NotImplementedError('write not implemented in KeyFile')
+
+ def fileno(self):
+ raise NotImplementedError('fileno not implemented in KeyFile')
+
+ def flush(self):
+ raise NotImplementedError('flush not implemented in KeyFile')
+
+ def next(self):
+ raise NotImplementedError('next not implemented in KeyFile')
+
+ def readinto(self):
+ raise NotImplementedError('readinto not implemented in KeyFile')
+
+ def readline(self):
+ raise NotImplementedError('readline not implemented in KeyFile')
+
+ def readlines(self):
+ raise NotImplementedError('readlines not implemented in KeyFile')
+
+ def truncate(self):
+ raise NotImplementedError('truncate not implemented in KeyFile')
+
+ def writelines(self):
+ raise NotImplementedError('writelines not implemented in KeyFile')
+
+ def xreadlines(self):
+ raise NotImplementedError('xreadlines not implemented in KeyFile')
diff --git a/awx/lib/site-packages/boto/s3/lifecycle.py b/awx/lib/site-packages/boto/s3/lifecycle.py
new file mode 100644
index 0000000000..58126e6d5b
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/lifecycle.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class Rule(object):
+ """
+ A Lifecycle rule for an S3 bucket.
+
+ :ivar id: Unique identifier for the rule. The value cannot be longer
+ than 255 characters.
+
+ :ivar prefix: Prefix identifying one or more objects to which the
+ rule applies.
+
+ :ivar status: If Enabled, the rule is currently being applied.
+ If Disabled, the rule is not currently being applied.
+
+ :ivar expiration: An instance of `Expiration`. This indicates
+ the lifetime of the objects that are subject to the rule.
+
+ :ivar transition: An instance of `Transition`. This indicates
+ when to transition to a different storage class.
+ + """ + def __init__(self, id=None, prefix=None, status=None, expiration=None, + transition=None): + self.id = id + self.prefix = prefix + self.status = status + if isinstance(expiration, (int, long)): + # retain backwards compatibility??? + self.expiration = Expiration(days=expiration) + else: + # None or object + self.expiration = expiration + self.transition = transition + + def __repr__(self): + return '' % self.id + + def startElement(self, name, attrs, connection): + if name == 'Transition': + self.transition = Transition() + return self.transition + elif name == 'Expiration': + self.expiration = Expiration() + return self.expiration + return None + + def endElement(self, name, value, connection): + if name == 'ID': + self.id = value + elif name == 'Prefix': + self.prefix = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) + + def to_xml(self): + s = '' + s += '%s' % self.id + s += '%s' % self.prefix + s += '%s' % self.status + if self.expiration is not None: + s += self.expiration.to_xml() + if self.transition is not None: + s += self.transition.to_xml() + s += '' + return s + +class Expiration(object): + """ + When an object will expire. + + :ivar days: The number of days until the object expires + + :ivar date: The date when the object will expire. Must be + in ISO 8601 format. + """ + def __init__(self, days=None, date=None): + self.days = days + self.date = date + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Days': + self.days = int(value) + elif name == 'Date': + self.date = value + + def __repr__(self): + if self.days is None: + how_long = "on: %s" % self.date + else: + how_long = "in: %s days" % self.days + return '' % how_long + + def to_xml(self): + s = '' + if self.days is not None: + s += '%s' % self.days + elif self.date is not None: + s += '%s' % self.date + s += '' + return s + +class Transition(object): + """ + A transition to a different storage class. + + :ivar days: The number of days until the object should be moved. + + :ivar date: The date when the object should be moved. Should be + in ISO 8601 format. + + :ivar storage_class: The storage class to transition to. Valid + values are GLACIER. + + """ + def __init__(self, days=None, date=None, storage_class=None): + self.days = days + self.date = date + self.storage_class = storage_class + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Days': + self.days = int(value) + elif name == 'Date': + self.date = value + elif name == 'StorageClass': + self.storage_class = value + + def __repr__(self): + if self.days is None: + how_long = "on: %s" % self.date + else: + how_long = "in: %s days" % self.days + return '' % (how_long, self.storage_class) + + def to_xml(self): + s = '' + s += '%s' % self.storage_class + if self.days is not None: + s += '%s' % self.days + elif self.date is not None: + s += '%s' % self.date + s += '' + return s + +class Lifecycle(list): + """ + A container for the rules associated with a Lifecycle configuration. + """ + + def startElement(self, name, attrs, connection): + if name == 'Rule': + rule = Rule() + self.append(rule) + return rule + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + def to_xml(self): + """ + Returns a string containing the XML version of the Lifecycle + configuration as defined by S3. 
+ """ + s = '' + s += '' + for rule in self: + s += rule.to_xml() + s += '' + return s + + def add_rule(self, id, prefix, status, expiration, transition=None): + """ + Add a rule to this Lifecycle configuration. This only adds + the rule to the local copy. To install the new rule(s) on + the bucket, you need to pass this Lifecycle config object + to the configure_lifecycle method of the Bucket object. + + :type id: str + :param id: Unique identifier for the rule. The value cannot be longer + than 255 characters. + + :type prefix: str + :iparam prefix: Prefix identifying one or more objects to which the + rule applies. + + :type status: str + :param status: If 'Enabled', the rule is currently being applied. + If 'Disabled', the rule is not currently being applied. + + :type expiration: int + :param expiration: Indicates the lifetime, in days, of the objects + that are subject to the rule. The value must be a non-zero + positive integer. A Expiration object instance is also perfect. + + :type transition: Transition + :param transition: Indicates when an object transitions to a + different storage class. + """ + rule = Rule(id, prefix, status, expiration, transition) + self.append(rule) diff --git a/awx/lib/site-packages/boto/s3/multidelete.py b/awx/lib/site-packages/boto/s3/multidelete.py new file mode 100644 index 0000000000..3e2d48e32d --- /dev/null +++ b/awx/lib/site-packages/boto/s3/multidelete.py @@ -0,0 +1,138 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto import handler +import xml.sax + +class Deleted(object): + """ + A successfully deleted object in a multi-object delete request. + + :ivar key: Key name of the object that was deleted. + + :ivar version_id: Version id of the object that was deleted. + + :ivar delete_marker: If True, indicates the object deleted + was a DeleteMarker. + + :ivar delete_marker_version_id: Version ID of the delete marker + deleted. 
+ """ + def __init__(self, key=None, version_id=None, + delete_marker=False, delete_marker_version_id=None): + self.key = key + self.version_id = version_id + self.delete_marker = delete_marker + self.delete_marker_version_id = delete_marker_version_id + + def __repr__(self): + if self.version_id: + return '' % (self.key, self.version_id) + else: + return '' % self.key + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.key = value + elif name == 'VersionId': + self.version_id = value + elif name == 'DeleteMarker': + if value.lower() == 'true': + self.delete_marker = True + elif name == 'DeleteMarkerVersionId': + self.delete_marker_version_id = value + else: + setattr(self, name, value) + +class Error(object): + """ + An unsuccessful deleted object in a multi-object delete request. + + :ivar key: Key name of the object that was not deleted. + + :ivar version_id: Version id of the object that was not deleted. + + :ivar code: Status code of the failed delete operation. + + :ivar message: Status message of the failed delete operation. + """ + def __init__(self, key=None, version_id=None, + code=None, message=None): + self.key = key + self.version_id = version_id + self.code = code + self.message = message + + def __repr__(self): + if self.version_id: + return '' % (self.key, self.version_id, + self.code) + else: + return '' % (self.key, self.code) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.key = value + elif name == 'VersionId': + self.version_id = value + elif name == 'Code': + self.code = value + elif name == 'Message': + self.message = value + else: + setattr(self, name, value) + +class MultiDeleteResult(object): + """ + The status returned from a MultiObject Delete request. + + :ivar deleted: A list of successfully deleted objects. Note that if + the quiet flag was specified in the request, this list will + be empty because only error responses would be returned. + + :ivar errors: A list of unsuccessfully deleted objects. + """ + + def __init__(self, bucket=None): + self.bucket = None + self.deleted = [] + self.errors = [] + + def startElement(self, name, attrs, connection): + if name == 'Deleted': + d = Deleted() + self.deleted.append(d) + return d + elif name == 'Error': + e = Error() + self.errors.append(e) + return e + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + diff --git a/awx/lib/site-packages/boto/s3/multipart.py b/awx/lib/site-packages/boto/s3/multipart.py new file mode 100644 index 0000000000..fae3389ef7 --- /dev/null +++ b/awx/lib/site-packages/boto/s3/multipart.py @@ -0,0 +1,319 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# Copyright (c) 2010, Eucalyptus Systems, Inc. 
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import user
+import key
+from boto import handler
+import xml.sax
+
+
+class CompleteMultiPartUpload(object):
+ """
+ Represents a completed MultiPart Upload. Contains the
+ following useful attributes:
+
+ * location - The URI of the completed upload
+ * bucket_name - The name of the bucket in which the upload
+ is contained
+ * key_name - The name of the new, completed key
+ * etag - The MD5 hash of the completed, combined upload
+ * version_id - The version_id of the completed upload
+ * encrypted - The value of the encryption header
+ """
+
+ def __init__(self, bucket=None):
+ self.bucket = bucket
+ self.location = None
+ self.bucket_name = None
+ self.key_name = None
+ self.etag = None
+ self.version_id = None
+ self.encrypted = None
+
+ def __repr__(self):
+ return '<CompleteMultiPartUpload: %s.%s>' % (self.bucket_name,
+ self.key_name)
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Location':
+ self.location = value
+ elif name == 'Bucket':
+ self.bucket_name = value
+ elif name == 'Key':
+ self.key_name = value
+ elif name == 'ETag':
+ self.etag = value
+ else:
+ setattr(self, name, value)
+
+
+class Part(object):
+ """
+ Represents a single part in a MultiPart upload.
+ Attributes include:
+
+ * part_number - The integer part number
+ * last_modified - The last modified date of this part
+ * etag - The MD5 hash of this part
+ * size - The size, in bytes, of this part
+ """
+
+ def __init__(self, bucket=None):
+ self.bucket = bucket
+ self.part_number = None
+ self.last_modified = None
+ self.etag = None
+ self.size = None
+
+ def __repr__(self):
+ if isinstance(self.part_number, int):
+ return '<Part %d>' % self.part_number
+ else:
+ return '<Part %s>' % None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'PartNumber':
+ self.part_number = int(value)
+ elif name == 'LastModified':
+ self.last_modified = value
+ elif name == 'ETag':
+ self.etag = value
+ elif name == 'Size':
+ self.size = int(value)
+ else:
+ setattr(self, name, value)
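For context, the usual flow through these classes starts from a Bucket.initiate_multipart_upload call as in mainline boto and ends with complete_upload; names and paths here are hypothetical, and note that S3 requires every part except the last to be at least 5 MB:

    mp = bucket.initiate_multipart_upload('big/archive.tar')
    for i, path in enumerate(['/tmp/part1', '/tmp/part2'], start=1):
        with open(path, 'rb') as fp:
            mp.upload_part_from_file(fp, part_num=i)
    mp.complete_upload()   # or mp.cancel_upload() to discard the parts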
+ """ + more_results = True + part = None + while more_results: + parts = mpupload.get_all_parts(None, part_number_marker) + for part in parts: + yield part + part_number_marker = mpupload.next_part_number_marker + more_results = mpupload.is_truncated + + +class MultiPartUpload(object): + """ + Represents a MultiPart Upload operation. + """ + + def __init__(self, bucket=None): + self.bucket = bucket + self.bucket_name = None + self.key_name = None + self.id = id + self.initiator = None + self.owner = None + self.storage_class = None + self.initiated = None + self.part_number_marker = None + self.next_part_number_marker = None + self.max_parts = None + self.is_truncated = False + self._parts = None + + def __repr__(self): + return '' % self.key_name + + def __iter__(self): + return part_lister(self) + + def to_xml(self): + s = '\n' + for part in self: + s += ' \n' + s += ' %d\n' % part.part_number + s += ' %s\n' % part.etag + s += ' \n' + s += '' + return s + + def startElement(self, name, attrs, connection): + if name == 'Initiator': + self.initiator = user.User(self) + return self.initiator + elif name == 'Owner': + self.owner = user.User(self) + return self.owner + elif name == 'Part': + part = Part(self.bucket) + self._parts.append(part) + return part + return None + + def endElement(self, name, value, connection): + if name == 'Bucket': + self.bucket_name = value + elif name == 'Key': + self.key_name = value + elif name == 'UploadId': + self.id = value + elif name == 'StorageClass': + self.storage_class = value + elif name == 'PartNumberMarker': + self.part_number_marker = value + elif name == 'NextPartNumberMarker': + self.next_part_number_marker = value + elif name == 'MaxParts': + self.max_parts = int(value) + elif name == 'IsTruncated': + if value == 'true': + self.is_truncated = True + else: + self.is_truncated = False + elif name == 'Initiated': + self.initiated = value + else: + setattr(self, name, value) + + def get_all_parts(self, max_parts=None, part_number_marker=None): + """ + Return the uploaded parts of this MultiPart Upload. This is + a lower-level method that requires you to manually page through + results. To simplify this process, you can just use the + object itself as an iterator and it will automatically handle + all of the paging with S3. + """ + self._parts = [] + query_args = 'uploadId=%s' % self.id + if max_parts: + query_args += '&max-parts=%d' % max_parts + if part_number_marker: + query_args += '&part-number-marker=%s' % part_number_marker + response = self.bucket.connection.make_request('GET', self.bucket.name, + self.key_name, + query_args=query_args) + body = response.read() + if response.status == 200: + h = handler.XmlHandler(self, self) + xml.sax.parseString(body, h) + return self._parts + + def upload_part_from_file(self, fp, part_num, headers=None, replace=True, + cb=None, num_cb=10, md5=None, size=None): + """ + Upload another part of this MultiPart Upload. + + :type fp: file + :param fp: The file object you want to upload. + + :type part_num: int + :param part_num: The number of this part. + + The other parameters are exactly as defined for the + :class:`boto.s3.key.Key` set_contents_from_file method. + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: The uploaded part containing the etag. 
+ """ + if part_num < 1: + raise ValueError('Part numbers must be greater than zero') + query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num) + key = self.bucket.new_key(self.key_name) + key.set_contents_from_file(fp, headers=headers, replace=replace, + cb=cb, num_cb=num_cb, md5=md5, + reduced_redundancy=False, + query_args=query_args, size=size) + return key + + def copy_part_from_key(self, src_bucket_name, src_key_name, part_num, + start=None, end=None, src_version_id=None, + headers=None): + """ + Copy another part of this MultiPart Upload. + + :type src_bucket_name: string + :param src_bucket_name: Name of the bucket containing the source key + + :type src_key_name: string + :param src_key_name: Name of the source key + + :type part_num: int + :param part_num: The number of this part. + + :type start: int + :param start: Zero-based byte offset to start copying from + + :type end: int + :param end: Zero-based byte offset to copy to + + :type src_version_id: string + :param src_version_id: version_id of source object to copy from + + :type headers: dict + :param headers: Any headers to pass along in the request + """ + if part_num < 1: + raise ValueError('Part numbers must be greater than zero') + query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num) + if start is not None and end is not None: + rng = 'bytes=%s-%s' % (start, end) + provider = self.bucket.connection.provider + if headers is None: + headers = {} + else: + headers = headers.copy() + headers[provider.copy_source_range_header] = rng + return self.bucket.copy_key(self.key_name, src_bucket_name, + src_key_name, + src_version_id=src_version_id, + storage_class=None, + headers=headers, + query_args=query_args) + + def complete_upload(self): + """ + Complete the MultiPart Upload operation. This method should + be called when all parts of the file have been successfully + uploaded to S3. + + :rtype: :class:`boto.s3.multipart.CompletedMultiPartUpload` + :returns: An object representing the completed upload. + """ + xml = self.to_xml() + return self.bucket.complete_multipart_upload(self.key_name, + self.id, xml) + + def cancel_upload(self): + """ + Cancels a MultiPart Upload operation. The storage consumed by + any previously uploaded parts will be freed. However, if any + part uploads are currently in progress, those part uploads + might or might not succeed. As a result, it might be necessary + to abort a given multipart upload multiple times in order to + completely free all storage consumed by all parts. + """ + self.bucket.cancel_multipart_upload(self.key_name, self.id) diff --git a/awx/lib/site-packages/boto/s3/prefix.py b/awx/lib/site-packages/boto/s3/prefix.py new file mode 100644 index 0000000000..adf28e935f --- /dev/null +++ b/awx/lib/site-packages/boto/s3/prefix.py @@ -0,0 +1,42 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class Prefix(object):
+    def __init__(self, bucket=None, name=None):
+        self.bucket = bucket
+        self.name = name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Prefix':
+            self.name = value
+        else:
+            setattr(self, name, value)
+
+    @property
+    def provider(self):
+        provider = None
+        if self.bucket and self.bucket.connection:
+            provider = self.bucket.connection.provider
+        return provider
+
diff --git a/awx/lib/site-packages/boto/s3/resumable_download_handler.py b/awx/lib/site-packages/boto/s3/resumable_download_handler.py
new file mode 100644
index 0000000000..cf182791fc
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/resumable_download_handler.py
@@ -0,0 +1,353 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import errno
+import httplib
+import os
+import re
+import socket
+import time
+import boto
+from boto import config, storage_uri_for_key
+from boto.connection import AWSAuthConnection
+from boto.exception import ResumableDownloadException
+from boto.exception import ResumableTransferDisposition
+from boto.s3.keyfile import KeyFile
+from boto.gs.key import Key as GSKey
+
+"""
+Resumable download handler.
+
+Resumable downloads will retry failed downloads, resuming at the byte count
+completed by the last download attempt.  If too many retries happen with no
+progress (per the configurable num_retries param), the download will be
+aborted.
+
+The caller can optionally specify a tracker_file_name param in the
+ResumableDownloadHandler constructor.  If you do this, that file will
+save the state needed to allow retrying later, in a separate process
+(e.g., in a later run of gsutil).
+
+Note that resumable downloads work across providers (they depend only
+on support for Range GETs), but this code is in the boto.s3 package
+because it is the wrong abstraction level to go in the top-level boto
+package.
+ +TODO: At some point we should refactor the code to have a storage_service +package where all these provider-independent files go. +""" + + +class ByteTranslatingCallbackHandler(object): + """ + Proxy class that translates progress callbacks made by + boto.s3.Key.get_file(), taking into account that we're resuming + a download. + """ + def __init__(self, proxied_cb, download_start_point): + self.proxied_cb = proxied_cb + self.download_start_point = download_start_point + + def call(self, total_bytes_uploaded, total_size): + self.proxied_cb(self.download_start_point + total_bytes_uploaded, + total_size) + + +def get_cur_file_size(fp, position_to_eof=False): + """ + Returns size of file, optionally leaving fp positioned at EOF. + """ + if isinstance(fp, KeyFile) and not position_to_eof: + # Avoid EOF seek for KeyFile case as it's very inefficient. + return fp.getkey().size + if not position_to_eof: + cur_pos = fp.tell() + fp.seek(0, os.SEEK_END) + cur_file_size = fp.tell() + if not position_to_eof: + fp.seek(cur_pos, os.SEEK_SET) + return cur_file_size + + +class ResumableDownloadHandler(object): + """ + Handler for resumable downloads. + """ + + MIN_ETAG_LEN = 5 + + RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error, + socket.gaierror) + + def __init__(self, tracker_file_name=None, num_retries=None): + """ + Constructor. Instantiate once for each downloaded file. + + :type tracker_file_name: string + :param tracker_file_name: optional file name to save tracking info + about this download. If supplied and the current process fails + the download, it can be retried in a new process. If called + with an existing file containing an unexpired timestamp, + we'll resume the transfer for this file; else we'll start a + new resumable download. + + :type num_retries: int + :param num_retries: the number of times we'll re-try a resumable + download making no progress. (Count resets every time we get + progress, so download can span many more than this number of + retries.) + """ + self.tracker_file_name = tracker_file_name + self.num_retries = num_retries + self.etag_value_for_current_download = None + if tracker_file_name: + self._load_tracker_file_etag() + # Save download_start_point in instance state so caller can + # find how much was transferred by this ResumableDownloadHandler + # (across retries). + self.download_start_point = None + + def _load_tracker_file_etag(self): + f = None + try: + f = open(self.tracker_file_name, 'r') + self.etag_value_for_current_download = f.readline().rstrip('\n') + # We used to match an MD5-based regex to ensure that the etag was + # read correctly. Since ETags need not be MD5s, we now do a simple + # length sanity check instead. + if len(self.etag_value_for_current_download) < self.MIN_ETAG_LEN: + print('Couldn\'t read etag in tracker file (%s). Restarting ' + 'download from scratch.' % self.tracker_file_name) + except IOError, e: + # Ignore non-existent file (happens first time a download + # is attempted on an object), but warn user for other errors. + if e.errno != errno.ENOENT: + # Will restart because + # self.etag_value_for_current_download == None. + print('Couldn\'t read URI tracker file (%s): %s. Restarting ' + 'download from scratch.' 
%
+                      (self.tracker_file_name, e.strerror))
+        finally:
+            if f:
+                f.close()
+
+    def _save_tracker_info(self, key):
+        self.etag_value_for_current_download = key.etag.strip('"\'')
+        if not self.tracker_file_name:
+            return
+        f = None
+        try:
+            f = open(self.tracker_file_name, 'w')
+            f.write('%s\n' % self.etag_value_for_current_download)
+        except IOError, e:
+            raise ResumableDownloadException(
+                'Couldn\'t write tracker file (%s): %s.\nThis can happen '
+                'if you\'re using an incorrectly configured download tool\n'
+                '(e.g., gsutil configured to save tracker files to an '
+                'unwritable directory)' %
+                (self.tracker_file_name, e.strerror),
+                ResumableTransferDisposition.ABORT)
+        finally:
+            if f:
+                f.close()
+
+    def _remove_tracker_file(self):
+        if (self.tracker_file_name and
+            os.path.exists(self.tracker_file_name)):
+            os.unlink(self.tracker_file_name)
+
+    def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
+                                    torrent, version_id, hash_algs):
+        """
+        Attempts a resumable download.
+
+        Raises ResumableDownloadException if any problems occur.
+        """
+        cur_file_size = get_cur_file_size(fp, position_to_eof=True)
+
+        if (cur_file_size and
+            self.etag_value_for_current_download and
+            self.etag_value_for_current_download == key.etag.strip('"\'')):
+            # Try to resume existing transfer.
+            if cur_file_size > key.size:
+                raise ResumableDownloadException(
+                    '%s is larger (%d) than %s (%d).\nDeleting tracker file, '
+                    'so if you re-try this download it will start from '
+                    'scratch' %
+                    (fp.name, cur_file_size, str(storage_uri_for_key(key)),
+                     key.size), ResumableTransferDisposition.ABORT)
+            elif cur_file_size == key.size:
+                if key.bucket.connection.debug >= 1:
+                    print 'Download complete.'
+                return
+            if key.bucket.connection.debug >= 1:
+                print 'Resuming download.'
+            headers = headers.copy()
+            headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
+            cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
+            self.download_start_point = cur_file_size
+        else:
+            if key.bucket.connection.debug >= 1:
+                print 'Starting new resumable download.'
+            self._save_tracker_info(key)
+            self.download_start_point = 0
+            # Truncate the file, in case a new resumable download is being
+            # started atop an existing file.
+            fp.truncate(0)
+
+        # Disable AWSAuthConnection-level retry behavior, since that would
+        # cause downloads to restart from scratch.
+        if isinstance(key, GSKey):
+            key.get_file(fp, headers, cb, num_cb, torrent, version_id,
+                         override_num_retries=0, hash_algs=hash_algs)
+        else:
+            key.get_file(fp, headers, cb, num_cb, torrent, version_id,
+                         override_num_retries=0)
+        fp.flush()
+
+    def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
+                 version_id=None, hash_algs=None):
+        """
+        Retrieves a file from a Key.
+
+        :type key: :class:`boto.s3.key.Key` or subclass
+        :param key: The Key object from which the file is to be downloaded
+
+        :type fp: file
+        :param fp: File pointer into which data should be downloaded
+
+        :type headers: dict
+        :param headers: Headers to send when retrieving the file
+
+        :type cb: function
+        :param cb: (optional) a callback function that will be called to report
+            progress on the download.  The callback should accept two integer
+            parameters, the first representing the number of bytes that have
+            been successfully transmitted from the storage service and
+            the second representing the total number of bytes that need
+            to be transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the cb
+            parameter this parameter determines the granularity of the callback
+            by defining the maximum number of times the callback will be
+            called during the file transfer.
+
+        :type torrent: bool
+        :param torrent: Flag for whether to get a torrent for the file
+
+        :type version_id: string
+        :param version_id: The version ID (optional)
+
+        :type hash_algs: dictionary
+        :param hash_algs: (optional) Dictionary of hash algorithms and
+            corresponding hashing class that implements update() and digest().
+            Defaults to {'md5': hashlib/md5.md5}.
+
+        Raises ResumableDownloadException if a problem occurs during
+        the transfer.
+        """
+
+        debug = key.bucket.connection.debug
+        if not headers:
+            headers = {}
+
+        # Use num-retries from constructor if one was provided; else check
+        # for a value specified in the boto config file; else default to 6.
+        if self.num_retries is None:
+            self.num_retries = config.getint('Boto', 'num_retries', 6)
+        progress_less_iterations = 0
+
+        while True:  # Retry as long as we're making progress.
+            had_file_bytes_before_attempt = get_cur_file_size(fp)
+            try:
+                self._attempt_resumable_download(key, fp, headers, cb, num_cb,
+                                                 torrent, version_id,
+                                                 hash_algs)
+                # Download succeeded, so remove the tracker file (if we have
+                # one).
+                self._remove_tracker_file()
+                # Previously, check_final_md5() was called here to validate
+                # downloaded file's checksum, however, to be consistent with
+                # non-resumable downloads, this call was removed.  Checksum
+                # validation of file contents should be done by the caller.
+                if debug >= 1:
+                    print 'Resumable download complete.'
+                return
+            except self.RETRYABLE_EXCEPTIONS, e:
+                if debug >= 1:
+                    print('Caught exception (%s)' % e.__repr__())
+                if isinstance(e, IOError) and e.errno == errno.EPIPE:
+                    # Broken pipe error causes httplib to immediately
+                    # close the socket (http://bugs.python.org/issue5542),
+                    # so we need to close and reopen the key before resuming
+                    # the download.
+                    if isinstance(key, GSKey):
+                        key.get_file(fp, headers, cb, num_cb, torrent,
+                                     version_id, override_num_retries=0,
+                                     hash_algs=hash_algs)
+                    else:
+                        key.get_file(fp, headers, cb, num_cb, torrent,
+                                     version_id, override_num_retries=0)
+            except ResumableDownloadException, e:
+                if (e.disposition ==
+                        ResumableTransferDisposition.ABORT_CUR_PROCESS):
+                    if debug >= 1:
+                        print('Caught non-retryable ResumableDownloadException '
+                              '(%s)' % e.message)
+                    raise
+                elif (e.disposition ==
+                        ResumableTransferDisposition.ABORT):
+                    if debug >= 1:
+                        print('Caught non-retryable ResumableDownloadException '
+                              '(%s); aborting and removing tracker file' %
+                              e.message)
+                    self._remove_tracker_file()
+                    raise
+                else:
+                    if debug >= 1:
+                        print('Caught ResumableDownloadException (%s) - will '
+                              'retry' % e.message)
+
+            # At this point we had a re-tryable failure; see if we made
+            # progress.
+            if get_cur_file_size(fp) > had_file_bytes_before_attempt:
+                progress_less_iterations = 0
+            else:
+                progress_less_iterations += 1
+
+            if progress_less_iterations > self.num_retries:
+                # Don't retry any longer in the current process.
+                raise ResumableDownloadException(
+                    'Too many resumable download attempts failed without '
+                    'progress. You might try this download again later',
+                    ResumableTransferDisposition.ABORT_CUR_PROCESS)
+
+            # Close the key, in case a previous download died partway
+            # through and left data in the underlying key HTTP buffer.
+            # Do this within a try/except block in case the connection is
+            # closed (since key.close() attempts to do a final read, in which
+            # case this read attempt would get an IncompleteRead exception,
+            # which we can safely ignore).
+            try:
+                key.close()
+            except httplib.IncompleteRead:
+                pass
+
+            sleep_time_secs = 2**progress_less_iterations
+            if debug >= 1:
+                print('Got retryable failure (%d progress-less in a row).\n'
+                      'Sleeping %d seconds before re-trying' %
+                      (progress_less_iterations, sleep_time_secs))
+            time.sleep(sleep_time_secs)
diff --git a/awx/lib/site-packages/boto/s3/tagging.py b/awx/lib/site-packages/boto/s3/tagging.py
new file mode 100644
index 0000000000..0af6406fb1
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/tagging.py
@@ -0,0 +1,71 @@
+from boto import handler
+import xml.sax
+
+
+class Tag(object):
+    def __init__(self, key=None, value=None):
+        self.key = key
+        self.value = value
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Key':
+            self.key = value
+        elif name == 'Value':
+            self.value = value
+
+    def to_xml(self):
+        return '<Tag><Key>%s</Key><Value>%s</Value></Tag>' % (
+            self.key, self.value)
+
+    def __eq__(self, other):
+        return (self.key == other.key and self.value == other.value)
+
+
+class TagSet(list):
+    def startElement(self, name, attrs, connection):
+        if name == 'Tag':
+            tag = Tag()
+            self.append(tag)
+            return tag
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def add_tag(self, key, value):
+        tag = Tag(key, value)
+        self.append(tag)
+
+    def to_xml(self):
+        xml = '<TagSet>'
+        for tag in self:
+            xml += tag.to_xml()
+        xml += '</TagSet>'
+        return xml
+
+
+class Tags(list):
+    """A container for the tags associated with a bucket."""
+
+    def startElement(self, name, attrs, connection):
+        if name == 'TagSet':
+            tag_set = TagSet()
+            self.append(tag_set)
+            return tag_set
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def to_xml(self):
+        xml = '<Tagging>'
+        for tag_set in self:
+            xml += tag_set.to_xml()
+        xml += '</Tagging>'
+        return xml
+
+    def add_tag_set(self, tag_set):
+        self.append(tag_set)
diff --git a/awx/lib/site-packages/boto/s3/user.py b/awx/lib/site-packages/boto/s3/user.py
new file mode 100644
index 0000000000..f45f038130
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/user.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
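+
+# Usage sketch (illustrative; not part of upstream boto): the User class
+# below is normally populated by boto's SAX handler while parsing Owner or
+# Initiator elements, but it can also be built directly.  The id and
+# display name here are made-up values:
+#
+#   u = User(id='abc123', display_name='alice')
+#   u.to_xml()
+#   # -> '<Owner><ID>abc123</ID><DisplayName>alice</DisplayName></Owner>'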
+
+class User:
+    def __init__(self, parent=None, id='', display_name=''):
+        if parent:
+            parent.owner = self
+        self.type = None
+        self.id = id
+        self.display_name = display_name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'DisplayName':
+            self.display_name = value
+        elif name == 'ID':
+            self.id = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self, element_name='Owner'):
+        if self.type:
+            s = '<%s xsi:type="%s">' % (element_name, self.type)
+        else:
+            s = '<%s>' % element_name
+        s += '<ID>%s</ID>' % self.id
+        s += '<DisplayName>%s</DisplayName>' % self.display_name
+        s += '</%s>' % element_name
+        return s
diff --git a/awx/lib/site-packages/boto/s3/website.py b/awx/lib/site-packages/boto/s3/website.py
new file mode 100644
index 0000000000..c307f3e990
--- /dev/null
+++ b/awx/lib/site-packages/boto/s3/website.py
@@ -0,0 +1,293 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+def tag(key, value):
+    start = '<%s>' % key
+    end = '</%s>' % key
+    return '%s%s%s' % (start, value, end)
+
+
+class WebsiteConfiguration(object):
+    """
+    Website configuration for a bucket.
+
+    :ivar suffix: Suffix that is appended to a request that is for a
+        "directory" on the website endpoint (e.g. if the suffix is
+        index.html and you make a request to samplebucket/images/,
+        the data that is returned will be for the object with the
+        key name images/index.html).  The suffix must not be empty
+        and must not include a slash character.
+
+    :ivar error_key: The object key name to use when a 4xx class error
+        occurs.  This key identifies the page that is returned when
+        such an error occurs.
+
+    :ivar redirect_all_requests_to: Describes the redirect behavior for every
+        request to this bucket's website endpoint.  If this value is not
+        None, no other values are considered when configuring the website
+        configuration for the bucket.  This is an instance of
+        ``RedirectLocation``.
+
+    :ivar routing_rules: ``RoutingRules`` object which specifies conditions
+        and redirects that apply when the conditions are met.
+
+    """
+
+    def __init__(self, suffix=None, error_key=None,
+                 redirect_all_requests_to=None, routing_rules=None):
+        self.suffix = suffix
+        self.error_key = error_key
+        self.redirect_all_requests_to = redirect_all_requests_to
+        if routing_rules is not None:
+            self.routing_rules = routing_rules
+        else:
+            self.routing_rules = RoutingRules()
+
+    def startElement(self, name, attrs, connection):
+        if name == 'RoutingRules':
+            self.routing_rules = RoutingRules()
+            return self.routing_rules
+        elif name == 'IndexDocument':
+            return _XMLKeyValue([('Suffix', 'suffix')], container=self)
+        elif name == 'ErrorDocument':
+            return _XMLKeyValue([('Key', 'error_key')], container=self)
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def to_xml(self):
+        parts = ['<?xml version="1.0" encoding="UTF-8"?>',
+                 '<WebsiteConfiguration '
+                 'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">']
+        if self.suffix is not None:
+            parts.append(tag('IndexDocument', tag('Suffix', self.suffix)))
+        if self.error_key is not None:
+            parts.append(tag('ErrorDocument', tag('Key', self.error_key)))
+        if self.redirect_all_requests_to is not None:
+            parts.append(self.redirect_all_requests_to.to_xml())
+        if self.routing_rules:
+            parts.append(self.routing_rules.to_xml())
+        parts.append('</WebsiteConfiguration>')
+        return ''.join(parts)
+
+
+class _XMLKeyValue(object):
+    def __init__(self, translator, container=None):
+        self.translator = translator
+        if container:
+            self.container = container
+        else:
+            self.container = self
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        for xml_key, attr_name in self.translator:
+            if name == xml_key:
+                setattr(self.container, attr_name, value)
+
+    def to_xml(self):
+        parts = []
+        for xml_key, attr_name in self.translator:
+            content = getattr(self.container, attr_name)
+            if content is not None:
+                parts.append(tag(xml_key, content))
+        return ''.join(parts)
+
+
+class RedirectLocation(_XMLKeyValue):
+    """Specify redirect behavior for every request to a bucket's endpoint.
+
+    :ivar hostname: Name of the host where requests will be redirected.
+
+    :ivar protocol: Protocol to use (http, https) when redirecting requests.
+        The default is the protocol that is used in the original request.
+
+    """
+    TRANSLATOR = [('HostName', 'hostname'),
+                  ('Protocol', 'protocol'),
+                  ]
+
+    def __init__(self, hostname=None, protocol=None):
+        self.hostname = hostname
+        self.protocol = protocol
+        super(RedirectLocation, self).__init__(self.TRANSLATOR)
+
+    def to_xml(self):
+        return tag('RedirectAllRequestsTo',
+                   super(RedirectLocation, self).to_xml())
+
+
+class RoutingRules(list):
+
+    def add_rule(self, rule):
+        """
+
+        :type rule: :class:`boto.s3.website.RoutingRule`
+        :param rule: A routing rule.
+
+        :return: This ``RoutingRules`` object is returned,
+            so that it can chain subsequent calls.
+
+        """
+        self.append(rule)
+        return self
+
+    def startElement(self, name, attrs, connection):
+        if name == 'RoutingRule':
+            rule = RoutingRule(Condition(), Redirect())
+            self.add_rule(rule)
+            return rule
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def __repr__(self):
+        return "RoutingRules(%s)" % super(RoutingRules, self).__repr__()
+
+    def to_xml(self):
+        inner_text = []
+        for rule in self:
+            inner_text.append(rule.to_xml())
+        return tag('RoutingRules', '\n'.join(inner_text))
+
+
+class RoutingRule(object):
+    """Represents a single routing rule.
+
+    There are convenience methods to make creating rules
+    more concise::
+
+        rule = RoutingRule.when(key_prefix='foo/').then_redirect('example.com')
+
+    :ivar condition: Describes the condition that must be met for the
+        specified redirect to apply.
+
+    :ivar redirect: Specifies redirect behavior.  You can redirect requests to
+        another host, to another page, or with another protocol.  In the event
+        of an error, you can specify a different error code to return.
+
+    """
+    def __init__(self, condition=None, redirect=None):
+        self.condition = condition
+        self.redirect = redirect
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Condition':
+            return self.condition
+        elif name == 'Redirect':
+            return self.redirect
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def to_xml(self):
+        parts = []
+        if self.condition:
+            parts.append(self.condition.to_xml())
+        if self.redirect:
+            parts.append(self.redirect.to_xml())
+        return tag('RoutingRule', '\n'.join(parts))
+
+    @classmethod
+    def when(cls, key_prefix=None, http_error_code=None):
+        return cls(Condition(key_prefix=key_prefix,
+                             http_error_code=http_error_code), None)
+
+    def then_redirect(self, hostname=None, protocol=None, replace_key=None,
+                      replace_key_prefix=None, http_redirect_code=None):
+        self.redirect = Redirect(
+            hostname=hostname, protocol=protocol,
+            replace_key=replace_key,
+            replace_key_prefix=replace_key_prefix,
+            http_redirect_code=http_redirect_code)
+        return self
+
+
+class Condition(_XMLKeyValue):
+    """
+    :ivar key_prefix: The object key name prefix when the redirect is applied.
+        For example, to redirect requests for ExamplePage.html, the key prefix
+        will be ExamplePage.html.  To redirect requests for all pages with the
+        prefix docs/, the key prefix will be docs/, which identifies all
+        objects in the docs/ folder.
+
+    :ivar http_error_code: The HTTP error code when the redirect is applied.
+        In the event of an error, if the error code equals this value, then
+        the specified redirect is applied.
+
+    """
+    TRANSLATOR = [
+        ('KeyPrefixEquals', 'key_prefix'),
+        ('HttpErrorCodeReturnedEquals', 'http_error_code'),
+    ]
+
+    def __init__(self, key_prefix=None, http_error_code=None):
+        self.key_prefix = key_prefix
+        self.http_error_code = http_error_code
+        super(Condition, self).__init__(self.TRANSLATOR)
+
+    def to_xml(self):
+        return tag('Condition', super(Condition, self).to_xml())
+
+
+class Redirect(_XMLKeyValue):
+    """
+    :ivar hostname: The host name to use in the redirect request.
+
+    :ivar protocol: The protocol to use in the redirect request.  Can be
+        either 'http' or 'https'.
+
+    :ivar replace_key: The specific object key to use in the redirect request.
+        For example, redirect request to error.html.
+
+    :ivar replace_key_prefix: The object key prefix to use in the redirect
+        request.  For example, to redirect requests for all pages with prefix
+        docs/ (objects in the docs/ folder) to documents/, you can set a
+        condition block with KeyPrefixEquals set to docs/ and in the Redirect
+        set ReplaceKeyPrefixWith to /documents.
+
+    :ivar http_redirect_code: The HTTP redirect code to use on the response.
+ + """ + + TRANSLATOR = [ + ('Protocol', 'protocol'), + ('HostName', 'hostname'), + ('ReplaceKeyWith', 'replace_key'), + ('ReplaceKeyPrefixWith', 'replace_key_prefix'), + ('HttpRedirectCode', 'http_redirect_code'), + ] + + def __init__(self, hostname=None, protocol=None, replace_key=None, + replace_key_prefix=None, http_redirect_code=None): + self.hostname = hostname + self.protocol = protocol + self.replace_key = replace_key + self.replace_key_prefix = replace_key_prefix + self.http_redirect_code = http_redirect_code + super(Redirect, self).__init__(self.TRANSLATOR) + + def to_xml(self): + return tag('Redirect', super(Redirect, self).to_xml()) + + diff --git a/awx/lib/site-packages/boto/sdb/__init__.py b/awx/lib/site-packages/boto/sdb/__init__.py new file mode 100644 index 0000000000..bebc15221c --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/__init__.py @@ -0,0 +1,67 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from .regioninfo import SDBRegionInfo + + +def regions(): + """ + Get all available regions for the SDB service. + + :rtype: list + :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances + """ + return [SDBRegionInfo(name='us-east-1', + endpoint='sdb.amazonaws.com'), + SDBRegionInfo(name='eu-west-1', + endpoint='sdb.eu-west-1.amazonaws.com'), + SDBRegionInfo(name='us-west-1', + endpoint='sdb.us-west-1.amazonaws.com'), + SDBRegionInfo(name='sa-east-1', + endpoint='sdb.sa-east-1.amazonaws.com'), + SDBRegionInfo(name='us-west-2', + endpoint='sdb.us-west-2.amazonaws.com'), + SDBRegionInfo(name='ap-northeast-1', + endpoint='sdb.ap-northeast-1.amazonaws.com'), + SDBRegionInfo(name='ap-southeast-1', + endpoint='sdb.ap-southeast-1.amazonaws.com'), + SDBRegionInfo(name='ap-southeast-2', + endpoint='sdb.ap-southeast-2.amazonaws.com') + ] + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.sdb.connection.SDBConnection`. + + :type: str + :param region_name: The name of the region to connect to. 
+
+    :rtype: :class:`boto.sdb.connection.SDBConnection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+        name is given
+    """
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff --git a/awx/lib/site-packages/boto/sdb/connection.py b/awx/lib/site-packages/boto/sdb/connection.py
new file mode 100644
index 0000000000..dc5e01d55d
--- /dev/null
+++ b/awx/lib/site-packages/boto/sdb/connection.py
@@ -0,0 +1,617 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+import threading
+import boto
+from boto import handler
+from boto.connection import AWSQueryConnection
+from boto.sdb.domain import Domain, DomainMetaData
+from boto.sdb.item import Item
+from boto.sdb.regioninfo import SDBRegionInfo
+from boto.exception import SDBResponseError
+
+class ItemThread(threading.Thread):
+    """
+    A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class.
+    Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the
+    ``items`` instance variable after :py:meth:`run() <run>` is called.
+
+    .. tip:: The item retrieval will not start until
+        the :func:`run() <run>` method is called.
+    """
+    def __init__(self, name, domain_name, item_names):
+        """
+        :param str name: A thread name.  Used for identification.
+        :param str domain_name: The name of a SimpleDB
+            :class:`Domain <boto.sdb.domain.Domain>`
+        :type item_names: string or list of strings
+        :param item_names: The name(s) of the items to retrieve from the
+            specified :class:`Domain <boto.sdb.domain.Domain>`.
+        :ivar list items: A list of items retrieved.  Starts as empty list.
+        """
+        threading.Thread.__init__(self, name=name)
+        #print 'starting %s with %d items' % (name, len(item_names))
+        self.domain_name = domain_name
+        self.conn = SDBConnection()
+        self.item_names = item_names
+        self.items = []
+
+    def run(self):
+        """
+        Start the threaded retrieval of items.  Populates the
+        ``items`` list with :class:`Item <boto.sdb.item.Item>` objects.
+        """
+        for item_name in self.item_names:
+            item = self.conn.get_attributes(self.domain_name, item_name)
+            self.items.append(item)
+
+#boto.set_stream_logger('sdb')
+
+class SDBConnection(AWSQueryConnection):
+    """
+    This class serves as a gateway to your SimpleDB region (defaults to
+    us-east-1).  Methods within allow access to SimpleDB
+    :class:`Domain <boto.sdb.domain.Domain>` objects and their associated
+    :class:`Item <boto.sdb.item.Item>` objects.
+
+    .. tip::
+        While you may instantiate this class directly, it may be easier to
+        go through :py:func:`boto.connect_sdb`.
+ """ + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'sdb.us-east-1.amazonaws.com' + APIVersion = '2009-04-15' + ResponseError = SDBResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + converter=None, security_token=None, validate_certs=True): + """ + For any keywords that aren't documented, refer to the parent class, + :py:class:`boto.connection.AWSAuthConnection`. You can avoid having + to worry about these keyword arguments by instantiating these objects + via :py:func:`boto.connect_sdb`. + + :type region: :class:`boto.sdb.regioninfo.SDBRegionInfo` + :keyword region: Explicitly specify a region. Defaults to ``us-east-1`` + if not specified. You may also specify the region in your ``boto.cfg``: + + .. code-block:: cfg + + [SDB] + region = eu-west-1 + + """ + if not region: + region_name = boto.config.get('SDB', 'region', self.DefaultRegionName) + for reg in boto.sdb.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, + proxy_port, proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs) + self.box_usage = 0.0 + self.converter = converter + self.item_cls = Item + + def _required_auth_capability(self): + return ['sdb'] + + def set_item_cls(self, cls): + """ + While the default item class is :py:class:`boto.sdb.item.Item`, this + default may be overridden. Use this method to change a connection's + item class. + + :param object cls: The new class to set as this connection's item + class. See the default item class for inspiration as to what your + replacement should/could look like. 
+ """ + self.item_cls = cls + + def _build_name_value_list(self, params, attributes, replace=False, + label='Attribute'): + keys = sorted(attributes.keys()) + i = 1 + for key in keys: + value = attributes[key] + if isinstance(value, list): + for v in value: + params['%s.%d.Name' % (label, i)] = key + if self.converter: + v = self.converter.encode(v) + params['%s.%d.Value' % (label, i)] = v + if replace: + params['%s.%d.Replace' % (label, i)] = 'true' + i += 1 + else: + params['%s.%d.Name' % (label, i)] = key + if self.converter: + value = self.converter.encode(value) + params['%s.%d.Value' % (label, i)] = value + if replace: + params['%s.%d.Replace' % (label, i)] = 'true' + i += 1 + + def _build_expected_value(self, params, expected_value): + params['Expected.1.Name'] = expected_value[0] + if expected_value[1] is True: + params['Expected.1.Exists'] = 'true' + elif expected_value[1] is False: + params['Expected.1.Exists'] = 'false' + else: + params['Expected.1.Value'] = expected_value[1] + + def _build_batch_list(self, params, items, replace=False): + item_names = items.keys() + i = 0 + for item_name in item_names: + params['Item.%d.ItemName' % i] = item_name + j = 0 + item = items[item_name] + if item is not None: + attr_names = item.keys() + for attr_name in attr_names: + value = item[attr_name] + if isinstance(value, list): + for v in value: + if self.converter: + v = self.converter.encode(v) + params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name + params['Item.%d.Attribute.%d.Value' % (i, j)] = v + if replace: + params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true' + j += 1 + else: + params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name + if self.converter: + value = self.converter.encode(value) + params['Item.%d.Attribute.%d.Value' % (i, j)] = value + if replace: + params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true' + j += 1 + i += 1 + + def _build_name_list(self, params, attribute_names): + i = 1 + attribute_names.sort() + for name in attribute_names: + params['Attribute.%d.Name' % i] = name + i += 1 + + def get_usage(self): + """ + Returns the BoxUsage (in USD) accumulated on this specific SDBConnection + instance. + + .. tip:: This can be out of date, and should only be treated as a + rough estimate. Also note that this estimate only applies to the + requests made on this specific connection instance. It is by + no means an account-wide estimate. + + :rtype: float + :return: The accumulated BoxUsage of all requests made on the connection. + """ + return self.box_usage + + def print_usage(self): + """ + Print the BoxUsage and approximate costs of all requests made on + this specific SDBConnection instance. + + .. tip:: This can be out of date, and should only be treated as a + rough estimate. Also note that this estimate only applies to the + requests made on this specific connection instance. It is by + no means an account-wide estimate. + """ + print 'Total Usage: %f compute seconds' % self.box_usage + cost = self.box_usage * 0.14 + print 'Approximate Cost: $%f' % cost + + def get_domain(self, domain_name, validate=True): + """ + Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name + matches ``domain_name``. + + :param str domain_name: The name of the domain to retrieve + :keyword bool validate: When ``True``, check to see if the domain + actually exists. If ``False``, blindly return a + :py:class:`Domain ` object with the + specified name set. 
+ + :raises: + :py:class:`boto.exception.SDBResponseError` if ``validate`` is + ``True`` and no match could be found. + + :rtype: :py:class:`boto.sdb.domain.Domain` + :return: The requested domain + """ + domain = Domain(self, domain_name) + if validate: + self.select(domain, """select * from `%s` limit 1""" % domain_name) + return domain + + def lookup(self, domain_name, validate=True): + """ + Lookup an existing SimpleDB domain. This differs from + :py:meth:`get_domain` in that ``None`` is returned if ``validate`` is + ``True`` and no match was found (instead of raising an exception). + + :param str domain_name: The name of the domain to retrieve + + :param bool validate: If ``True``, a ``None`` value will be returned + if the specified domain can't be found. If ``False``, a + :py:class:`Domain ` object will be dumbly + returned, regardless of whether it actually exists. + + :rtype: :class:`boto.sdb.domain.Domain` object or ``None`` + :return: The Domain object or ``None`` if the domain does not exist. + """ + try: + domain = self.get_domain(domain_name, validate) + except: + domain = None + return domain + + def get_all_domains(self, max_domains=None, next_token=None): + """ + Returns a :py:class:`boto.resultset.ResultSet` containing + all :py:class:`boto.sdb.domain.Domain` objects associated with + this connection's Access Key ID. + + :keyword int max_domains: Limit the returned + :py:class:`ResultSet ` to the specified + number of members. + :keyword str next_token: A token string that was returned in an + earlier call to this method as the ``next_token`` attribute + on the returned :py:class:`ResultSet ` + object. This attribute is set if there are more than Domains than + the value specified in the ``max_domains`` keyword. Pass the + ``next_token`` value from you earlier query in this keyword to + get the next 'page' of domains. + """ + params = {} + if max_domains: + params['MaxNumberOfDomains'] = max_domains + if next_token: + params['NextToken'] = next_token + return self.get_list('ListDomains', params, [('DomainName', Domain)]) + + def create_domain(self, domain_name): + """ + Create a SimpleDB domain. + + :type domain_name: string + :param domain_name: The name of the new domain + + :rtype: :class:`boto.sdb.domain.Domain` object + :return: The newly created domain + """ + params = {'DomainName':domain_name} + d = self.get_object('CreateDomain', params, Domain) + d.name = domain_name + return d + + def get_domain_and_name(self, domain_or_name): + """ + Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a + ``tuple`` with the following members (in order): + + * In instance of :class:`boto.sdb.domain.Domain` for the requested + domain + * The domain's name as a ``str`` + + :type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain` + :param domain_or_name: The domain or domain name to get the domain + and name for. + + :raises: :class:`boto.exception.SDBResponseError` when an invalid + domain name is specified. + + :rtype: tuple + :return: A ``tuple`` with contents outlined as per above. + """ + if (isinstance(domain_or_name, Domain)): + return (domain_or_name, domain_or_name.name) + else: + return (self.get_domain(domain_or_name), domain_or_name) + + def delete_domain(self, domain_or_name): + """ + Delete a SimpleDB domain. + + .. caution:: This will delete the domain and all items within the domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. 
+ :param domain_or_name: Either the name of a domain or a Domain object + + :rtype: bool + :return: True if successful + + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName':domain_name} + return self.get_status('DeleteDomain', params) + + def domain_metadata(self, domain_or_name): + """ + Get the Metadata for a SimpleDB domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :rtype: :class:`boto.sdb.domain.DomainMetaData` object + :return: The newly created domain metadata object + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName':domain_name} + d = self.get_object('DomainMetadata', params, DomainMetaData) + d.domain = domain + return d + + def put_attributes(self, domain_or_name, item_name, attributes, + replace=True, expected_value=None): + """ + Store attributes for a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being + stored. + + :type attribute_names: dict or dict-like object + :param attribute_names: The name/value pairs to store as attributes + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName' : domain_name, + 'ItemName' : item_name} + self._build_name_value_list(params, attributes, replace) + if expected_value: + self._build_expected_value(params, expected_value) + return self.get_status('PutAttributes', params) + + def batch_put_attributes(self, domain_or_name, items, replace=True): + """ + Store attributes for multiple items in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are themselves dictionaries + of attribute names/values, exactly the same as the + attribute_names parameter of the scalar put_attributes + call. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. 
+ + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName' : domain_name} + self._build_batch_list(params, items, replace) + return self.get_status('BatchPutAttributes', params, verb='POST') + + def get_attributes(self, domain_or_name, item_name, attribute_names=None, + consistent_read=False, item=None): + """ + Retrieve attributes for a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are + being retrieved. + + :type attribute_names: string or list of strings + :param attribute_names: An attribute name or list of attribute names. + This parameter is optional. If not supplied, all attributes will + be retrieved for the item. + + :type consistent_read: bool + :param consistent_read: When set to true, ensures that the most recent + data is returned. + + :type item: :class:`boto.sdb.item.Item` + :keyword item: Instead of instantiating a new Item object, you may + specify one to update. + + :rtype: :class:`boto.sdb.item.Item` + :return: An Item with the requested attribute name/values set on it + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName' : domain_name, + 'ItemName' : item_name} + if consistent_read: + params['ConsistentRead'] = 'true' + if attribute_names: + if not isinstance(attribute_names, list): + attribute_names = [attribute_names] + self.build_list_params(params, attribute_names, 'AttributeName') + response = self.make_request('GetAttributes', params) + body = response.read() + if response.status == 200: + if item == None: + item = self.item_cls(domain, item_name) + h = handler.XmlHandler(item, self) + xml.sax.parseString(body, h) + return item + else: + raise SDBResponseError(response.status, response.reason, body) + + def delete_attributes(self, domain_or_name, item_name, attr_names=None, + expected_value=None): + """ + Delete attributes from a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being + deleted. + + :type attributes: dict, list or :class:`boto.sdb.item.Item` + :param attributes: Either a list containing attribute names which + will cause all values associated with that attribute + name to be deleted or a dict or Item containing the + attribute names and keys and list of values to + delete as the value. If no value is supplied, + all attribute name/values for the item will be + deleted. + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute. 
+ + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName':domain_name, + 'ItemName' : item_name} + if attr_names: + if isinstance(attr_names, list): + self._build_name_list(params, attr_names) + elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls): + self._build_name_value_list(params, attr_names) + if expected_value: + self._build_expected_value(params, expected_value) + return self.get_status('DeleteAttributes', params) + + def batch_delete_attributes(self, domain_or_name, items): + """ + Delete multiple items in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are either: + + * dictionaries of attribute names/values, exactly the + same as the attribute_names parameter of the scalar + put_attributes call. The attribute name/value pairs + will only be deleted if they match the name/value + pairs passed in. + * None which means that all attributes associated + with the item should be deleted. + + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName' : domain_name} + self._build_batch_list(params, items, False) + return self.get_status('BatchDeleteAttributes', params, verb='POST') + + def select(self, domain_or_name, query='', next_token=None, + consistent_read=False): + """ + Returns a set of Attributes for item names within domain_name that + match the query. The query must be expressed in using the SELECT + style syntax rather than the original SimpleDB query language. + Even though the select request does not require a domain object, + a domain object must be passed into this method so the Item objects + returned can point to the appropriate domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object + :param domain_or_name: Either the name of a domain or a Domain object + + :type query: string + :param query: The SimpleDB query to be performed. + + :type consistent_read: bool + :param consistent_read: When set to true, ensures that the most recent + data is returned. + + :rtype: ResultSet + :return: An iterator containing the results. 
+ """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'SelectExpression' : query} + if consistent_read: + params['ConsistentRead'] = 'true' + if next_token: + params['NextToken'] = next_token + try: + return self.get_list('Select', params, [('Item', self.item_cls)], + parent=domain) + except SDBResponseError, e: + e.body = "Query: %s\n%s" % (query, e.body) + raise e diff --git a/awx/lib/site-packages/boto/sdb/db/__init__.py b/awx/lib/site-packages/boto/sdb/db/__init__.py new file mode 100644 index 0000000000..71f6b7b738 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/awx/lib/site-packages/boto/sdb/db/blob.py b/awx/lib/site-packages/boto/sdb/db/blob.py new file mode 100644 index 0000000000..b50794c961 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/blob.py @@ -0,0 +1,75 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ + +class Blob(object): + """Blob object""" + def __init__(self, value=None, file=None, id=None): + self._file = file + self.id = id + self.value = value + + @property + def file(self): + from StringIO import StringIO + if self._file: + f = self._file + else: + f = StringIO(self.value) + return f + + def __str__(self): + return unicode(self).encode('utf-8') + + def __unicode__(self): + if hasattr(self.file, "get_contents_as_string"): + value = self.file.get_contents_as_string() + else: + value = self.file.getvalue() + if isinstance(value, unicode): + return value + else: + return value.decode('utf-8') + + + def read(self): + if hasattr(self.file, "get_contents_as_string"): + return self.file.get_contents_as_string() + else: + return self.file.read() + + def readline(self): + return self.file.readline() + + def next(self): + return self.file.next() + + def __iter__(self): + return iter(self.file) + + @property + def size(self): + if self._file: + return self._file.size + elif self.value: + return len(self.value) + else: + return 0 diff --git a/awx/lib/site-packages/boto/sdb/db/key.py b/awx/lib/site-packages/boto/sdb/db/key.py new file mode 100644 index 0000000000..f630d398a3 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/key.py @@ -0,0 +1,59 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
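+
+# Usage sketch (illustrative; not part of upstream boto): `obj` below stands
+# in for a saved Model instance that exposes an `id` and a `kind()` method:
+#
+#   k = Key(obj=obj)
+#   k.id_or_name()      # -> obj.id
+#   k.has_id_or_name()  # -> True when obj.id is set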
+
+class Key(object):
+
+    @classmethod
+    def from_path(cls, *args, **kwds):
+        raise NotImplementedError("Paths are not currently supported")
+
+    def __init__(self, encoded=None, obj=None):
+        self.name = None
+        if obj:
+            self.id = obj.id
+            self.kind = obj.kind()
+        else:
+            self.id = None
+            self.kind = None
+
+    def app(self):
+        raise NotImplementedError("Applications are not currently supported")
+
+    def kind(self):
+        return self.kind
+
+    def id(self):
+        return self.id
+
+    def name(self):
+        raise NotImplementedError("Key Names are not currently supported")
+
+    def id_or_name(self):
+        return self.id
+
+    def has_id_or_name(self):
+        return self.id is not None
+
+    def parent(self):
+        raise NotImplementedError("Key parents are not currently supported")
+
+    def __str__(self):
+        return self.id_or_name()
diff --git a/awx/lib/site-packages/boto/sdb/db/manager/__init__.py b/awx/lib/site-packages/boto/sdb/db/manager/__init__.py
new file mode 100644
index 0000000000..ded1716cbb
--- /dev/null
+++ b/awx/lib/site-packages/boto/sdb/db/manager/__init__.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import boto
+
+
+def get_manager(cls):
+    """
+    Returns the appropriate Manager class for a given Model class.  It
+    does this by looking in the boto config for a section like this::
+
+        [DB]
+        db_type = SimpleDB
+        db_user = <aws access key id>
+        db_passwd = <aws secret access key>
+        db_name = my_domain
+        [DB_TestBasic]
+        db_type = SimpleDB
+        db_user = <another aws access key id>
+        db_passwd = <another aws secret access key>
+        db_name = basic_domain
+        db_port = 1111
+
+    The values in the DB section are "generic values" that will be used
+    if nothing more specific is found.  You can also create a section for
+    a specific Model class that gives the db info for that class.
+    In the example above, TestBasic is a Model subclass.
+ """ + db_user = boto.config.get('DB', 'db_user', None) + db_passwd = boto.config.get('DB', 'db_passwd', None) + db_type = boto.config.get('DB', 'db_type', 'SimpleDB') + db_name = boto.config.get('DB', 'db_name', None) + db_table = boto.config.get('DB', 'db_table', None) + db_host = boto.config.get('DB', 'db_host', "sdb.amazonaws.com") + db_port = boto.config.getint('DB', 'db_port', 443) + enable_ssl = boto.config.getbool('DB', 'enable_ssl', True) + sql_dir = boto.config.get('DB', 'sql_dir', None) + debug = boto.config.getint('DB', 'debug', 0) + # first see if there is a fully qualified section name in the Boto config + module_name = cls.__module__.replace('.', '_') + db_section = 'DB_' + module_name + '_' + cls.__name__ + if not boto.config.has_section(db_section): + db_section = 'DB_' + cls.__name__ + if boto.config.has_section(db_section): + db_user = boto.config.get(db_section, 'db_user', db_user) + db_passwd = boto.config.get(db_section, 'db_passwd', db_passwd) + db_type = boto.config.get(db_section, 'db_type', db_type) + db_name = boto.config.get(db_section, 'db_name', db_name) + db_table = boto.config.get(db_section, 'db_table', db_table) + db_host = boto.config.get(db_section, 'db_host', db_host) + db_port = boto.config.getint(db_section, 'db_port', db_port) + enable_ssl = boto.config.getint(db_section, 'enable_ssl', enable_ssl) + debug = boto.config.getint(db_section, 'debug', debug) + elif hasattr(cls, "_db_name") and cls._db_name is not None: + # More specific then the generic DB config is any _db_name class property + db_name = cls._db_name + elif hasattr(cls.__bases__[0], "_manager"): + return cls.__bases__[0]._manager + if db_type == 'SimpleDB': + from boto.sdb.db.manager.sdbmanager import SDBManager + return SDBManager(cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, sql_dir, enable_ssl) + elif db_type == 'XML': + from boto.sdb.db.manager.xmlmanager import XMLManager + return XMLManager(cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, sql_dir, enable_ssl) + else: + raise ValueError('Unknown db_type: %s' % db_type) diff --git a/awx/lib/site-packages/boto/sdb/db/manager/sdbmanager.py b/awx/lib/site-packages/boto/sdb/db/manager/sdbmanager.py new file mode 100644 index 0000000000..fd9777deb6 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/manager/sdbmanager.py @@ -0,0 +1,736 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+import boto
+import re
+from boto.utils import find_class
+import uuid
+from boto.sdb.db.key import Key
+from boto.sdb.db.blob import Blob
+from boto.sdb.db.property import ListProperty, MapProperty
+from datetime import datetime, date, time
+from boto.exception import SDBPersistenceError, S3ResponseError
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
+
+class TimeDecodeError(Exception):
+    pass
+
+
+class SDBConverter(object):
+    """
+    Responsible for converting base Python types to format compatible
+    with underlying database. For SimpleDB, that means everything
+    needs to be converted to a string when stored in SimpleDB and from
+    a string when retrieved.
+
+    To convert a value, pass it to the encode or decode method. The
+    encode method will take a Python native value and convert to DB
+    format. The decode method will take a DB format value and convert
+    it to Python native format. To find the appropriate method to
+    call, the generic encode/decode methods will look for the
+    type-specific method by searching for a method
+    called "encode_<type name>" or "decode_<type name>".
+    """
+    def __init__(self, manager):
+        # Do a delayed import to prevent possible circular import errors.
+        from boto.sdb.db.model import Model
+        self.model_class = Model
+        self.manager = manager
+        self.type_map = {bool: (self.encode_bool, self.decode_bool),
+                         int: (self.encode_int, self.decode_int),
+                         long: (self.encode_long, self.decode_long),
+                         float: (self.encode_float, self.decode_float),
+                         self.model_class: (
+                             self.encode_reference, self.decode_reference
+                         ),
+                         Key: (self.encode_reference, self.decode_reference),
+                         datetime: (self.encode_datetime, self.decode_datetime),
+                         date: (self.encode_date, self.decode_date),
+                         time: (self.encode_time, self.decode_time),
+                         Blob: (self.encode_blob, self.decode_blob),
+                         str: (self.encode_string, self.decode_string),
+                         }
+
+    def encode(self, item_type, value):
+        try:
+            if self.model_class in item_type.mro():
+                item_type = self.model_class
+        except:
+            pass
+        if item_type in self.type_map:
+            encode = self.type_map[item_type][0]
+            return encode(value)
+        return value
+
+    def decode(self, item_type, value):
+        if item_type in self.type_map:
+            decode = self.type_map[item_type][1]
+            return decode(value)
+        return value
+
+    def encode_list(self, prop, value):
+        if value in (None, []):
+            return []
+        if not isinstance(value, list):
+            # This is a little trick to avoid encoding when it's just a single value,
+            # since that most likely means it's from a query
+            item_type = getattr(prop, "item_type")
+            return self.encode(item_type, value)
+        # Just enumerate(value) won't work here because
+        # we need to add in some zero padding
+        # We support lists up to 1,000 attributes, since
+        # SDB technically only supports 1024 attributes anyway
+        values = {}
+        for k, v in enumerate(value):
+            values["%03d" % k] = v
+        return self.encode_map(prop, values)
+
+    def encode_map(self, prop, value):
+        import urllib
+        if value == None:
+            return None
+        if not isinstance(value, dict):
+            raise ValueError('Expected a dict value, got %s' % type(value))
+        new_value = []
+        for key in value:
+            item_type = getattr(prop, "item_type")
+            if self.model_class in item_type.mro():
+                item_type = self.model_class
+            encoded_value = self.encode(item_type, value[key])
+            if encoded_value != None:
+                new_value.append('%s:%s' % (urllib.quote(key), encoded_value))
+        return new_value
+
+    def encode_prop(self, prop, value):
+        if isinstance(prop, ListProperty):
+            return self.encode_list(prop, value)
+        elif isinstance(prop, MapProperty):
+            return self.encode_map(prop,
value) + else: + return self.encode(prop.data_type, value) + + def decode_list(self, prop, value): + if not isinstance(value, list): + value = [value] + if hasattr(prop, 'item_type'): + item_type = getattr(prop, "item_type") + dec_val = {} + for val in value: + if val != None: + k, v = self.decode_map_element(item_type, val) + try: + k = int(k) + except: + k = v + dec_val[k] = v + value = dec_val.values() + return value + + def decode_map(self, prop, value): + if not isinstance(value, list): + value = [value] + ret_value = {} + item_type = getattr(prop, "item_type") + for val in value: + k, v = self.decode_map_element(item_type, val) + ret_value[k] = v + return ret_value + + def decode_map_element(self, item_type, value): + """Decode a single element for a map""" + import urllib + key = value + if ":" in value: + key, value = value.split(':', 1) + key = urllib.unquote(key) + if self.model_class in item_type.mro(): + value = item_type(id=value) + else: + value = self.decode(item_type, value) + return (key, value) + + def decode_prop(self, prop, value): + if isinstance(prop, ListProperty): + return self.decode_list(prop, value) + elif isinstance(prop, MapProperty): + return self.decode_map(prop, value) + else: + return self.decode(prop.data_type, value) + + def encode_int(self, value): + value = int(value) + value += 2147483648 + return '%010d' % value + + def decode_int(self, value): + try: + value = int(value) + except: + boto.log.error("Error, %s is not an integer" % value) + value = 0 + value = int(value) + value -= 2147483648 + return int(value) + + def encode_long(self, value): + value = long(value) + value += 9223372036854775808 + return '%020d' % value + + def decode_long(self, value): + value = long(value) + value -= 9223372036854775808 + return value + + def encode_bool(self, value): + if value == True or str(value).lower() in ("true", "yes"): + return 'true' + else: + return 'false' + + def decode_bool(self, value): + if value.lower() == 'true': + return True + else: + return False + + def encode_float(self, value): + """ + See http://tools.ietf.org/html/draft-wood-ldapext-float-00. 
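+
+        Floats are stored as '<case> <exponent> <mantissa>' strings so that
+        plain lexicographic comparison of two encoded values orders them the
+        same way as the original numbers, which is all SimpleDB can compare.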
+ """ + s = '%e' % value + l = s.split('e') + mantissa = l[0].ljust(18, '0') + exponent = l[1] + if value == 0.0: + case = '3' + exponent = '000' + elif mantissa[0] != '-' and exponent[0] == '+': + case = '5' + exponent = exponent[1:].rjust(3, '0') + elif mantissa[0] != '-' and exponent[0] == '-': + case = '4' + exponent = 999 + int(exponent) + exponent = '%03d' % exponent + elif mantissa[0] == '-' and exponent[0] == '-': + case = '2' + mantissa = '%f' % (10 + float(mantissa)) + mantissa = mantissa.ljust(18, '0') + exponent = exponent[1:].rjust(3, '0') + else: + case = '1' + mantissa = '%f' % (10 + float(mantissa)) + mantissa = mantissa.ljust(18, '0') + exponent = 999 - int(exponent) + exponent = '%03d' % exponent + return '%s %s %s' % (case, exponent, mantissa) + + def decode_float(self, value): + case = value[0] + exponent = value[2:5] + mantissa = value[6:] + if case == '3': + return 0.0 + elif case == '5': + pass + elif case == '4': + exponent = '%03d' % (int(exponent) - 999) + elif case == '2': + mantissa = '%f' % (float(mantissa) - 10) + exponent = '-' + exponent + else: + mantissa = '%f' % (float(mantissa) - 10) + exponent = '%03d' % abs((int(exponent) - 999)) + return float(mantissa + 'e' + exponent) + + def encode_datetime(self, value): + if isinstance(value, str) or isinstance(value, unicode): + return value + if isinstance(value, datetime): + return value.strftime(ISO8601) + else: + return value.isoformat() + + def decode_datetime(self, value): + """Handles both Dates and DateTime objects""" + if value is None: + return value + try: + if "T" in value: + if "." in value: + # Handle true "isoformat()" dates, which may have a microsecond on at the end of them + return datetime.strptime(value.split(".")[0], "%Y-%m-%dT%H:%M:%S") + else: + return datetime.strptime(value, ISO8601) + else: + value = value.split("-") + return date(int(value[0]), int(value[1]), int(value[2])) + except Exception, e: + return None + + def encode_date(self, value): + if isinstance(value, str) or isinstance(value, unicode): + return value + return value.isoformat() + + def decode_date(self, value): + try: + value = value.split("-") + return date(int(value[0]), int(value[1]), int(value[2])) + except: + return None + + encode_time = encode_date + + def decode_time(self, value): + """ converts strings in the form of HH:MM:SS.mmmmmm + (created by datetime.time.isoformat()) to + datetime.time objects. + + Timzone-aware strings ("HH:MM:SS.mmmmmm+HH:MM") won't + be handled right now and will raise TimeDecodeError. 
+ """ + if '-' in value or '+' in value: + # TODO: Handle tzinfo + raise TimeDecodeError("Can't handle timezone aware objects: %r" % value) + tmp = value.split('.') + arg = map(int, tmp[0].split(':')) + if len(tmp) == 2: + arg.append(int(tmp[1])) + return time(*arg) + + def encode_reference(self, value): + if value in (None, 'None', '', ' '): + return None + if isinstance(value, str) or isinstance(value, unicode): + return value + else: + return value.id + + def decode_reference(self, value): + if not value or value == "None": + return None + return value + + def encode_blob(self, value): + if not value: + return None + if isinstance(value, str): + return value + + if not value.id: + bucket = self.manager.get_blob_bucket() + key = bucket.new_key(str(uuid.uuid4())) + value.id = "s3://%s/%s" % (key.bucket.name, key.name) + else: + match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value.id) + if match: + s3 = self.manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + key = bucket.get_key(match.group(2)) + else: + raise SDBPersistenceError("Invalid Blob ID: %s" % value.id) + + if value.value != None: + key.set_contents_from_string(value.value) + return value.id + + def decode_blob(self, value): + if not value: + return None + match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value) + if match: + s3 = self.manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + try: + key = bucket.get_key(match.group(2)) + except S3ResponseError, e: + if e.reason != "Forbidden": + raise + return None + else: + return None + if key: + return Blob(file=key, id="s3://%s/%s" % (key.bucket.name, key.name)) + else: + return None + + def encode_string(self, value): + """Convert ASCII, Latin-1 or UTF-8 to pure Unicode""" + if not isinstance(value, str): + return value + try: + return unicode(value, 'utf-8') + except: + # really, this should throw an exception. + # in the interest of not breaking current + # systems, however: + arr = [] + for ch in value: + arr.append(unichr(ord(ch))) + return u"".join(arr) + + def decode_string(self, value): + """Decoding a string is really nothing, just + return the value as-is""" + return value + + +class SDBManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, ddl_dir, enable_ssl, + consistent=None): + self.cls = cls + self.db_name = db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.ddl_dir = ddl_dir + self.enable_ssl = enable_ssl + self.s3 = None + self.bucket = None + self.converter = SDBConverter(self) + self._sdb = None + self._domain = None + if consistent == None and hasattr(cls, "__consistent__"): + consistent = cls.__consistent__ + self.consistent = consistent + + @property + def sdb(self): + if self._sdb is None: + self._connect() + return self._sdb + + @property + def domain(self): + if self._domain is None: + self._connect() + return self._domain + + def _connect(self): + args = dict(aws_access_key_id=self.db_user, + aws_secret_access_key=self.db_passwd, + is_secure=self.enable_ssl) + try: + region = [x for x in boto.sdb.regions() if x.endpoint == self.db_host][0] + args['region'] = region + except IndexError: + pass + self._sdb = boto.connect_sdb(**args) + # This assumes that the domain has already been created + # It's much more efficient to do it this way rather than + # having this make a roundtrip each time to validate. 
+ # The downside is that if the domain doesn't exist, it breaks + self._domain = self._sdb.lookup(self.db_name, validate=False) + if not self._domain: + self._domain = self._sdb.create_domain(self.db_name) + + def _object_lister(self, cls, query_lister): + for item in query_lister: + obj = self.get_object(cls, item.name, item) + if obj: + yield obj + + def encode_value(self, prop, value): + if value == None: + return None + if not prop: + return str(value) + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.db_user, self.db_passwd) + return self.s3 + + def get_blob_bucket(self, bucket_name=None): + s3 = self.get_s3_connection() + bucket_name = "%s-%s" % (s3.aws_access_key_id, self.domain.name) + bucket_name = bucket_name.lower() + try: + self.bucket = s3.get_bucket(bucket_name) + except: + self.bucket = s3.create_bucket(bucket_name) + return self.bucket + + def load_object(self, obj): + if not obj._loaded: + a = self.domain.get_attributes(obj.id, consistent_read=self.consistent) + if '__type__' in a: + for prop in obj.properties(hidden=False): + if prop.name in a: + value = self.decode_value(prop, a[prop.name]) + value = prop.make_value_from_datastore(value) + try: + setattr(obj, prop.name, value) + except Exception, e: + boto.log.exception(e) + obj._loaded = True + + def get_object(self, cls, id, a=None): + obj = None + if not a: + a = self.domain.get_attributes(id, consistent_read=self.consistent) + if '__type__' in a: + if not cls or a['__type__'] != cls.__name__: + cls = find_class(a['__module__'], a['__type__']) + if cls: + params = {} + for prop in cls.properties(hidden=False): + if prop.name in a: + value = self.decode_value(prop, a[prop.name]) + value = prop.make_value_from_datastore(value) + params[prop.name] = value + obj = cls(id, **params) + obj._loaded = True + else: + s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__']) + boto.log.info('sdbmanager: %s' % s) + return obj + + def get_object_from_id(self, id): + return self.get_object(None, id) + + def query(self, query): + query_str = "select * from `%s` %s" % (self.domain.name, self._build_filter_part(query.model_class, query.filters, query.sort_by, query.select)) + if query.limit: + query_str += " limit %s" % query.limit + rs = self.domain.select(query_str, max_items=query.limit, next_token = query.next_token) + query.rs = rs + return self._object_lister(query.model_class, rs) + + def count(self, cls, filters, quick=True, sort_by=None, select=None): + """ + Get the number of results that would + be returned in this query + """ + query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters, sort_by, select)) + count = 0 + for row in self.domain.select(query): + count += int(row['Count']) + if quick: + return count + return count + + def _build_filter(self, property, name, op, val): + if name == "__id__": + name = 'itemName()' + if name != "itemName()": + name = '`%s`' % name + if val == None: + if op in ('is', '='): + return "%(name)s is null" % {"name": name} + elif op in ('is not', '!='): + return "%s is not null" % name + else: + val = "" + if property.__class__ == ListProperty: + if op in ("is", "="): + op = "like" + elif op in ("!=", "not"): + op = "not like" + if not(op in ["like", "not like"] and val.startswith("%")): + val = "%%:%s" % val + return "%s %s '%s'" % (name, op, val.replace("'", "''")) 
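+
+    # For example (illustrative names only): a query on a hypothetical Task
+    # model with filter ('name =', 'deploy') is assembled by query() and
+    # _build_filter_part() below into a select like
+    #
+    #   select * from `my_domain` WHERE (`name` = 'deploy')
+    #       AND (`__type__` = 'Task')
+    #
+    # with itemName() substituted when filtering on __id__, and ListProperty
+    # filters rewritten as like '%:value' matches against the zero-padded
+    # map encoding used by encode_list() above.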
+ + def _build_filter_part(self, cls, filters, order_by=None, select=None): + """ + Build the filter part + """ + import types + query_parts = [] + + order_by_filtered = False + + if order_by: + if order_by[0] == "-": + order_by_method = "DESC" + order_by = order_by[1:] + else: + order_by_method = "ASC" + + if select: + if order_by and order_by in select: + order_by_filtered = True + query_parts.append("(%s)" % select) + + if isinstance(filters, str) or isinstance(filters, unicode): + query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__) + if order_by in ["__id__", "itemName()"]: + query += " ORDER BY itemName() %s" % order_by_method + elif order_by != None: + query += " ORDER BY `%s` %s" % (order_by, order_by_method) + return query + + for filter in filters: + filter_parts = [] + filter_props = filter[0] + if not isinstance(filter_props, list): + filter_props = [filter_props] + for filter_prop in filter_props: + (name, op) = filter_prop.strip().split(" ", 1) + value = filter[1] + property = cls.find_property(name) + if name == order_by: + order_by_filtered = True + if types.TypeType(value) == types.ListType: + filter_parts_sub = [] + for val in value: + val = self.encode_value(property, val) + if isinstance(val, list): + for v in val: + filter_parts_sub.append(self._build_filter(property, name, op, v)) + else: + filter_parts_sub.append(self._build_filter(property, name, op, val)) + filter_parts.append("(%s)" % (" OR ".join(filter_parts_sub))) + else: + val = self.encode_value(property, value) + if isinstance(val, list): + for v in val: + filter_parts.append(self._build_filter(property, name, op, v)) + else: + filter_parts.append(self._build_filter(property, name, op, val)) + query_parts.append("(%s)" % (" or ".join(filter_parts))) + + + type_query = "(`__type__` = '%s'" % cls.__name__ + for subclass in self._get_all_decendents(cls).keys(): + type_query += " or `__type__` = '%s'" % subclass + type_query += ")" + query_parts.append(type_query) + + order_by_query = "" + + if order_by: + if not order_by_filtered: + query_parts.append("`%s` LIKE '%%'" % order_by) + if order_by in ["__id__", "itemName()"]: + order_by_query = " ORDER BY itemName() %s" % order_by_method + else: + order_by_query = " ORDER BY `%s` %s" % (order_by, order_by_method) + + if len(query_parts) > 0: + return "WHERE %s %s" % (" AND ".join(query_parts), order_by_query) + else: + return "" + + + def _get_all_decendents(self, cls): + """Get all decendents for a given class""" + decendents = {} + for sc in cls.__sub_classes__: + decendents[sc.__name__] = sc + decendents.update(self._get_all_decendents(sc)) + return decendents + + def query_gql(self, query_string, *args, **kwds): + raise NotImplementedError("GQL queries not supported in SimpleDB") + + def save_object(self, obj, expected_value=None): + if not obj.id: + obj.id = str(uuid.uuid4()) + + attrs = {'__type__': obj.__class__.__name__, + '__module__': obj.__class__.__module__, + '__lineage__': obj.get_lineage()} + del_attrs = [] + for property in obj.properties(hidden=False): + value = property.get_value_for_datastore(obj) + if value is not None: + value = self.encode_value(property, value) + if value == []: + value = None + if value == None: + del_attrs.append(property.name) + continue + attrs[property.name] = value + if property.unique: + try: + args = {property.name: value} + obj2 = obj.find(**args).next() + if obj2.id != obj.id: + raise SDBPersistenceError("Error: %s must be unique!" 
% property.name) + except(StopIteration): + pass + # Convert the Expected value to SDB format + if expected_value: + prop = obj.find_property(expected_value[0]) + v = expected_value[1] + if v is not None and not isinstance(v, bool): + v = self.encode_value(prop, v) + expected_value[1] = v + self.domain.put_attributes(obj.id, attrs, replace=True, expected_value=expected_value) + if len(del_attrs) > 0: + self.domain.delete_attributes(obj.id, del_attrs) + return obj + + def delete_object(self, obj): + self.domain.delete_attributes(obj.id) + + def set_property(self, prop, obj, name, value): + setattr(obj, name, value) + value = prop.get_value_for_datastore(obj) + value = self.encode_value(prop, value) + if prop.unique: + try: + args = {prop.name: value} + obj2 = obj.find(**args).next() + if obj2.id != obj.id: + raise SDBPersistenceError("Error: %s must be unique!" % prop.name) + except(StopIteration): + pass + self.domain.put_attributes(obj.id, {name: value}, replace=True) + + def get_property(self, prop, obj, name): + a = self.domain.get_attributes(obj.id, consistent_read=self.consistent) + + # try to get the attribute value from SDB + if name in a: + value = self.decode_value(prop, a[name]) + value = prop.make_value_from_datastore(value) + setattr(obj, prop.name, value) + return value + raise AttributeError('%s not found' % name) + + def set_key_value(self, obj, name, value): + self.domain.put_attributes(obj.id, {name: value}, replace=True) + + def delete_key_value(self, obj, name): + self.domain.delete_attributes(obj.id, name) + + def get_key_value(self, obj, name): + a = self.domain.get_attributes(obj.id, name, consistent_read=self.consistent) + if name in a: + return a[name] + else: + return None + + def get_raw_item(self, obj): + return self.domain.get_item(obj.id) diff --git a/awx/lib/site-packages/boto/sdb/db/manager/xmlmanager.py b/awx/lib/site-packages/boto/sdb/db/manager/xmlmanager.py new file mode 100644 index 0000000000..04210db85e --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/manager/xmlmanager.py @@ -0,0 +1,517 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
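Before the XML manager, a hedged sketch of the conditional write that SDBManager.save_object() above supports via expected_value (surfaced as Model.put() in model.py later in this diff); the Task model and attribute names are invented:

    task = Task.get_by_id(some_id)
    task.state = 'running'
    # Rejected unless 'state' still reads 'queued' in SimpleDB -- a simple
    # compare-and-set built on put_attributes(expected_value=...).
    task.put(expected_value=['state', 'queued'])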
+import boto
+from boto.utils import find_class, Password
+from boto.sdb.db.key import Key
+from boto.sdb.db.model import Model
+from datetime import datetime
+from xml.dom.minidom import getDOMImplementation, parse, parseString, Node
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
+class XMLConverter:
+    """
+    Responsible for converting base Python types to format compatible with underlying
+    database. For SimpleDB, that means everything needs to be converted to a string
+    when stored in SimpleDB and from a string when retrieved.
+
+    To convert a value, pass it to the encode or decode method. The encode method
+    will take a Python native value and convert to DB format. The decode method will
+    take a DB format value and convert it to Python native format. To find the appropriate
+    method to call, the generic encode/decode methods will look for the type-specific
+    method by searching for a method called "encode_<type name>" or "decode_<type name>".
+    """
+    def __init__(self, manager):
+        self.manager = manager
+        self.type_map = { bool : (self.encode_bool, self.decode_bool),
+                          int : (self.encode_int, self.decode_int),
+                          long : (self.encode_long, self.decode_long),
+                          Model : (self.encode_reference, self.decode_reference),
+                          Key : (self.encode_reference, self.decode_reference),
+                          Password : (self.encode_password, self.decode_password),
+                          datetime : (self.encode_datetime, self.decode_datetime)}
+
+    def get_text_value(self, parent_node):
+        value = ''
+        for node in parent_node.childNodes:
+            if node.nodeType == node.TEXT_NODE:
+                value += node.data
+        return value
+
+    def encode(self, item_type, value):
+        if item_type in self.type_map:
+            encode = self.type_map[item_type][0]
+            return encode(value)
+        return value
+
+    def decode(self, item_type, value):
+        if item_type in self.type_map:
+            decode = self.type_map[item_type][1]
+            return decode(value)
+        else:
+            value = self.get_text_value(value)
+        return value
+
+    def encode_prop(self, prop, value):
+        if isinstance(value, list):
+            if hasattr(prop, 'item_type'):
+                new_value = []
+                for v in value:
+                    item_type = getattr(prop, "item_type")
+                    if Model in item_type.mro():
+                        item_type = Model
+                    new_value.append(self.encode(item_type, v))
+                return new_value
+            else:
+                return value
+        else:
+            return self.encode(prop.data_type, value)
+
+    def decode_prop(self, prop, value):
+        if prop.data_type == list:
+            if hasattr(prop, 'item_type'):
+                item_type = getattr(prop, "item_type")
+                if Model in item_type.mro():
+                    item_type = Model
+                values = []
+                for item_node in value.getElementsByTagName('item'):
+                    value = self.decode(item_type, item_node)
+                    values.append(value)
+                return values
+            else:
+                return self.get_text_value(value)
+        else:
+            return self.decode(prop.data_type, value)
+
+    def encode_int(self, value):
+        value = int(value)
+        return '%d' % value
+
+    def decode_int(self, value):
+        value = self.get_text_value(value)
+        if value:
+            value = int(value)
+        else:
+            value = None
+        return value
+
+    def encode_long(self, value):
+        value = long(value)
+        return '%d' % value
+
+    def decode_long(self, value):
+        value = self.get_text_value(value)
+        return long(value)
+
+    def encode_bool(self, value):
+        if value == True:
+            return 'true'
+        else:
+            return 'false'
+
+    def decode_bool(self, value):
+        value = self.get_text_value(value)
+        if value.lower() == 'true':
+            return True
+        else:
+            return False
+
+    def encode_datetime(self, value):
+        return value.strftime(ISO8601)
+
+    def decode_datetime(self, value):
+        value = self.get_text_value(value)
+        try:
+            return datetime.strptime(value, ISO8601)
+        except:
+            return None
+
+    def
encode_reference(self, value): + if isinstance(value, str) or isinstance(value, unicode): + return value + if value == None: + return '' + else: + val_node = self.manager.doc.createElement("object") + val_node.setAttribute('id', value.id) + val_node.setAttribute('class', '%s.%s' % (value.__class__.__module__, value.__class__.__name__)) + return val_node + + def decode_reference(self, value): + if not value: + return None + try: + value = value.childNodes[0] + class_name = value.getAttribute("class") + id = value.getAttribute("id") + cls = find_class(class_name) + return cls.get_by_ids(id) + except: + return None + + def encode_password(self, value): + if value and len(value) > 0: + return str(value) + else: + return None + + def decode_password(self, value): + value = self.get_text_value(value) + return Password(value) + + +class XMLManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, ddl_dir, enable_ssl): + self.cls = cls + if not db_name: + db_name = cls.__name__.lower() + self.db_name = db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.ddl_dir = ddl_dir + self.s3 = None + self.converter = XMLConverter(self) + self.impl = getDOMImplementation() + self.doc = self.impl.createDocument(None, 'objects', None) + + self.connection = None + self.enable_ssl = enable_ssl + self.auth_header = None + if self.db_user: + import base64 + base64string = base64.encodestring('%s:%s' % (self.db_user, self.db_passwd))[:-1] + authheader = "Basic %s" % base64string + self.auth_header = authheader + + def _connect(self): + if self.db_host: + if self.enable_ssl: + from httplib import HTTPSConnection as Connection + else: + from httplib import HTTPConnection as Connection + + self.connection = Connection(self.db_host, self.db_port) + + def _make_request(self, method, url, post_data=None, body=None): + """ + Make a request on this connection + """ + if not self.connection: + self._connect() + try: + self.connection.close() + except: + pass + self.connection.connect() + headers = {} + if self.auth_header: + headers["Authorization"] = self.auth_header + self.connection.request(method, url, body, headers) + resp = self.connection.getresponse() + return resp + + def new_doc(self): + return self.impl.createDocument(None, 'objects', None) + + def _object_lister(self, cls, doc): + for obj_node in doc.getElementsByTagName('object'): + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + id = obj_node.getAttribute('id') + obj = cls(id) + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = obj.find_property(prop_name) + if prop: + if hasattr(prop, 'item_type'): + value = self.get_list(prop_node, prop.item_type) + else: + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + setattr(obj, prop.name, value) + yield obj + + def reset(self): + self._connect() + + def get_doc(self): + return self.doc + + def encode_value(self, prop, value): + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key) + return self.s3 + + def get_list(self, prop_node, item_type): + values = [] + try: + items_node = prop_node.getElementsByTagName('items')[0] + 
except: + return [] + for item_node in items_node.getElementsByTagName('item'): + value = self.converter.decode(item_type, item_node) + values.append(value) + return values + + def get_object_from_doc(self, cls, id, doc): + obj_node = doc.getElementsByTagName('object')[0] + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + if not id: + id = obj_node.getAttribute('id') + obj = cls(id) + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = obj.find_property(prop_name) + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + if value != None: + try: + setattr(obj, prop.name, value) + except: + pass + return obj + + def get_props_from_doc(self, cls, id, doc): + """ + Pull out the properties from this document + Returns the class, the properties in a hash, and the id if provided as a tuple + :return: (cls, props, id) + """ + obj_node = doc.getElementsByTagName('object')[0] + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + if not id: + id = obj_node.getAttribute('id') + props = {} + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = cls.find_property(prop_name) + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + if value != None: + props[prop.name] = value + return (cls, props, id) + + + def get_object(self, cls, id): + if not self.connection: + self._connect() + + if not self.connection: + raise NotImplementedError("Can't query without a database connection") + url = "/%s/%s" % (self.db_name, id) + resp = self._make_request('GET', url) + if resp.status == 200: + doc = parse(resp) + else: + raise Exception("Error: %s" % resp.status) + return self.get_object_from_doc(cls, id, doc) + + def query(self, cls, filters, limit=None, order_by=None): + if not self.connection: + self._connect() + + if not self.connection: + raise NotImplementedError("Can't query without a database connection") + + from urllib import urlencode + + query = str(self._build_query(cls, filters, limit, order_by)) + if query: + url = "/%s?%s" % (self.db_name, urlencode({"query": query})) + else: + url = "/%s" % self.db_name + resp = self._make_request('GET', url) + if resp.status == 200: + doc = parse(resp) + else: + raise Exception("Error: %s" % resp.status) + return self._object_lister(cls, doc) + + def _build_query(self, cls, filters, limit, order_by): + import types + if len(filters) > 4: + raise Exception('Too many filters, max is 4') + parts = [] + properties = cls.properties(hidden=False) + for filter, value in filters: + name, op = filter.strip().split() + found = False + for property in properties: + if property.name == name: + found = True + if types.TypeType(value) == types.ListType: + filter_parts = [] + for val in value: + val = self.encode_value(property, val) + filter_parts.append("'%s' %s '%s'" % (name, op, val)) + parts.append("[%s]" % " OR ".join(filter_parts)) + else: + value = self.encode_value(property, value) + parts.append("['%s' %s '%s']" % (name, op, value)) + if not found: + raise Exception('%s is not a valid field' % name) + if order_by: + if order_by.startswith("-"): + key = order_by[1:] + type = "desc" + else: + key = order_by + type = "asc" + parts.append("['%s' starts-with ''] sort '%s' %s" % (key, key, type)) + return ' intersection '.join(parts) + + def query_gql(self, query_string, *args, **kwds): + raise 
NotImplementedError("GQL queries not supported in XML") + + def save_list(self, doc, items, prop_node): + items_node = doc.createElement('items') + prop_node.appendChild(items_node) + for item in items: + item_node = doc.createElement('item') + items_node.appendChild(item_node) + if isinstance(item, Node): + item_node.appendChild(item) + else: + text_node = doc.createTextNode(item) + item_node.appendChild(text_node) + + def save_object(self, obj, expected_value=None): + """ + Marshal the object and do a PUT + """ + doc = self.marshal_object(obj) + if obj.id: + url = "/%s/%s" % (self.db_name, obj.id) + else: + url = "/%s" % (self.db_name) + resp = self._make_request("PUT", url, body=doc.toxml()) + new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp)) + obj.id = new_obj.id + for prop in obj.properties(): + try: + propname = prop.name + except AttributeError: + propname = None + if propname: + value = getattr(new_obj, prop.name) + if value: + setattr(obj, prop.name, value) + return obj + + + def marshal_object(self, obj, doc=None): + if not doc: + doc = self.new_doc() + if not doc: + doc = self.doc + obj_node = doc.createElement('object') + + if obj.id: + obj_node.setAttribute('id', obj.id) + + obj_node.setAttribute('class', '%s.%s' % (obj.__class__.__module__, + obj.__class__.__name__)) + root = doc.documentElement + root.appendChild(obj_node) + for property in obj.properties(hidden=False): + prop_node = doc.createElement('property') + prop_node.setAttribute('name', property.name) + prop_node.setAttribute('type', property.type_name) + value = property.get_value_for_datastore(obj) + if value is not None: + value = self.encode_value(property, value) + if isinstance(value, list): + self.save_list(doc, value, prop_node) + elif isinstance(value, Node): + prop_node.appendChild(value) + else: + text_node = doc.createTextNode(unicode(value).encode("ascii", "ignore")) + prop_node.appendChild(text_node) + obj_node.appendChild(prop_node) + + return doc + + def unmarshal_object(self, fp, cls=None, id=None): + if isinstance(fp, str) or isinstance(fp, unicode): + doc = parseString(fp) + else: + doc = parse(fp) + return self.get_object_from_doc(cls, id, doc) + + def unmarshal_props(self, fp, cls=None, id=None): + """ + Same as unmarshalling an object, except it returns + from "get_props_from_doc" + """ + if isinstance(fp, str) or isinstance(fp, unicode): + doc = parseString(fp) + else: + doc = parse(fp) + return self.get_props_from_doc(cls, id, doc) + + def delete_object(self, obj): + url = "/%s/%s" % (self.db_name, obj.id) + return self._make_request("DELETE", url) + + def set_key_value(self, obj, name, value): + self.domain.put_attributes(obj.id, {name : value}, replace=True) + + def delete_key_value(self, obj, name): + self.domain.delete_attributes(obj.id, name) + + def get_key_value(self, obj, name): + a = self.domain.get_attributes(obj.id, name) + if name in a: + return a[name] + else: + return None + + def get_raw_item(self, obj): + return self.domain.get_item(obj.id) + + def set_property(self, prop, obj, name, value): + pass + + def get_property(self, prop, obj, name): + pass + + def load_object(self, obj): + if not obj._loaded: + obj = obj.get_by_id(obj.id) + obj._loaded = True + return obj + diff --git a/awx/lib/site-packages/boto/sdb/db/model.py b/awx/lib/site-packages/boto/sdb/db/model.py new file mode 100644 index 0000000000..3d9a6b5afb --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/model.py @@ -0,0 +1,297 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat 
http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.sdb.db.property import Property
+from boto.sdb.db.key import Key
+from boto.sdb.db.query import Query
+import boto
+
+class ModelMeta(type):
+    "Metaclass for all Models"
+
+    def __init__(cls, name, bases, dict):
+        super(ModelMeta, cls).__init__(name, bases, dict)
+        # Make sure this is a subclass of Model - mainly copied from django ModelBase (thanks!)
+        cls.__sub_classes__ = []
+
+        # Do a delayed import to prevent possible circular import errors.
+        from boto.sdb.db.manager import get_manager
+
+        try:
+            if filter(lambda b: issubclass(b, Model), bases):
+                for base in bases:
+                    base.__sub_classes__.append(cls)
+                cls._manager = get_manager(cls)
+                # look for all of the Properties and set their names
+                for key in dict.keys():
+                    if isinstance(dict[key], Property):
+                        property = dict[key]
+                        property.__property_config__(cls, key)
+                prop_names = []
+                props = cls.properties()
+                for prop in props:
+                    if not prop.__class__.__name__.startswith('_'):
+                        prop_names.append(prop.name)
+                setattr(cls, '_prop_names', prop_names)
+        except NameError:
+            # 'Model' isn't defined yet, meaning we're looking at our own
+            # Model class, defined below.
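+            # In that case there is no manager or property setup to do yet.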
+ pass + +class Model(object): + __metaclass__ = ModelMeta + __consistent__ = False # Consistent is set off by default + id = None + + @classmethod + def get_lineage(cls): + l = [c.__name__ for c in cls.mro()] + l.reverse() + return '.'.join(l) + + @classmethod + def kind(cls): + return cls.__name__ + + @classmethod + def _get_by_id(cls, id, manager=None): + if not manager: + manager = cls._manager + return manager.get_object(cls, id) + + @classmethod + def get_by_id(cls, ids=None, parent=None): + if isinstance(ids, list): + objs = [cls._get_by_id(id) for id in ids] + return objs + else: + return cls._get_by_id(ids) + + get_by_ids = get_by_id + + @classmethod + def get_by_key_name(cls, key_names, parent=None): + raise NotImplementedError("Key Names are not currently supported") + + @classmethod + def find(cls, limit=None, next_token=None, **params): + q = Query(cls, limit=limit, next_token=next_token) + for key, value in params.items(): + q.filter('%s =' % key, value) + return q + + @classmethod + def all(cls, limit=None, next_token=None): + return cls.find(limit=limit, next_token=next_token) + + @classmethod + def get_or_insert(key_name, **kw): + raise NotImplementedError("get_or_insert not currently supported") + + @classmethod + def properties(cls, hidden=True): + properties = [] + while cls: + for key in cls.__dict__.keys(): + prop = cls.__dict__[key] + if isinstance(prop, Property): + if hidden or not prop.__class__.__name__.startswith('_'): + properties.append(prop) + if len(cls.__bases__) > 0: + cls = cls.__bases__[0] + else: + cls = None + return properties + + @classmethod + def find_property(cls, prop_name): + property = None + while cls: + for key in cls.__dict__.keys(): + prop = cls.__dict__[key] + if isinstance(prop, Property): + if not prop.__class__.__name__.startswith('_') and prop_name == prop.name: + property = prop + if len(cls.__bases__) > 0: + cls = cls.__bases__[0] + else: + cls = None + return property + + @classmethod + def get_xmlmanager(cls): + if not hasattr(cls, '_xmlmanager'): + from boto.sdb.db.manager.xmlmanager import XMLManager + cls._xmlmanager = XMLManager(cls, None, None, None, + None, None, None, None, False) + return cls._xmlmanager + + @classmethod + def from_xml(cls, fp): + xmlmanager = cls.get_xmlmanager() + return xmlmanager.unmarshal_object(fp) + + def __init__(self, id=None, **kw): + self._loaded = False + # first try to initialize all properties to their default values + for prop in self.properties(hidden=False): + try: + setattr(self, prop.name, prop.default_value()) + except ValueError: + pass + if 'manager' in kw: + self._manager = kw['manager'] + self.id = id + for key in kw: + if key != 'manager': + # We don't want any errors populating up when loading an object, + # so if it fails we just revert to it's default value + try: + setattr(self, key, kw[key]) + except Exception, e: + boto.log.exception(e) + + def __repr__(self): + return '%s<%s>' % (self.__class__.__name__, self.id) + + def __str__(self): + return str(self.id) + + def __eq__(self, other): + return other and isinstance(other, Model) and self.id == other.id + + def _get_raw_item(self): + return self._manager.get_raw_item(self) + + def load(self): + if self.id and not self._loaded: + self._manager.load_object(self) + + def reload(self): + if self.id: + self._loaded = False + self._manager.load_object(self) + + def put(self, expected_value=None): + """ + Save this object as it is, with an optional expected value + + :param expected_value: Optional tuple of Attribute, and Value that 
+ must be the same in order to save this object. If this + condition is not met, an SDBResponseError will be raised with a + Confict status code. + :type expected_value: tuple or list + :return: This object + :rtype: :class:`boto.sdb.db.model.Model` + """ + self._manager.save_object(self, expected_value) + return self + + save = put + + def put_attributes(self, attrs): + """ + Save just these few attributes, not the whole object + + :param attrs: Attributes to save, key->value dict + :type attrs: dict + :return: self + :rtype: :class:`boto.sdb.db.model.Model` + """ + assert(isinstance(attrs, dict)), "Argument must be a dict of key->values to save" + for prop_name in attrs: + value = attrs[prop_name] + prop = self.find_property(prop_name) + assert(prop), "Property not found: %s" % prop_name + self._manager.set_property(prop, self, prop_name, value) + self.reload() + return self + + def delete_attributes(self, attrs): + """ + Delete just these attributes, not the whole object. + + :param attrs: Attributes to save, as a list of string names + :type attrs: list + :return: self + :rtype: :class:`boto.sdb.db.model.Model` + """ + assert(isinstance(attrs, list)), "Argument must be a list of names of keys to delete." + self._manager.domain.delete_attributes(self.id, attrs) + self.reload() + return self + + save_attributes = put_attributes + + def delete(self): + self._manager.delete_object(self) + + def key(self): + return Key(obj=self) + + def set_manager(self, manager): + self._manager = manager + + def to_dict(self): + props = {} + for prop in self.properties(hidden=False): + props[prop.name] = getattr(self, prop.name) + obj = {'properties' : props, + 'id' : self.id} + return {self.__class__.__name__ : obj} + + def to_xml(self, doc=None): + xmlmanager = self.get_xmlmanager() + doc = xmlmanager.marshal_object(self, doc) + return doc + + @classmethod + def find_subclass(cls, name): + """Find a subclass with a given name""" + if name == cls.__name__: + return cls + for sc in cls.__sub_classes__: + r = sc.find_subclass(name) + if r != None: + return r + +class Expando(Model): + + def __setattr__(self, name, value): + if name in self._prop_names: + object.__setattr__(self, name, value) + elif name.startswith('_'): + object.__setattr__(self, name, value) + elif name == 'id': + object.__setattr__(self, name, value) + else: + self._manager.set_key_value(self, name, value) + object.__setattr__(self, name, value) + + def __getattr__(self, name): + if not name.startswith('_'): + value = self._manager.get_key_value(self, name) + if value: + object.__setattr__(self, name, value) + return value + raise AttributeError + + diff --git a/awx/lib/site-packages/boto/sdb/db/property.py b/awx/lib/site-packages/boto/sdb/db/property.py new file mode 100644 index 0000000000..b8610cfeff --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/property.py @@ -0,0 +1,703 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import datetime +from key import Key +from boto.utils import Password +from boto.sdb.db.query import Query +import re +import boto +import boto.s3.key +from boto.sdb.db.blob import Blob + + +class Property(object): + + data_type = str + type_name = '' + name = '' + verbose_name = '' + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, unique=False): + self.verbose_name = verbose_name + self.name = name + self.default = default + self.required = required + self.validator = validator + self.choices = choices + if self.name: + self.slot_name = '_' + self.name + else: + self.slot_name = '_' + self.unique = unique + + def __get__(self, obj, objtype): + if obj: + obj.load() + return getattr(obj, self.slot_name) + else: + return None + + def __set__(self, obj, value): + self.validate(value) + + # Fire off any on_set functions + try: + if obj._loaded and hasattr(obj, "on_set_%s" % self.name): + fnc = getattr(obj, "on_set_%s" % self.name) + value = fnc(value) + except Exception: + boto.log.exception("Exception running on_set_%s" % self.name) + + setattr(obj, self.slot_name, value) + + def __property_config__(self, model_class, property_name): + self.model_class = model_class + self.name = property_name + self.slot_name = '_' + self.name + + def default_validator(self, value): + if isinstance(value, basestring) or value == self.default_value(): + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, %s.%s expecting %s, got %s' % (self.model_class.__name__, self.name, self.data_type, type(value))) + + def default_value(self): + return self.default + + def validate(self, value): + if self.required and value == None: + raise ValueError('%s is a required property' % self.name) + if self.choices and value and not value in self.choices: + raise ValueError('%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name)) + if self.validator: + self.validator(value) + else: + self.default_validator(value) + return value + + def empty(self, value): + return not value + + def get_value_for_datastore(self, model_instance): + return getattr(model_instance, self.name) + + def make_value_from_datastore(self, value): + return value + + def get_choices(self): + if callable(self.choices): + return self.choices() + return self.choices + + +def validate_string(value): + if value == None: + return + elif isinstance(value, str) or isinstance(value, unicode): + if len(value) > 1024: + raise ValueError('Length of value greater than maxlength') + else: + raise TypeError('Expecting String, got %s' % type(value)) + + +class StringProperty(Property): + + type_name = 'String' + + def __init__(self, verbose_name=None, name=None, default='', + required=False, validator=validate_string, + choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, + validator, choices, unique) + + +class TextProperty(Property): + + type_name = 'Text' + + def __init__(self, verbose_name=None, name=None, default='', + required=False, 
validator=None, choices=None,
+                 unique=False, max_length=None):
+        Property.__init__(self, verbose_name, name, default, required,
+                          validator, choices, unique)
+        self.max_length = max_length
+
+    def validate(self, value):
+        value = super(TextProperty, self).validate(value)
+        if not isinstance(value, str) and not isinstance(value, unicode):
+            raise TypeError('Expecting Text, got %s' % type(value))
+        if self.max_length and len(value) > self.max_length:
+            raise ValueError('Length of value greater than maxlength %s' % self.max_length)
+
+
+class PasswordProperty(StringProperty):
+    """
+
+    Hashed property whose original value cannot be
+    retrieved, but still can be compared.
+
+    Works by storing a hash of the original value instead
+    of the original value. Once that's done all that
+    can be retrieved is the hash.
+
+    The comparison
+
+        obj.password == 'foo'
+
+    generates a hash of 'foo' and compares it to the
+    stored hash.
+
+    Underlying data type for hashing, storing, and comparing
+    is boto.utils.Password. The default hash function is
+    defined there (currently sha512 in most cases, md5
+    where sha512 is not available).
+
+    It's unlikely you'll ever need to use a different hash
+    function, but if you do, you can control the behavior
+    in one of two ways:
+
+      1) Specifying hashfunc in PasswordProperty constructor
+
+         import hashlib
+
+         class MyModel(Model):
+             password = PasswordProperty(hashfunc=hashlib.sha224)
+
+      2) Subclassing Password and PasswordProperty
+
+         class SHA224Password(Password):
+             hashfunc=hashlib.sha224
+
+         class SHA224PasswordProperty(PasswordProperty):
+             data_type=SHA224Password
+             type_name="SHA224Password"
+
+         class MyModel(Model):
+             password = SHA224PasswordProperty()
+
+    """
+    data_type = Password
+    type_name = 'Password'
+
+    def __init__(self, verbose_name=None, name=None, default='', required=False,
+                 validator=None, choices=None, unique=False, hashfunc=None):
+
+        """
+        The hashfunc parameter overrides the default hashfunc in boto.utils.Password.
+ + The remaining parameters are passed through to StringProperty.__init__""" + + StringProperty.__init__(self, verbose_name, name, default, required, + validator, choices, unique) + self.hashfunc = hashfunc + + def make_value_from_datastore(self, value): + p = self.data_type(value, hashfunc=self.hashfunc) + return p + + def get_value_for_datastore(self, model_instance): + value = StringProperty.get_value_for_datastore(self, model_instance) + if value and len(value): + return str(value) + else: + return None + + def __set__(self, obj, value): + if not isinstance(value, self.data_type): + p = self.data_type(hashfunc=self.hashfunc) + p.set(value) + value = p + Property.__set__(self, obj, value) + + def __get__(self, obj, objtype): + return self.data_type(StringProperty.__get__(self, obj, objtype), hashfunc=self.hashfunc) + + def validate(self, value): + value = Property.validate(self, value) + if isinstance(value, self.data_type): + if len(value) > 1024: + raise ValueError('Length of value greater than maxlength') + else: + raise TypeError('Expecting %s, got %s' % (type(self.data_type), type(value))) + + +class BlobProperty(Property): + data_type = Blob + type_name = "blob" + + def __set__(self, obj, value): + if value != self.default_value(): + if not isinstance(value, Blob): + oldb = self.__get__(obj, type(obj)) + id = None + if oldb: + id = oldb.id + b = Blob(value=value, id=id) + value = b + Property.__set__(self, obj, value) + + +class S3KeyProperty(Property): + + data_type = boto.s3.key.Key + type_name = 'S3Key' + validate_regex = "^s3:\/\/([^\/]*)\/(.*)$" + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, + validator, choices, unique) + + def validate(self, value): + value = super(S3KeyProperty, self).validate(value) + if value == self.default_value() or value == str(self.default_value()): + return self.default_value() + if isinstance(value, self.data_type): + return + match = re.match(self.validate_regex, value) + if match: + return + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + def __get__(self, obj, objtype): + value = Property.__get__(self, obj, objtype) + if value: + if isinstance(value, self.data_type): + return value + match = re.match(self.validate_regex, value) + if match: + s3 = obj._manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + k = bucket.get_key(match.group(2)) + if not k: + k = bucket.new_key(match.group(2)) + k.set_contents_from_string("") + return k + else: + return value + + def get_value_for_datastore(self, model_instance): + value = Property.get_value_for_datastore(self, model_instance) + if value: + return "s3://%s/%s" % (value.bucket.name, value.name) + else: + return None + + +class IntegerProperty(Property): + + data_type = int + type_name = 'Integer' + + def __init__(self, verbose_name=None, name=None, default=0, required=False, + validator=None, choices=None, unique=False, max=2147483647, min=-2147483648): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + self.max = max + self.min = min + + def validate(self, value): + value = int(value) + value = Property.validate(self, value) + if value > self.max: + raise ValueError('Maximum value is %d' % self.max) + if value < self.min: + raise ValueError('Minimum value is %d' % self.min) + return value + + def empty(self, value): + return value is 
None + + def __set__(self, obj, value): + if value == "" or value == None: + value = 0 + return Property.__set__(self, obj, value) + + +class LongProperty(Property): + + data_type = long + type_name = 'Long' + + def __init__(self, verbose_name=None, name=None, default=0, required=False, + validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = long(value) + value = Property.validate(self, value) + min = -9223372036854775808 + max = 9223372036854775807 + if value > max: + raise ValueError('Maximum value is %d' % max) + if value < min: + raise ValueError('Minimum value is %d' % min) + return value + + def empty(self, value): + return value is None + + +class BooleanProperty(Property): + + data_type = bool + type_name = 'Boolean' + + def __init__(self, verbose_name=None, name=None, default=False, required=False, + validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + + def empty(self, value): + return value is None + + +class FloatProperty(Property): + + data_type = float + type_name = 'Float' + + def __init__(self, verbose_name=None, name=None, default=0.0, required=False, + validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = float(value) + value = Property.validate(self, value) + return value + + def empty(self, value): + return value is None + + +class DateTimeProperty(Property): + """This class handles both the datetime.datetime object + And the datetime.date objects. It can return either one, + depending on the value stored in the database""" + + data_type = datetime.datetime + type_name = 'DateTime' + + def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + self.auto_now = auto_now + self.auto_now_add = auto_now_add + + def default_value(self): + if self.auto_now or self.auto_now_add: + return self.now() + return Property.default_value(self) + + def validate(self, value): + if value == None: + return + if isinstance(value, datetime.date): + return value + return super(DateTimeProperty, self).validate(value) + + def get_value_for_datastore(self, model_instance): + if self.auto_now: + setattr(model_instance, self.name, self.now()) + return Property.get_value_for_datastore(self, model_instance) + + def now(self): + return datetime.datetime.utcnow() + + +class DateProperty(Property): + + data_type = datetime.date + type_name = 'Date' + + def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + self.auto_now = auto_now + self.auto_now_add = auto_now_add + + def default_value(self): + if self.auto_now or self.auto_now_add: + return self.now() + return Property.default_value(self) + + def validate(self, value): + value = super(DateProperty, self).validate(value) + if value == None: + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + def get_value_for_datastore(self, 
model_instance): + if self.auto_now: + setattr(model_instance, self.name, self.now()) + val = Property.get_value_for_datastore(self, model_instance) + if isinstance(val, datetime.datetime): + val = val.date() + return val + + def now(self): + return datetime.date.today() + + +class TimeProperty(Property): + data_type = datetime.time + type_name = 'Time' + + def __init__(self, verbose_name=None, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = super(TimeProperty, self).validate(value) + if value is None: + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + +class ReferenceProperty(Property): + + data_type = Key + type_name = 'Reference' + + def __init__(self, reference_class=None, collection_name=None, + verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + self.reference_class = reference_class + self.collection_name = collection_name + + def __get__(self, obj, objtype): + if obj: + value = getattr(obj, self.slot_name) + if value == self.default_value(): + return value + # If the value is still the UUID for the referenced object, we need to create + # the object now that is the attribute has actually been accessed. This lazy + # instantiation saves unnecessary roundtrips to SimpleDB + if isinstance(value, str) or isinstance(value, unicode): + value = self.reference_class(value) + setattr(obj, self.name, value) + return value + + def __set__(self, obj, value): + """Don't allow this object to be associated to itself + This causes bad things to happen""" + if value != None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)): + raise ValueError("Can not associate an object with itself!") + return super(ReferenceProperty, self).__set__(obj, value) + + def __property_config__(self, model_class, property_name): + Property.__property_config__(self, model_class, property_name) + if self.collection_name is None: + self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name) + if hasattr(self.reference_class, self.collection_name): + raise ValueError('duplicate property: %s' % self.collection_name) + setattr(self.reference_class, self.collection_name, + _ReverseReferenceProperty(model_class, property_name, self.collection_name)) + + def check_uuid(self, value): + # This does a bit of hand waving to "type check" the string + t = value.split('-') + if len(t) != 5: + raise ValueError + + def check_instance(self, value): + try: + obj_lineage = value.get_lineage() + cls_lineage = self.reference_class.get_lineage() + if obj_lineage.startswith(cls_lineage): + return + raise TypeError('%s not instance of %s' % (obj_lineage, cls_lineage)) + except: + raise ValueError('%s is not a Model' % value) + + def validate(self, value): + if self.validator: + self.validator(value) + if self.required and value == None: + raise ValueError('%s is a required property' % self.name) + if value == self.default_value(): + return + if not isinstance(value, str) and not isinstance(value, unicode): + self.check_instance(value) + + +class _ReverseReferenceProperty(Property): + data_type = Query + type_name = 'query' + + def __init__(self, model, prop, name): + self.__model = model + 
self.__property = prop + self.collection_name = prop + self.name = name + self.item_type = model + + def __get__(self, model_instance, model_class): + """Fetches collection of model instances of this collection property.""" + if model_instance is not None: + query = Query(self.__model) + if isinstance(self.__property, list): + props = [] + for prop in self.__property: + props.append("%s =" % prop) + return query.filter(props, model_instance) + else: + return query.filter(self.__property + ' =', model_instance) + else: + return self + + def __set__(self, model_instance, value): + """Not possible to set a new collection.""" + raise ValueError('Virtual property is read-only') + + +class CalculatedProperty(Property): + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, + calculated_type=int, unique=False, use_method=False): + Property.__init__(self, verbose_name, name, default, required, + validator, choices, unique) + self.calculated_type = calculated_type + self.use_method = use_method + + def __get__(self, obj, objtype): + value = self.default_value() + if obj: + try: + value = getattr(obj, self.slot_name) + if self.use_method: + value = value() + except AttributeError: + pass + return value + + def __set__(self, obj, value): + """Not possible to set a new AutoID.""" + pass + + def _set_direct(self, obj, value): + if not self.use_method: + setattr(obj, self.slot_name, value) + + def get_value_for_datastore(self, model_instance): + if self.calculated_type in [str, int, bool]: + value = self.__get__(model_instance, model_instance.__class__) + return value + else: + return None + + +class ListProperty(Property): + + data_type = list + type_name = 'List' + + def __init__(self, item_type, verbose_name=None, name=None, default=None, **kwds): + if default is None: + default = [] + self.item_type = item_type + Property.__init__(self, verbose_name, name, default=default, required=True, **kwds) + + def validate(self, value): + if self.validator: + self.validator(value) + if value is not None: + if not isinstance(value, list): + value = [value] + + if self.item_type in (int, long): + item_type = (int, long) + elif self.item_type in (str, unicode): + item_type = (str, unicode) + else: + item_type = self.item_type + + for item in value: + if not isinstance(item, item_type): + if item_type == (int, long): + raise ValueError('Items in the %s list must all be integers.' 
% self.name) + else: + raise ValueError('Items in the %s list must all be %s instances' % + (self.name, self.item_type.__name__)) + return value + + def empty(self, value): + return value is None + + def default_value(self): + return list(super(ListProperty, self).default_value()) + + def __set__(self, obj, value): + """Override the set method to allow them to set the property to an instance of the item_type instead of requiring a list to be passed in""" + if self.item_type in (int, long): + item_type = (int, long) + elif self.item_type in (str, unicode): + item_type = (str, unicode) + else: + item_type = self.item_type + if isinstance(value, item_type): + value = [value] + elif value == None: # Override to allow them to set this to "None" to remove everything + value = [] + return super(ListProperty, self).__set__(obj, value) + + +class MapProperty(Property): + + data_type = dict + type_name = 'Map' + + def __init__(self, item_type=str, verbose_name=None, name=None, default=None, **kwds): + if default is None: + default = {} + self.item_type = item_type + Property.__init__(self, verbose_name, name, default=default, required=True, **kwds) + + def validate(self, value): + value = super(MapProperty, self).validate(value) + if value is not None: + if not isinstance(value, dict): + raise ValueError('Value must of type dict') + + if self.item_type in (int, long): + item_type = (int, long) + elif self.item_type in (str, unicode): + item_type = (str, unicode) + else: + item_type = self.item_type + + for key in value: + if not isinstance(value[key], item_type): + if item_type == (int, long): + raise ValueError('Values in the %s Map must all be integers.' % self.name) + else: + raise ValueError('Values in the %s Map must all be %s instances' % + (self.name, self.item_type.__name__)) + return value + + def empty(self, value): + return value is None + + def default_value(self): + return {} diff --git a/awx/lib/site-packages/boto/sdb/db/query.py b/awx/lib/site-packages/boto/sdb/db/query.py new file mode 100644 index 0000000000..31b71aa03b --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/query.py @@ -0,0 +1,85 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
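
A quick aside on the ListProperty and MapProperty classes above, before query.py's Query class begins: the sketch below illustrates the coercion and validation behavior they implement. It is illustrative only, not part of the patch; the model and field names are hypothetical, and a usable boto configuration is assumed for anything beyond attribute assignment.

    from boto.sdb.db.model import Model
    from boto.sdb.db.property import StringProperty, ListProperty, MapProperty

    class TagRecord(Model):
        name = StringProperty()
        counts = ListProperty(int)     # items validated as int (or long)
        labels = MapProperty(str)      # values validated as str (or unicode)

    rec = TagRecord()
    rec.counts = 5            # __set__ wraps a bare item_type value into [5]
    rec.counts = None         # __set__ maps None back to an empty list
    rec.labels = {'env': 'dev'}
    # Assignment runs validate(), so a wrongly typed item fails fast:
    # rec.counts = ['a']  ->  ValueError('Items in the counts list must
    #                          all be integers.')
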
+ +class Query(object): + __local_iter__ = None + def __init__(self, model_class, limit=None, next_token=None, manager=None): + self.model_class = model_class + self.limit = limit + self.offset = 0 + if manager: + self.manager = manager + else: + self.manager = self.model_class._manager + self.filters = [] + self.select = None + self.sort_by = None + self.rs = None + self.next_token = next_token + + def __iter__(self): + return iter(self.manager.query(self)) + + def next(self): + if self.__local_iter__ == None: + self.__local_iter__ = self.__iter__() + return self.__local_iter__.next() + + def filter(self, property_operator, value): + self.filters.append((property_operator, value)) + return self + + def fetch(self, limit, offset=0): + """Not currently fully supported, but we can use this + to allow them to set a limit in a chainable method""" + self.limit = limit + self.offset = offset + return self + + def count(self, quick=True): + return self.manager.count(self.model_class, self.filters, quick, self.sort_by, self.select) + + def get_query(self): + return self.manager._build_filter_part(self.model_class, self.filters, self.sort_by, self.select) + + def order(self, key): + self.sort_by = key + return self + + def to_xml(self, doc=None): + if not doc: + xmlmanager = self.model_class.get_xmlmanager() + doc = xmlmanager.new_doc() + for obj in self: + obj.to_xml(doc) + return doc + + def get_next_token(self): + if self.rs: + return self.rs.next_token + if self._next_token: + return self._next_token + return None + + def set_next_token(self, token): + self._next_token = token + + next_token = property(get_next_token, set_next_token) diff --git a/awx/lib/site-packages/boto/sdb/db/sequence.py b/awx/lib/site-packages/boto/sdb/db/sequence.py new file mode 100644 index 0000000000..121512f208 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/sequence.py @@ -0,0 +1,226 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import SDBResponseError + +class SequenceGenerator(object): + """Generic Sequence Generator object, this takes a single + string as the "sequence" and uses that to figure out + what the next value in a string is. For example + if you give "ABC" and pass in "A" it will give you "B", + and if you give it "C" it will give you "AA". + + If you set "rollover" to True in the above example, passing + in "C" would give you "A" again. 
+ + The Sequence string can be a string or any iterable + that has the "index" function and is indexable. + """ + __name__ = "SequenceGenerator" + + def __init__(self, sequence_string, rollover=False): + """Create a new SequenceGenerator using the sequence_string + as how to generate the next item. + + :param sequence_string: The string or list that explains + how to generate the next item in the sequence + :type sequence_string: str,iterable + + :param rollover: Rollover instead of incrementing when + we hit the end of the sequence + :type rollover: bool + """ + self.sequence_string = sequence_string + self.sequence_length = len(sequence_string[0]) + self.rollover = rollover + self.last_item = sequence_string[-1] + self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string) + + def __call__(self, val, last=None): + """Get the next value in the sequence""" + # If they pass us in a string that's not at least + # the length of our sequence, then return the + # first element in our sequence + if val == None or len(val) < self.sequence_length: + return self.sequence_string[0] + last_value = val[-self.sequence_length:] + if (not self.rollover) and (last_value == self.last_item): + val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value)) + else: + val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value)) + return val + + def _inc(self, val): + """Increment a single value""" + assert(len(val) == self.sequence_length) + return self.sequence_string[(self.sequence_string.index(val)+1) % len(self.sequence_string)] + + + +# +# Simple Sequence Functions +# +def increment_by_one(cv=None, lv=None): + if cv == None: + return 0 + return cv + 1 + +def double(cv=None, lv=None): + if cv == None: + return 1 + return cv * 2 + +def fib(cv=1, lv=0): + """The Fibonacci sequence; this incrementer uses the + last value""" + if cv == None: + cv = 1 + if lv == None: + lv = 0 + return cv + lv + +increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ") + + + +class Sequence(object): + """A simple Sequence using the new SDB "Consistent" features + Based largely on the "Counter" example from Mitch Garnaat: + http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py""" + + + def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None): + """Create a new Sequence, using an optional function to + increment to the next number; by default we just increment by one. + Every parameter here is optional; if you don't specify any options + then you'll get a new Sequence with a random ID stored in the + default domain that increments by one and uses the default botoweb + environment. + + :param id: Optional ID (name) for this counter + :type id: str + + :param domain_name: Optional domain name to use; by default we get this out of the + environment configuration + :type domain_name: str + + :param fnc: Optional function to use for the incrementation; by default we just increment by one. + There are several functions defined in this module.
+ Your function must accept "None" to get the initial value + :type fnc: function, str + + :param init_val: Initial value, by default this is the first element in your sequence, + but you can pass in any value, even a string if you pass in a function that uses + strings instead of ints to increment + """ + self._db = None + self._value = None + self.last_value = None + self.domain_name = domain_name + self.id = id + if init_val == None: + init_val = fnc(init_val) + + if self.id == None: + import uuid + self.id = str(uuid.uuid4()) + + self.item_type = type(fnc(None)) + self.timestamp = None + # Allow us to pass in a full name to a function + if isinstance(fnc, str): + from boto.utils import find_class + fnc = find_class(fnc) + self.fnc = fnc + + # Bootstrap the value last + if not self.val: + self.val = init_val + + def set(self, val): + """Set the value""" + import time + now = time.time() + expected_value = [] + new_val = {} + new_val['timestamp'] = now + if self._value != None: + new_val['last_value'] = self._value + expected_value = ['current_value', str(self._value)] + new_val['current_value'] = val + try: + self.db.put_attributes(self.id, new_val, expected_value=expected_value) + self.timestamp = new_val['timestamp'] + except SDBResponseError, e: + if e.status == 409: + raise ValueError("Sequence out of sync") + else: + raise + + + def get(self): + """Get the value""" + val = self.db.get_attributes(self.id, consistent_read=True) + if val: + if 'timestamp' in val: + self.timestamp = val['timestamp'] + if 'current_value' in val: + self._value = self.item_type(val['current_value']) + if "last_value" in val and val['last_value'] != None: + self.last_value = self.item_type(val['last_value']) + return self._value + + val = property(get, set) + + def __repr__(self): + return "%s('%s', '%s', '%s.%s', '%s')" % ( + self.__class__.__name__, + self.id, + self.domain_name, + self.fnc.__module__, self.fnc.__name__, + self.val) + + + def _connect(self): + """Connect to our domain""" + if not self._db: + import boto + sdb = boto.connect_sdb() + if not self.domain_name: + self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default")) + try: + self._db = sdb.get_domain(self.domain_name) + except SDBResponseError, e: + if e.status == 400: + self._db = sdb.create_domain(self.domain_name) + else: + raise + return self._db + + db = property(_connect) + + def next(self): + self.val = self.fnc(self.val, self.last_value) + return self.val + + def delete(self): + """Remove this sequence""" + self.db.delete_attributes(self.id) diff --git a/awx/lib/site-packages/boto/sdb/db/test_db.py b/awx/lib/site-packages/boto/sdb/db/test_db.py new file mode 100644 index 0000000000..b582bcee6d --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/db/test_db.py @@ -0,0 +1,231 @@ +import logging +import time +from datetime import datetime + +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty +from boto.sdb.db.property import DateTimeProperty, FloatProperty, ReferenceProperty +from boto.sdb.db.property import PasswordProperty, ListProperty, MapProperty +from boto.exception import SDBPersistenceError + +logging.basicConfig() +log = logging.getLogger('test_db') +log.setLevel(logging.DEBUG) + +_objects = {} + +# +# This will eventually be moved to the boto.tests module and become a real unit test +# but for now it will live here. It shows examples of each of the Property types in +# use and tests the basic operations. 
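
Before the test models below, a short usage sketch for the Sequence class just defined. The counter id and domain name here are hypothetical, and live SimpleDB access (AWS credentials) is assumed, since the constructor bootstraps the stored value immediately.

    from boto.sdb.db.sequence import Sequence, increment_string

    # The default incrementer is increment_by_one: init_val = fnc(None) = 0,
    # so the stored value starts at 0 and the first next() returns 1.
    seq = Sequence(id='build-counter', domain_name='my-counters')
    seq.next()    # 1
    seq.next()    # 2

    # String sequences plug in the same way via SequenceGenerator:
    rev = Sequence(id='revision', domain_name='my-counters',
                   fnc=increment_string)
    rev.next()    # 'B' -- the bootstrap stored 'A', the first element
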
+# +class TestBasic(Model): + + name = StringProperty() + size = IntegerProperty() + foo = BooleanProperty() + date = DateTimeProperty() + +class TestFloat(Model): + + name = StringProperty() + value = FloatProperty() + +class TestRequired(Model): + + req = StringProperty(required=True, default='foo') + +class TestReference(Model): + + ref = ReferenceProperty(reference_class=TestBasic, collection_name='refs') + +class TestSubClass(TestBasic): + + answer = IntegerProperty() + +class TestPassword(Model): + password = PasswordProperty() + +class TestList(Model): + + name = StringProperty() + nums = ListProperty(int) + +class TestMap(Model): + + name = StringProperty() + map = MapProperty() + +class TestListReference(Model): + + name = StringProperty() + basics = ListProperty(TestBasic) + +class TestAutoNow(Model): + + create_date = DateTimeProperty(auto_now_add=True) + modified_date = DateTimeProperty(auto_now=True) + +class TestUnique(Model): + name = StringProperty(unique=True) + +def test_basic(): + global _objects + t = TestBasic() + t.name = 'simple' + t.size = -42 + t.foo = True + t.date = datetime.now() + log.debug('saving object') + t.put() + _objects['test_basic_t'] = t + time.sleep(5) + log.debug('now try retrieving it') + tt = TestBasic.get_by_id(t.id) + _objects['test_basic_tt'] = tt + assert tt.id == t.id + l = TestBasic.get_by_id([t.id]) + assert len(l) == 1 + assert l[0].id == t.id + assert t.size == tt.size + assert t.foo == tt.foo + assert t.name == tt.name + #assert t.date == tt.date + return t + +def test_float(): + global _objects + t = TestFloat() + t.name = 'float object' + t.value = 98.6 + log.debug('saving object') + t.save() + _objects['test_float_t'] = t + time.sleep(5) + log.debug('now try retrieving it') + tt = TestFloat.get_by_id(t.id) + _objects['test_float_tt'] = tt + assert tt.id == t.id + assert tt.name == t.name + assert tt.value == t.value + return t + +def test_required(): + global _objects + t = TestRequired() + _objects['test_required_t'] = t + t.put() + return t + +def test_reference(t=None): + global _objects + if not t: + t = test_basic() + tt = TestReference() + tt.ref = t + tt.put() + time.sleep(10) + tt = TestReference.get_by_id(tt.id) + _objects['test_reference_tt'] = tt + assert tt.ref.id == t.id + for o in t.refs: + log.debug(o) + +def test_subclass(): + global _objects + t = TestSubClass() + _objects['test_subclass_t'] = t + t.name = 'a subclass' + t.size = -489 + t.save() + +def test_password(): + global _objects + t = TestPassword() + _objects['test_password_t'] = t + t.password = "foo" + t.save() + time.sleep(5) + # Make sure it stored ok + tt = TestPassword.get_by_id(t.id) + _objects['test_password_tt'] = tt + #Testing password equality + assert tt.password == "foo" + #Testing password not stored as string + assert str(tt.password) != "foo" + +def test_list(): + global _objects + t = TestList() + _objects['test_list_t'] = t + t.name = 'a list of ints' + t.nums = [1, 2, 3, 4, 5] + t.put() + tt = TestList.get_by_id(t.id) + _objects['test_list_tt'] = tt + assert tt.name == t.name + for n in tt.nums: + assert isinstance(n, int) + +def test_list_reference(): + global _objects + t = TestBasic() + t.put() + _objects['test_list_ref_t'] = t + tt = TestListReference() + tt.name = "foo" + tt.basics = [t] + tt.put() + time.sleep(5) + _objects['test_list_ref_tt'] = tt + ttt = TestListReference.get_by_id(tt.id) + assert ttt.basics[0].id == t.id + +def test_unique(): + global _objects + t = TestUnique() + name = 'foo' + str(int(time.time())) + t.name = 
name + t.put() + _objects['test_unique_t'] = t + time.sleep(10) + tt = TestUnique() + _objects['test_unique_tt'] = tt + tt.name = name + try: + tt.put() + assert False + except(SDBPersistenceError): + pass + +def test_datetime(): + global _objects + t = TestAutoNow() + t.put() + _objects['test_datetime_t'] = t + time.sleep(5) + tt = TestAutoNow.get_by_id(t.id) + assert tt.create_date.timetuple() == t.create_date.timetuple() + +def test(): + log.info('test_basic') + t1 = test_basic() + log.info('test_required') + test_required() + log.info('test_reference') + test_reference(t1) + log.info('test_subclass') + test_subclass() + log.info('test_password') + test_password() + log.info('test_list') + test_list() + log.info('test_list_reference') + test_list_reference() + log.info("test_datetime") + test_datetime() + log.info('test_unique') + test_unique() + +if __name__ == "__main__": + test() diff --git a/awx/lib/site-packages/boto/sdb/domain.py b/awx/lib/site-packages/boto/sdb/domain.py new file mode 100644 index 0000000000..d4faf04620 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/domain.py @@ -0,0 +1,377 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an SDB Domain +""" +from boto.sdb.queryresultset import SelectResultSet + +class Domain: + + def __init__(self, connection=None, name=None): + self.connection = connection + self.name = name + self._metadata = None + + def __repr__(self): + return 'Domain:%s' % self.name + + def __iter__(self): + return iter(self.select("SELECT * FROM `%s`" % self.name)) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'DomainName': + self.name = value + else: + setattr(self, name, value) + + def get_metadata(self): + if not self._metadata: + self._metadata = self.connection.domain_metadata(self) + return self._metadata + + def put_attributes(self, item_name, attributes, + replace=True, expected_value=None): + """ + Store attributes for a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being stored. + + :type attribute_names: dict or dict-like object + :param attribute_names: The name/value pairs to store as attributes + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. 
The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute + "name" of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or non-existence + (False) of the attribute. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + return self.connection.put_attributes(self, item_name, attributes, + replace, expected_value) + + def batch_put_attributes(self, items, replace=True): + """ + Store attributes for multiple items. + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are themselves dictionaries + of attribute names/values, exactly the same as the + attribute_names parameter of the scalar put_attributes + call. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + return self.connection.batch_put_attributes(self, items, replace) + + def get_attributes(self, item_name, attribute_name=None, + consistent_read=False, item=None): + """ + Retrieve attributes for a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being retrieved. + + :type attribute_names: string or list of strings + :param attribute_names: An attribute name or list of attribute names. This + parameter is optional. If not supplied, all attributes + will be retrieved for the item. + + :rtype: :class:`boto.sdb.item.Item` + :return: An Item mapping type containing the requested attribute name/values + """ + return self.connection.get_attributes(self, item_name, attribute_name, + consistent_read, item) + + def delete_attributes(self, item_name, attributes=None, + expected_values=None): + """ + Delete attributes from a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being deleted. + + :type attributes: dict, list or :class:`boto.sdb.item.Item` + :param attributes: Either a list containing attribute names which will cause + all values associated with that attribute name to be deleted or + a dict or Item containing the attribute names and keys and list + of values to delete as the value. If no value is supplied, + all attribute name/values for the item will be deleted. + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be of + the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute. 
+ + :rtype: bool + :return: True if successful + """ + return self.connection.delete_attributes(self, item_name, attributes, + expected_values) + + def batch_delete_attributes(self, items): + """ + Delete multiple items in this domain. + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are either: + + * dictionaries of attribute names/values, exactly the + same as the attribute_names parameter of the scalar + put_attributes call. The attribute name/value pairs + will only be deleted if they match the name/value + pairs passed in. + * None which means that all attributes associated + with the item should be deleted. + + :rtype: bool + :return: True if successful + """ + return self.connection.batch_delete_attributes(self, items) + + def select(self, query='', next_token=None, consistent_read=False, max_items=None): + """ + Returns a set of Attributes for item names within domain_name that match the query. + The query must be expressed in using the SELECT style syntax rather than the + original SimpleDB query language. + + :type query: string + :param query: The SimpleDB query to be performed. + + :rtype: iter + :return: An iterator containing the results. This is actually a generator + function that will iterate across all search results, not just the + first page. + """ + return SelectResultSet(self, query, max_items=max_items, next_token=next_token, + consistent_read=consistent_read) + + def get_item(self, item_name, consistent_read=False): + """ + Retrieves an item from the domain, along with all of its attributes. + + :param string item_name: The name of the item to retrieve. + :rtype: :class:`boto.sdb.item.Item` or ``None`` + :keyword bool consistent_read: When set to true, ensures that the most + recent data is returned. 
+ :return: The requested item, or ``None`` if there was no match found + """ + item = self.get_attributes(item_name, consistent_read=consistent_read) + if item: + item.domain = self + return item + else: + return None + + def new_item(self, item_name): + return self.connection.item_cls(self, item_name) + + def delete_item(self, item): + self.delete_attributes(item.name) + + def to_xml(self, f=None): + """Get this domain as an XML DOM Document + :param f: Optional File to dump directly to + :type f: File or Stream + + :return: File object where the XML has been dumped to + :rtype: file + """ + if not f: + from tempfile import TemporaryFile + f = TemporaryFile() + print >> f, '' + print >> f, '' % self.name + for item in self: + print >> f, '\t' % item.name + for k in item: + print >> f, '\t\t' % k + values = item[k] + if not isinstance(values, list): + values = [values] + for value in values: + print >> f, '\t\t\t> f, ']]>' + print >> f, '\t\t' + print >> f, '\t' + print >> f, '' + f.flush() + f.seek(0) + return f + + + def from_xml(self, doc): + """Load this domain based on an XML document""" + import xml.sax + handler = DomainDumpParser(self) + xml.sax.parse(doc, handler) + return handler + + def delete(self): + """ + Delete this domain, and all items under it + """ + return self.connection.delete_domain(self) + + +class DomainMetaData: + + def __init__(self, domain=None): + self.domain = domain + self.item_count = None + self.item_names_size = None + self.attr_name_count = None + self.attr_names_size = None + self.attr_value_count = None + self.attr_values_size = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ItemCount': + self.item_count = int(value) + elif name == 'ItemNamesSizeBytes': + self.item_names_size = int(value) + elif name == 'AttributeNameCount': + self.attr_name_count = int(value) + elif name == 'AttributeNamesSizeBytes': + self.attr_names_size = int(value) + elif name == 'AttributeValueCount': + self.attr_value_count = int(value) + elif name == 'AttributeValuesSizeBytes': + self.attr_values_size = int(value) + elif name == 'Timestamp': + self.timestamp = value + else: + setattr(self, name, value) + +import sys +from xml.sax.handler import ContentHandler +class DomainDumpParser(ContentHandler): + """ + SAX parser for a domain that has been dumped + """ + + def __init__(self, domain): + self.uploader = UploaderThread(domain) + self.item_id = None + self.attrs = {} + self.attribute = None + self.value = "" + self.domain = domain + + def startElement(self, name, attrs): + if name == "Item": + self.item_id = attrs['id'] + self.attrs = {} + elif name == "attribute": + self.attribute = attrs['id'] + elif name == "value": + self.value = "" + + def characters(self, ch): + self.value += ch + + def endElement(self, name): + if name == "value": + if self.value and self.attribute: + value = self.value.strip() + attr_name = self.attribute.strip() + if attr_name in self.attrs: + self.attrs[attr_name].append(value) + else: + self.attrs[attr_name] = [value] + elif name == "Item": + self.uploader.items[self.item_id] = self.attrs + # Every 20 items we spawn off the uploader + if len(self.uploader.items) >= 20: + self.uploader.start() + self.uploader = UploaderThread(self.domain) + elif name == "Domain": + # If we're done, spawn off our last Uploader Thread + self.uploader.start() + +from threading import Thread +class UploaderThread(Thread): + """Uploader Thread""" + + def __init__(self, domain): + 
self.db = domain + self.items = {} + Thread.__init__(self) + + def run(self): + try: + self.db.batch_put_attributes(self.items) + except: + print "Exception using batch put, trying regular put instead" + for item_name in self.items: + self.db.put_attributes(item_name, self.items[item_name]) + print ".", + sys.stdout.flush() diff --git a/awx/lib/site-packages/boto/sdb/item.py b/awx/lib/site-packages/boto/sdb/item.py new file mode 100644 index 0000000000..999c7f0b31 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/item.py @@ -0,0 +1,181 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import base64 + +class Item(dict): + """ + A ``dict`` sub-class that serves as an object representation of a + SimpleDB item. An item in SDB is similar to a row in a relational + database. Items belong to a :py:class:`Domain `, + which is similar to a table in a relational database. + + The keys on instances of this object correspond to attributes that are + stored on the SDB item. + + .. tip:: While it is possible to instantiate this class directly, you may + want to use the convenience methods on :py:class:`boto.sdb.domain.Domain` + for that purpose. For example, :py:meth:`boto.sdb.domain.Domain.get_item`. + """ + def __init__(self, domain, name='', active=False): + """ + :type domain: :py:class:`boto.sdb.domain.Domain` + :param domain: The domain that this item belongs to. + + :param str name: The name of this item. 
This name will be used when + querying for items using methods like + :py:meth:`boto.sdb.domain.Domain.get_item` + """ + dict.__init__(self) + self.domain = domain + self.name = name + self.active = active + self.request_id = None + self.encoding = None + self.in_attribute = False + self.converter = self.domain.connection.converter + + def startElement(self, name, attrs, connection): + if name == 'Attribute': + self.in_attribute = True + self.encoding = attrs.get('encoding', None) + return None + + def decode_value(self, value): + if self.encoding == 'base64': + self.encoding = None + return base64.decodestring(value) + else: + return value + + def endElement(self, name, value, connection): + if name == 'ItemName': + self.name = self.decode_value(value) + elif name == 'Name': + if self.in_attribute: + self.last_key = self.decode_value(value) + else: + self.name = self.decode_value(value) + elif name == 'Value': + if self.last_key in self: + if not isinstance(self[self.last_key], list): + self[self.last_key] = [self[self.last_key]] + value = self.decode_value(value) + if self.converter: + value = self.converter.decode(value) + self[self.last_key].append(value) + else: + value = self.decode_value(value) + if self.converter: + value = self.converter.decode(value) + self[self.last_key] = value + elif name == 'BoxUsage': + try: + connection.box_usage += float(value) + except: + pass + elif name == 'RequestId': + self.request_id = value + elif name == 'Attribute': + self.in_attribute = False + else: + setattr(self, name, value) + + def load(self): + """ + Loads or re-loads this item's attributes from SDB. + + .. warning:: + If you have changed attribute values on an Item instance, + this method will over-write the values if they are different in + SDB. For any local attributes that don't yet exist in SDB, + they will be safe. + """ + self.domain.get_attributes(self.name, item=self) + + def save(self, replace=True): + """ + Saves this item to SDB. + + :param bool replace: If ``True``, delete any attributes on the remote + SDB item that have a ``None`` value on this object. + """ + self.domain.put_attributes(self.name, self, replace) + # Delete any attributes set to "None" + if replace: + del_attrs = [] + for name in self: + if self[name] == None: + del_attrs.append(name) + if len(del_attrs) > 0: + self.domain.delete_attributes(self.name, del_attrs) + + def add_value(self, key, value): + """ + Helps set or add to attributes on this item. If you are adding a new + attribute that has yet to be set, it will simply create an attribute + named ``key`` with your given ``value`` as its value. If you are + adding a value to an existing attribute, this method will convert the + attribute to a list (if it isn't already) and append your new value + to said list. + + For clarification, consider the following interactive session: + + .. code-block:: python + + >>> item = some_domain.get_item('some_item') + >>> item.has_key('some_attr') + False + >>> item.add_value('some_attr', 1) + >>> item['some_attr'] + 1 + >>> item.add_value('some_attr', 2) + >>> item['some_attr'] + [1, 2] + + :param str key: The attribute to add a value to. + :param object value: The value to set or append to the attribute. + """ + if key in self: + # We already have this key on the item. + if not isinstance(self[key], list): + # The key isn't already a list, take its current value and + # convert it to a list with the only member being the + # current value. + self[key] = [self[key]] + # Add the new value to the list. 
+ self[key].append(value) + else: + # This is a new attribute, just set it. + self[key] = value + + def delete(self): + """ + Deletes this item in SDB. + + .. note:: This local Python object remains in its current state + after deletion, this only deletes the remote item in SDB. + """ + self.domain.delete_item(self) + + + + diff --git a/awx/lib/site-packages/boto/sdb/queryresultset.py b/awx/lib/site-packages/boto/sdb/queryresultset.py new file mode 100644 index 0000000000..10bafd1c92 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/queryresultset.py @@ -0,0 +1,92 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +def query_lister(domain, query='', max_items=None, attr_names=None): + more_results = True + num_results = 0 + next_token = None + while more_results: + rs = domain.connection.query_with_attributes(domain, query, attr_names, + next_token=next_token) + for item in rs: + if max_items: + if num_results == max_items: + raise StopIteration + yield item + num_results += 1 + next_token = rs.next_token + more_results = next_token != None + +class QueryResultSet: + + def __init__(self, domain=None, query='', max_items=None, attr_names=None): + self.max_items = max_items + self.domain = domain + self.query = query + self.attr_names = attr_names + + def __iter__(self): + return query_lister(self.domain, self.query, self.max_items, self.attr_names) + +def select_lister(domain, query='', max_items=None): + more_results = True + num_results = 0 + next_token = None + while more_results: + rs = domain.connection.select(domain, query, next_token=next_token) + for item in rs: + if max_items: + if num_results == max_items: + raise StopIteration + yield item + num_results += 1 + next_token = rs.next_token + more_results = next_token != None + +class SelectResultSet(object): + + def __init__(self, domain=None, query='', max_items=None, + next_token=None, consistent_read=False): + self.domain = domain + self.query = query + self.consistent_read = consistent_read + self.max_items = max_items + self.next_token = next_token + + def __iter__(self): + more_results = True + num_results = 0 + while more_results: + rs = self.domain.connection.select(self.domain, self.query, + next_token=self.next_token, + consistent_read=self.consistent_read) + for item in rs: + if self.max_items and num_results >= self.max_items: + raise StopIteration + yield item + num_results += 1 + self.next_token = rs.next_token + if self.max_items and num_results >= 
self.max_items: + raise StopIteration + more_results = self.next_token != None + + def next(self): + return self.__iter__().next() diff --git a/awx/lib/site-packages/boto/sdb/regioninfo.py b/awx/lib/site-packages/boto/sdb/regioninfo.py new file mode 100644 index 0000000000..5c32864d28 --- /dev/null +++ b/awx/lib/site-packages/boto/sdb/regioninfo.py @@ -0,0 +1,32 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo + +class SDBRegionInfo(RegionInfo): + + def __init__(self, connection=None, name=None, endpoint=None): + from boto.sdb.connection import SDBConnection + RegionInfo.__init__(self, connection, name, endpoint, + SDBConnection) diff --git a/awx/lib/site-packages/boto/services/__init__.py b/awx/lib/site-packages/boto/services/__init__.py new file mode 100644 index 0000000000..449bd162a8 --- /dev/null +++ b/awx/lib/site-packages/boto/services/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
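
A minimal sketch of driving the select machinery defined above; the domain name and query are hypothetical, and AWS credentials are assumed.

    import boto

    sdb = boto.connect_sdb()
    domain = sdb.get_domain('my-domain')
    # Domain.select() returns a SelectResultSet; iterating it re-issues
    # the select with the saved next_token until SDB stops paging, and
    # stops early once max_items results have been yielded.
    query = "select * from `my-domain` where `type` = 'Batch'"
    for item in domain.select(query, consistent_read=True, max_items=10):
        print(item.name)
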
+# + + diff --git a/awx/lib/site-packages/boto/services/bs.py b/awx/lib/site-packages/boto/services/bs.py new file mode 100644 index 0000000000..3d700315db --- /dev/null +++ b/awx/lib/site-packages/boto/services/bs.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from optparse import OptionParser +from boto.services.servicedef import ServiceDef +from boto.services.submit import Submitter +from boto.services.result import ResultProcessor +import boto +import sys, os, StringIO + +class BS(object): + + Usage = "usage: %prog [options] config_file command" + + Commands = {'reset' : 'Clear input queue and output bucket', + 'submit' : 'Submit local files to the service', + 'start' : 'Start the service', + 'status' : 'Report on the status of the service buckets and queues', + 'retrieve' : 'Retrieve output generated by a batch', + 'batches' : 'List all batches stored in current output_domain'} + + def __init__(self): + self.service_name = None + self.parser = OptionParser(usage=self.Usage) + self.parser.add_option("--help-commands", action="store_true", dest="help_commands", + help="provides help on the available commands") + self.parser.add_option("-a", "--access-key", action="store", type="string", + help="your AWS Access Key") + self.parser.add_option("-s", "--secret-key", action="store", type="string", + help="your AWS Secret Access Key") + self.parser.add_option("-p", "--path", action="store", type="string", dest="path", + help="the path to local directory for submit and retrieve") + self.parser.add_option("-k", "--keypair", action="store", type="string", dest="keypair", + help="the SSH keypair used with launched instance(s)") + self.parser.add_option("-l", "--leave", action="store_true", dest="leave", + help="leave the files (don't retrieve) files during retrieve command") + self.parser.set_defaults(leave=False) + self.parser.add_option("-n", "--num-instances", action="store", type="string", dest="num_instances", + help="the number of launched instance(s)") + self.parser.set_defaults(num_instances=1) + self.parser.add_option("-i", "--ignore-dirs", action="append", type="string", dest="ignore", + help="directories that should be ignored by submit command") + self.parser.add_option("-b", "--batch-id", action="store", type="string", dest="batch", + help="batch identifier required by the retrieve command") + + def print_command_help(self): + print '\nCommands:' + for key in self.Commands.keys(): + 
print ' %s\t\t%s' % (key, self.Commands[key]) + + def do_reset(self): + iq = self.sd.get_obj('input_queue') + if iq: + print 'clearing out input queue' + i = 0 + m = iq.read() + while m: + i += 1 + iq.delete_message(m) + m = iq.read() + print 'deleted %d messages' % i + ob = self.sd.get_obj('output_bucket') + ib = self.sd.get_obj('input_bucket') + if ob: + if ib and ob.name == ib.name: + return + print 'delete generated files in output bucket' + i = 0 + for k in ob: + i += 1 + k.delete() + print 'deleted %d keys' % i + + def do_submit(self): + if not self.options.path: + self.parser.error('No path provided') + if not os.path.exists(self.options.path): + self.parser.error('Invalid path (%s)' % self.options.path) + s = Submitter(self.sd) + t = s.submit_path(self.options.path, None, self.options.ignore, None, + None, True, self.options.path) + print 'A total of %d files were submitted' % t[1] + print 'Batch Identifier: %s' % t[0] + + def do_start(self): + ami_id = self.sd.get('ami_id') + instance_type = self.sd.get('instance_type', 'm1.small') + security_group = self.sd.get('security_group', 'default') + if not ami_id: + self.parser.error('ami_id option is required when starting the service') + ec2 = boto.connect_ec2() + if not self.sd.has_section('Credentials'): + self.sd.add_section('Credentials') + self.sd.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id) + self.sd.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key) + s = StringIO.StringIO() + self.sd.write(s) + rs = ec2.get_all_images([ami_id]) + img = rs[0] + r = img.run(user_data=s.getvalue(), key_name=self.options.keypair, + max_count=self.options.num_instances, + instance_type=instance_type, + security_groups=[security_group]) + print 'Starting AMI: %s' % ami_id + print 'Reservation %s contains the following instances:' % r.id + for i in r.instances: + print '\t%s' % i.id + + def do_status(self): + iq = self.sd.get_obj('input_queue') + if iq: + print 'The input_queue (%s) contains approximately %s messages' % (iq.id, iq.count()) + ob = self.sd.get_obj('output_bucket') + ib = self.sd.get_obj('input_bucket') + if ob: + if ib and ob.name == ib.name: + return + total = 0 + for k in ob: + total += 1 + print 'The output_bucket (%s) contains %d keys' % (ob.name, total) + + def do_retrieve(self): + if not self.options.path: + self.parser.error('No path provided') + if not os.path.exists(self.options.path): + self.parser.error('Invalid path (%s)' % self.options.path) + if not self.options.batch: + self.parser.error('batch identifier is required for retrieve command') + s = ResultProcessor(self.options.batch, self.sd) + s.get_results(self.options.path, get_file=(not self.options.leave)) + + def do_batches(self): + d = self.sd.get_obj('output_domain') + if d: + print 'Available Batches:' + rs = d.query("['type'='Batch']") + for item in rs: + print ' %s' % item.name + else: + self.parser.error('No output_domain specified for service') + + def main(self): + self.options, self.args = self.parser.parse_args() + if self.options.help_commands: + self.print_command_help() + sys.exit(0) + if len(self.args) != 2: + self.parser.error("config_file and command are required") + self.config_file = self.args[0] + self.sd = ServiceDef(self.config_file) + self.command = self.args[1] + if hasattr(self, 'do_%s' % self.command): + method = getattr(self, 'do_%s' % self.command) + method() + else: + self.parser.error('command (%s) not recognized' % self.command) + +if __name__ == "__main__": + bs = BS() + bs.main() diff --git 
a/awx/lib/site-packages/boto/services/message.py b/awx/lib/site-packages/boto/services/message.py new file mode 100644 index 0000000000..79f6d19f66 --- /dev/null +++ b/awx/lib/site-packages/boto/services/message.py @@ -0,0 +1,58 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sqs.message import MHMessage +from boto.utils import get_ts +from socket import gethostname +import os, mimetypes, time + +class ServiceMessage(MHMessage): + + def for_key(self, key, params=None, bucket_name=None): + if params: + self.update(params) + if key.path: + t = os.path.split(key.path) + self['OriginalLocation'] = t[0] + self['OriginalFileName'] = t[1] + mime_type = mimetypes.guess_type(t[1])[0] + if mime_type == None: + mime_type = 'application/octet-stream' + self['Content-Type'] = mime_type + s = os.stat(key.path) + t = time.gmtime(s[7]) + self['FileAccessedDate'] = get_ts(t) + t = time.gmtime(s[8]) + self['FileModifiedDate'] = get_ts(t) + t = time.gmtime(s[9]) + self['FileCreateDate'] = get_ts(t) + else: + self['OriginalFileName'] = key.name + self['OriginalLocation'] = key.bucket.name + self['ContentType'] = key.content_type + self['Host'] = gethostname() + if bucket_name: + self['Bucket'] = bucket_name + else: + self['Bucket'] = key.bucket.name + self['InputKey'] = key.name + self['Size'] = key.size + diff --git a/awx/lib/site-packages/boto/services/result.py b/awx/lib/site-packages/boto/services/result.py new file mode 100644 index 0000000000..4854976478 --- /dev/null +++ b/awx/lib/site-packages/boto/services/result.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import os
+from datetime import datetime, timedelta
+from boto.utils import parse_ts
+import boto
+
+class ResultProcessor:
+
+    LogFileName = 'log.csv'
+
+    def __init__(self, batch_name, sd, mimetype_files=None):
+        self.sd = sd
+        self.batch = batch_name
+        self.log_fp = None
+        self.num_files = 0
+        self.total_time = 0
+        self.min_time = timedelta.max
+        self.max_time = timedelta.min
+        self.earliest_time = datetime.max
+        self.latest_time = datetime.min
+        self.queue = self.sd.get_obj('output_queue')
+        self.domain = self.sd.get_obj('output_domain')
+
+    def calculate_stats(self, msg):
+        start_time = parse_ts(msg['Service-Read'])
+        end_time = parse_ts(msg['Service-Write'])
+        elapsed_time = end_time - start_time
+        if elapsed_time > self.max_time:
+            self.max_time = elapsed_time
+        if elapsed_time < self.min_time:
+            self.min_time = elapsed_time
+        self.total_time += elapsed_time.seconds
+        if start_time < self.earliest_time:
+            self.earliest_time = start_time
+        if end_time > self.latest_time:
+            self.latest_time = end_time
+
+    def log_message(self, msg, path):
+        keys = sorted(msg.keys())
+        if not self.log_fp:
+            self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
+            line = ','.join(keys)
+            self.log_fp.write(line+'\n')
+        values = []
+        for key in keys:
+            value = msg[key]
+            if value.find(',') > 0:
+                value = '"%s"' % value
+            values.append(value)
+        line = ','.join(values)
+        self.log_fp.write(line+'\n')
+
+    def process_record(self, record, path, get_file=True):
+        self.log_message(record, path)
+        self.calculate_stats(record)
+        outputs = record['OutputKey'].split(',')
+        if 'OutputBucket' in record:
+            bucket = boto.lookup('s3', record['OutputBucket'])
+        else:
+            bucket = boto.lookup('s3', record['Bucket'])
+        for output in outputs:
+            if get_file:
+                key_name = output.split(';')[0]
+                key = bucket.lookup(key_name)
+                file_name = os.path.join(path, key_name)
+                print 'retrieving file: %s to %s' % (key_name, file_name)
+                key.get_contents_to_filename(file_name)
+            self.num_files += 1
+
+    def get_results_from_queue(self, path, get_file=True, delete_msg=True):
+        m = self.queue.read()
+        while m:
+            if 'Batch' in m and m['Batch'] == self.batch:
+                self.process_record(m, path, get_file)
+                if delete_msg:
+                    self.queue.delete_message(m)
+            m = self.queue.read()
+
+    def get_results_from_domain(self, path, get_file=True):
+        rs = self.domain.query("['Batch'='%s']" % self.batch)
+        for item in rs:
+            self.process_record(item, path, get_file)
+
+    def get_results_from_bucket(self, path):
+        bucket = self.sd.get_obj('output_bucket')
+        if bucket:
+            print 'No output queue or domain, just retrieving files from output_bucket'
+            for key in bucket:
+                # key is a Key object; build the local path from its name
+                file_name = os.path.join(path, key.name)
+                print 'retrieving file: %s to %s' % (key.name, file_name)
+                key.get_contents_to_filename(file_name)
+                self.num_files += 1
+
+    def get_results(self, path, get_file=True, delete_msg=True):
+        if not os.path.isdir(path):
+            os.mkdir(path)
+        if self.queue:
+            self.get_results_from_queue(path, get_file, delete_msg)
+        elif self.domain:
+            self.get_results_from_domain(path, get_file)
+        else:
+            self.get_results_from_bucket(path)
+        if self.log_fp:
+            self.log_fp.close()
+        print '%d results successfully retrieved.'
% self.num_files + if self.num_files > 0: + self.avg_time = float(self.total_time)/self.num_files + print 'Minimum Processing Time: %d' % self.min_time.seconds + print 'Maximum Processing Time: %d' % self.max_time.seconds + print 'Average Processing Time: %f' % self.avg_time + self.elapsed_time = self.latest_time-self.earliest_time + print 'Elapsed Time: %d' % self.elapsed_time.seconds + tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files) + print 'Throughput: %f transactions / minute' % tput + diff --git a/awx/lib/site-packages/boto/services/service.py b/awx/lib/site-packages/boto/services/service.py new file mode 100644 index 0000000000..e0e987ce86 --- /dev/null +++ b/awx/lib/site-packages/boto/services/service.py @@ -0,0 +1,161 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
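For context, the ResultProcessor vendored above is normally driven from a service definition file, which is what bs.py's retrieve command does. A minimal sketch, not part of the vendored sources; the config path, batch identifier, and output directory are placeholders:

    from boto.services.servicedef import ServiceDef
    from boto.services.result import ResultProcessor

    sd = ServiceDef('myservice.cfg')               # placeholder config file
    proc = ResultProcessor('2013_10_4_batch', sd)  # placeholder batch id
    # get_results() prefers the output queue, falls back to the SDB output
    # domain, and only scans the output bucket when neither is configured
    proc.get_results('/tmp/results', get_file=True)
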
+
+import boto
+from boto.services.message import ServiceMessage
+from boto.services.servicedef import ServiceDef
+from boto.pyami.scriptbase import ScriptBase
+from boto.utils import get_ts
+import time
+import os
+import mimetypes
+
+
+class Service(ScriptBase):
+
+    # Time required to process a transaction
+    ProcessingTime = 60
+
+    def __init__(self, config_file=None, mimetype_files=None):
+        ScriptBase.__init__(self, config_file)
+        self.name = self.__class__.__name__
+        self.working_dir = boto.config.get('Pyami', 'working_dir')
+        self.sd = ServiceDef(config_file)
+        self.retry_count = self.sd.getint('retry_count', 5)
+        self.loop_delay = self.sd.getint('loop_delay', 30)
+        self.processing_time = self.sd.getint('processing_time', 60)
+        self.input_queue = self.sd.get_obj('input_queue')
+        self.output_queue = self.sd.get_obj('output_queue')
+        self.output_domain = self.sd.get_obj('output_domain')
+        if mimetype_files:
+            mimetypes.init(mimetype_files)
+
+    def split_key(self, key):
+        if key.find(';') < 0:
+            t = (key, '')
+        else:
+            key, type = key.split(';')
+            label, mtype = type.split('=')
+            t = (key, mtype)
+        return t
+
+    def read_message(self):
+        boto.log.info('read_message')
+        message = self.input_queue.read(self.processing_time)
+        if message:
+            boto.log.info(message.get_body())
+            key = 'Service-Read'
+            message[key] = get_ts()
+        return message
+
+    # retrieve the source file from S3
+    def get_file(self, message):
+        bucket_name = message['Bucket']
+        key_name = message['InputKey']
+        file_name = os.path.join(self.working_dir, message.get('OriginalFileName', 'in_file'))
+        boto.log.info('get_file: %s/%s to %s' % (bucket_name, key_name, file_name))
+        bucket = boto.lookup('s3', bucket_name)
+        key = bucket.new_key(key_name)
+        # file_name already includes self.working_dir
+        key.get_contents_to_filename(file_name)
+        return file_name
+
+    # process source file, return list of output files
+    def process_file(self, in_file_name, msg):
+        return []
+
+    # store result file in S3
+    def put_file(self, bucket_name, file_path, key_name=None):
+        boto.log.info('putting file %s as %s.%s' % (file_path, bucket_name, key_name))
+        bucket = boto.lookup('s3', bucket_name)
+        key = bucket.new_key(key_name)
+        key.set_contents_from_filename(file_path)
+        return key
+
+    def save_results(self, results, input_message, output_message):
+        output_keys = []
+        for file, type in results:
+            if 'OutputBucket' in input_message:
+                output_bucket = input_message['OutputBucket']
+            else:
+                output_bucket = input_message['Bucket']
+            key_name = os.path.split(file)[1]
+            key = self.put_file(output_bucket, file, key_name)
+            output_keys.append('%s;type=%s' % (key.name, type))
+        output_message['OutputKey'] = ','.join(output_keys)
+
+    # write message to each output queue
+    def write_message(self, message):
+        message['Service-Write'] = get_ts()
+        message['Server'] = self.name
+        if 'HOSTNAME' in os.environ:
+            message['Host'] = os.environ['HOSTNAME']
+        else:
+            message['Host'] = 'unknown'
+        message['Instance-ID'] = self.instance_id
+        if self.output_queue:
+            boto.log.info('Writing message to SQS queue: %s' % self.output_queue.id)
+            self.output_queue.write(message)
+        if self.output_domain:
+            boto.log.info('Writing message to SDB domain: %s' % self.output_domain.name)
+            item_name = '/'.join([message['Service-Write'], message['Bucket'], message['InputKey']])
+            self.output_domain.put_attributes(item_name, message)
+
+    # delete message from input queue
+    def delete_message(self, message):
+        boto.log.info('deleting message from %s' % self.input_queue.id)
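+        # deleting the input message is what acknowledges the work item:
+        # read_message() fetched it with a visibility timeout of
+        # processing_time seconds, and SQS would redeliver it once that
+        # timeout expired
+        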
self.input_queue.delete_message(message) + + # to clean up any files, etc. after each iteration + def cleanup(self): + pass + + def shutdown(self): + on_completion = self.sd.get('on_completion', 'shutdown') + if on_completion == 'shutdown': + if self.instance_id: + time.sleep(60) + c = boto.connect_ec2() + c.terminate_instances([self.instance_id]) + + def main(self, notify=False): + self.notify('Service: %s Starting' % self.name) + empty_reads = 0 + while self.retry_count < 0 or empty_reads < self.retry_count: + try: + input_message = self.read_message() + if input_message: + empty_reads = 0 + output_message = ServiceMessage(None, input_message.get_body()) + input_file = self.get_file(input_message) + results = self.process_file(input_file, output_message) + self.save_results(results, input_message, output_message) + self.write_message(output_message) + self.delete_message(input_message) + self.cleanup() + else: + empty_reads += 1 + time.sleep(self.loop_delay) + except Exception: + boto.log.exception('Service Failed') + empty_reads += 1 + self.notify('Service: %s Shutting Down' % self.name) + self.shutdown() + diff --git a/awx/lib/site-packages/boto/services/servicedef.py b/awx/lib/site-packages/boto/services/servicedef.py new file mode 100644 index 0000000000..1cb01aa754 --- /dev/null +++ b/awx/lib/site-packages/boto/services/servicedef.py @@ -0,0 +1,91 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
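For context on the Service class vendored above: a concrete service normally overrides only process_file(), which receives the downloaded input file plus the output message and returns a list of (file_name, mime_type) tuples that save_results() uploads. A minimal hypothetical subclass, not part of the vendored sources:

    from boto.services.service import Service

    # hypothetical example subclass, for illustration only
    class UpperCaseService(Service):

        def process_file(self, in_file_name, msg):
            # write an uppercased copy of the input next to it
            out_file_name = in_file_name + '.out'
            fp = open(in_file_name)
            data = fp.read()
            fp.close()
            fp = open(out_file_name, 'w')
            fp.write(data.upper())
            fp.close()
            # each tuple is (local file name, mime type)
            return [(out_file_name, 'text/plain')]

main() then uploads the returned files, writes the output message, and deletes the input message, so a subclass rarely needs to touch the queue plumbing itself.
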
+ +from boto.pyami.config import Config +from boto.services.message import ServiceMessage +import boto + +class ServiceDef(Config): + + def __init__(self, config_file, aws_access_key_id=None, aws_secret_access_key=None): + Config.__init__(self, config_file) + self.aws_access_key_id = aws_access_key_id + self.aws_secret_access_key = aws_secret_access_key + script = Config.get(self, 'Pyami', 'scripts') + if script: + self.name = script.split('.')[-1] + else: + self.name = None + + + def get(self, name, default=None): + return Config.get(self, self.name, name, default) + + def has_option(self, option): + return Config.has_option(self, self.name, option) + + def getint(self, option, default=0): + try: + val = Config.get(self, self.name, option) + val = int(val) + except: + val = int(default) + return val + + def getbool(self, option, default=False): + try: + val = Config.get(self, self.name, option) + if val.lower() == 'true': + val = True + else: + val = False + except: + val = default + return val + + def get_obj(self, name): + """ + Returns the AWS object associated with a given option. + + The heuristics used are a bit lame. If the option name contains + the word 'bucket' it is assumed to be an S3 bucket, if the name + contains the word 'queue' it is assumed to be an SQS queue and + if it contains the word 'domain' it is assumed to be a SimpleDB + domain. If the option name specified does not exist in the + config file or if the AWS object cannot be retrieved this + returns None. + """ + val = self.get(name) + if not val: + return None + if name.find('queue') >= 0: + obj = boto.lookup('sqs', val) + if obj: + obj.set_message_class(ServiceMessage) + elif name.find('bucket') >= 0: + obj = boto.lookup('s3', val) + elif name.find('domain') >= 0: + obj = boto.lookup('sdb', val) + else: + obj = None + return obj + + diff --git a/awx/lib/site-packages/boto/services/sonofmmm.cfg b/awx/lib/site-packages/boto/services/sonofmmm.cfg new file mode 100644 index 0000000000..d70d3794d5 --- /dev/null +++ b/awx/lib/site-packages/boto/services/sonofmmm.cfg @@ -0,0 +1,43 @@ +# +# Your AWS Credentials +# You only need to supply these in this file if you are not using +# the boto tools to start your service +# +#[Credentials] +#aws_access_key_id = +#aws_secret_access_key = + +# +# Fill out this section if you want emails from the service +# when it starts and stops +# +#[Notification] +#smtp_host = +#smtp_user = +#smtp_pass = +#smtp_from = +#smtp_to = + +[Pyami] +scripts = boto.services.sonofmmm.SonOfMMM + +[SonOfMMM] +# id of the AMI to be launched +ami_id = ami-dc799cb5 +# number of times service will read an empty queue before exiting +# a negative value will cause the service to run forever +retry_count = 5 +# seconds to wait after empty queue read before reading again +loop_delay = 10 +# average time it takes to process a transaction +# controls invisibility timeout of messages +processing_time = 60 +ffmpeg_args = -y -i %%s -f mov -r 29.97 -b 1200kb -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -ar 48000 -ab 19200 -s 320x240 -vcodec mpeg4 -acodec libfaac %%s +output_mimetype = video/quicktime +output_ext = .mov +input_bucket = +output_bucket = +output_domain = +output_queue = +input_queue = + diff --git a/awx/lib/site-packages/boto/services/sonofmmm.py b/awx/lib/site-packages/boto/services/sonofmmm.py new file mode 100644 index 0000000000..acb7e61067 --- /dev/null +++ b/awx/lib/site-packages/boto/services/sonofmmm.py @@ -0,0 +1,81 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# 
Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto.services.service import Service
+from boto.services.message import ServiceMessage
+import os
+import mimetypes
+
+class SonOfMMM(Service):
+
+    def __init__(self, config_file=None):
+        Service.__init__(self, config_file)
+        self.log_file = '%s.log' % self.instance_id
+        self.log_path = os.path.join(self.working_dir, self.log_file)
+        boto.set_file_logger(self.name, self.log_path)
+        if self.sd.has_option('ffmpeg_args'):
+            self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args')
+        else:
+            self.command = '/usr/local/bin/ffmpeg -y -i %s %s'
+        self.output_mimetype = self.sd.get('output_mimetype')
+        if self.sd.has_option('output_ext'):
+            self.output_ext = self.sd.get('output_ext')
+        else:
+            self.output_ext = mimetypes.guess_extension(self.output_mimetype)
+        self.output_bucket = self.sd.get_obj('output_bucket')
+        self.input_bucket = self.sd.get_obj('input_bucket')
+        # check to see if there are any messages in the queue
+        # if not, create messages for all files in input_bucket
+        m = self.input_queue.read(1)
+        if not m:
+            self.queue_files()
+
+    def queue_files(self):
+        boto.log.info('Queueing files from %s' % self.input_bucket.name)
+        for key in self.input_bucket:
+            boto.log.info('Queueing %s' % key.name)
+            m = ServiceMessage()
+            if self.output_bucket:
+                d = {'OutputBucket' : self.output_bucket.name}
+            else:
+                d = None
+            m.for_key(key, d)
+            self.input_queue.write(m)
+
+    def process_file(self, in_file_name, msg):
+        base, ext = os.path.splitext(in_file_name)
+        out_file_name = os.path.join(self.working_dir,
+                                     base+self.output_ext)
+        command = self.command % (in_file_name, out_file_name)
+        boto.log.info('running:\n%s' % command)
+        status = self.run(command)
+        if status == 0:
+            return [(out_file_name, self.output_mimetype)]
+        else:
+            return []
+
+    def shutdown(self):
+        if os.path.isfile(self.log_path):
+            if self.output_bucket:
+                key = self.output_bucket.new_key(self.log_file)
+                key.set_contents_from_filename(self.log_path)
+        Service.shutdown(self)
diff --git a/awx/lib/site-packages/boto/services/submit.py b/awx/lib/site-packages/boto/services/submit.py
new file mode 100644
index 0000000000..89c439c525
--- /dev/null
+++ b/awx/lib/site-packages/boto/services/submit.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction,
including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import time +import os + + +class Submitter: + + def __init__(self, sd): + self.sd = sd + self.input_bucket = self.sd.get_obj('input_bucket') + self.output_bucket = self.sd.get_obj('output_bucket') + self.output_domain = self.sd.get_obj('output_domain') + self.queue = self.sd.get_obj('input_queue') + + def get_key_name(self, fullpath, prefix): + key_name = fullpath[len(prefix):] + l = key_name.split(os.sep) + return '/'.join(l) + + def write_message(self, key, metadata): + if self.queue: + m = self.queue.new_message() + m.for_key(key, metadata) + if self.output_bucket: + m['OutputBucket'] = self.output_bucket.name + self.queue.write(m) + + def submit_file(self, path, metadata=None, cb=None, num_cb=0, prefix='/'): + if not metadata: + metadata = {} + key_name = self.get_key_name(path, prefix) + k = self.input_bucket.new_key(key_name) + k.update_metadata(metadata) + k.set_contents_from_filename(path, replace=False, cb=cb, num_cb=num_cb) + self.write_message(k, metadata) + + def submit_path(self, path, tags=None, ignore_dirs=None, cb=None, num_cb=0, status=False, prefix='/'): + path = os.path.expanduser(path) + path = os.path.expandvars(path) + path = os.path.abspath(path) + total = 0 + metadata = {} + if tags: + metadata['Tags'] = tags + l = [] + for t in time.gmtime(): + l.append(str(t)) + metadata['Batch'] = '_'.join(l) + if self.output_domain: + self.output_domain.put_attributes(metadata['Batch'], {'type' : 'Batch'}) + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + if ignore_dirs: + for ignore in ignore_dirs: + if ignore in dirs: + dirs.remove(ignore) + for file in files: + fullpath = os.path.join(root, file) + if status: + print 'Submitting %s' % fullpath + self.submit_file(fullpath, metadata, cb, num_cb, prefix) + total += 1 + elif os.path.isfile(path): + self.submit_file(path, metadata, cb, num_cb) + total += 1 + else: + print 'problem with %s' % path + return (metadata['Batch'], total) diff --git a/awx/lib/site-packages/boto/ses/__init__.py b/awx/lib/site-packages/boto/ses/__init__.py new file mode 100644 index 0000000000..b3d03ae3d5 --- /dev/null +++ b/awx/lib/site-packages/boto/ses/__init__.py @@ -0,0 +1,54 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Harry Marr http://hmarr.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished 
to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from connection import SESConnection +from boto.regioninfo import RegionInfo + + +def regions(): + """ + Get all available regions for the SES service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + return [RegionInfo(name='us-east-1', + endpoint='email.us-east-1.amazonaws.com', + connection_cls=SESConnection)] + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ses.connection.SESConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.ses.connection.SESConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/ses/connection.py b/awx/lib/site-packages/boto/ses/connection.py new file mode 100644 index 0000000000..8652042db1 --- /dev/null +++ b/awx/lib/site-packages/boto/ses/connection.py @@ -0,0 +1,522 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Harry Marr http://hmarr.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
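Since ses/__init__.py above exposes only regions() and connect_to_region(), here is a short hedged sketch of how the connection class that follows is reached in practice. Not part of the vendored sources; both addresses are placeholders, and SES rejects unverified senders and recipients while an account is in the sandbox:

    import boto.ses

    conn = boto.ses.connect_to_region('us-east-1')
    # triggers a confirmation mail to the placeholder address
    conn.verify_email_identity('sender@example.com')
    conn.send_email(source='sender@example.com',
                    subject='Test message',
                    body='Sent via the vendored boto.ses module.',
                    to_addresses=['recipient@example.com'])
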
+import re +import urllib +import base64 + +from boto.connection import AWSAuthConnection +from boto.exception import BotoServerError +from boto.regioninfo import RegionInfo +import boto +import boto.jsonresponse +from boto.ses import exceptions as ses_exceptions + + +class SESConnection(AWSAuthConnection): + + ResponseError = BotoServerError + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'email.us-east-1.amazonaws.com' + APIVersion = '2010-12-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + AWSAuthConnection.__init__(self, self.region.endpoint, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs) + + def _required_auth_capability(self): + return ['ses'] + + def _build_list_params(self, params, items, label): + """Add an AWS API-compatible parameter list to a dictionary. + + :type params: dict + :param params: The parameter dictionary + + :type items: list + :param items: Items to be included in the list + + :type label: string + :param label: The parameter list's name + """ + if isinstance(items, basestring): + items = [items] + for i in range(1, len(items) + 1): + params['%s.%d' % (label, i)] = items[i - 1] + + def _make_request(self, action, params=None): + """Make a call to the SES API. + + :type action: string + :param action: The API method to use (e.g. SendRawEmail) + + :type params: dict + :param params: Parameters that will be sent as POST data with the API + call. + """ + ct = 'application/x-www-form-urlencoded; charset=UTF-8' + headers = {'Content-Type': ct} + params = params or {} + params['Action'] = action + + for k, v in params.items(): + if isinstance(v, unicode): # UTF-8 encode only if it's Unicode + params[k] = v.encode('utf-8') + + response = super(SESConnection, self).make_request( + 'POST', + '/', + headers=headers, + data=urllib.urlencode(params) + ) + body = response.read() + if response.status == 200: + list_markers = ('VerifiedEmailAddresses', 'Identities', + 'DkimTokens', 'VerificationAttributes', + 'SendDataPoints') + item_markers = ('member', 'item', 'entry') + + e = boto.jsonresponse.Element(list_marker=list_markers, + item_marker=item_markers) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + else: + # HTTP codes other than 200 are considered errors. Go through + # some error handling to determine which exception gets raised, + self._handle_error(response, body) + + def _handle_error(self, response, body): + """ + Handle raising the correct exception, depending on the error. Many + errors share the same HTTP response code, meaning we have to get really + kludgey and do string searches to figure out what went wrong. + """ + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + + if "Address blacklisted." in body: + # Delivery failures happened frequently enough with the recipient's + # email address for Amazon to blacklist it. After a day or three, + # they'll be automatically removed, and delivery can be attempted + # again (if you write the code to do so in your application). 
+ ExceptionToRaise = ses_exceptions.SESAddressBlacklistedError + exc_reason = "Address blacklisted." + elif "Email address is not verified." in body: + # This error happens when the "Reply-To" value passed to + # send_email() hasn't been verified yet. + ExceptionToRaise = ses_exceptions.SESAddressNotVerifiedError + exc_reason = "Email address is not verified." + elif "Daily message quota exceeded." in body: + # Encountered when your account exceeds the maximum total number + # of emails per 24 hours. + ExceptionToRaise = ses_exceptions.SESDailyQuotaExceededError + exc_reason = "Daily message quota exceeded." + elif "Maximum sending rate exceeded." in body: + # Your account has sent above its allowed requests a second rate. + ExceptionToRaise = ses_exceptions.SESMaxSendingRateExceededError + exc_reason = "Maximum sending rate exceeded." + elif "Domain ends with dot." in body: + # Recipient address ends with a dot/period. This is invalid. + ExceptionToRaise = ses_exceptions.SESDomainEndsWithDotError + exc_reason = "Domain ends with dot." + elif "Local address contains control or whitespace" in body: + # I think this pertains to the recipient address. + ExceptionToRaise = ses_exceptions.SESLocalAddressCharacterError + exc_reason = "Local address contains control or whitespace." + elif "Illegal address" in body: + # A clearly mal-formed address. + ExceptionToRaise = ses_exceptions.SESIllegalAddressError + exc_reason = "Illegal address" + # The re.search is to distinguish from the + # SESAddressNotVerifiedError error above. + elif re.search('Identity.*is not verified', body): + ExceptionToRaise = ses_exceptions.SESIdentityNotVerifiedError + exc_reason = "Identity is not verified." + elif "ownership not confirmed" in body: + ExceptionToRaise = ses_exceptions.SESDomainNotConfirmedError + exc_reason = "Domain ownership is not confirmed." + else: + # This is either a common AWS error, or one that we don't devote + # its own exception to. + ExceptionToRaise = self.ResponseError + exc_reason = response.reason + + raise ExceptionToRaise(response.status, exc_reason, body) + + def send_email(self, source, subject, body, to_addresses, + cc_addresses=None, bcc_addresses=None, + format='text', reply_addresses=None, + return_path=None, text_body=None, html_body=None): + """Composes an email message based on input data, and then immediately + queues the message for sending. + + :type source: string + :param source: The sender's email address. + + :type subject: string + :param subject: The subject of the message: A short summary of the + content, which will appear in the recipient's inbox. + + :type body: string + :param body: The message body. + + :type to_addresses: list of strings or string + :param to_addresses: The To: field(s) of the message. + + :type cc_addresses: list of strings or string + :param cc_addresses: The CC: field(s) of the message. + + :type bcc_addresses: list of strings or string + :param bcc_addresses: The BCC: field(s) of the message. + + :type format: string + :param format: The format of the message's body, must be either "text" + or "html". + + :type reply_addresses: list of strings or string + :param reply_addresses: The reply-to email address(es) for the + message. If the recipient replies to the + message, each reply-to address will + receive the reply. + + :type return_path: string + :param return_path: The email address to which bounce notifications are + to be forwarded. 
If the message cannot be delivered + to the recipient, then an error message will be + returned from the recipient's ISP; this message + will then be forwarded to the email address + specified by the ReturnPath parameter. + + :type text_body: string + :param text_body: The text body to send with this email. + + :type html_body: string + :param html_body: The html body to send with this email. + + """ + format = format.lower().strip() + if body is not None: + if format == "text": + if text_body is not None: + raise Warning("You've passed in both a body and a " + "text_body; please choose one or the other.") + text_body = body + else: + if html_body is not None: + raise Warning("You've passed in both a body and an " + "html_body; please choose one or the other.") + html_body = body + + params = { + 'Source': source, + 'Message.Subject.Data': subject, + } + + if return_path: + params['ReturnPath'] = return_path + + if html_body is not None: + params['Message.Body.Html.Data'] = html_body + if text_body is not None: + params['Message.Body.Text.Data'] = text_body + + if(format not in ("text", "html")): + raise ValueError("'format' argument must be 'text' or 'html'") + + if(not (html_body or text_body)): + raise ValueError("No text or html body found for mail") + + self._build_list_params(params, to_addresses, + 'Destination.ToAddresses.member') + if cc_addresses: + self._build_list_params(params, cc_addresses, + 'Destination.CcAddresses.member') + + if bcc_addresses: + self._build_list_params(params, bcc_addresses, + 'Destination.BccAddresses.member') + + if reply_addresses: + self._build_list_params(params, reply_addresses, + 'ReplyToAddresses.member') + + return self._make_request('SendEmail', params) + + def send_raw_email(self, raw_message, source=None, destinations=None): + """Sends an email message, with header and content specified by the + client. The SendRawEmail action is useful for sending multipart MIME + emails, with attachments or inline content. The raw text of the message + must comply with Internet email standards; otherwise, the message + cannot be sent. + + :type source: string + :param source: The sender's email address. Amazon's docs say: + + If you specify the Source parameter, then bounce notifications and + complaints will be sent to this email address. This takes precedence + over any Return-Path header that you might include in the raw text of + the message. + + :type raw_message: string + :param raw_message: The raw text of the message. The client is + responsible for ensuring the following: + + - Message must contain a header and a body, separated by a blank line. + - All required header fields must be present. + - Each part of a multipart MIME message must be formatted properly. + - MIME content types must be among those supported by Amazon SES. + Refer to the Amazon SES Developer Guide for more details. + - Content must be base64-encoded, if MIME requires it. + + :type destinations: list of strings or string + :param destinations: A list of destinations for the message. + + """ + + if isinstance(raw_message, unicode): + raw_message = raw_message.encode('utf-8') + + params = { + 'RawMessage.Data': base64.b64encode(raw_message), + } + + if source: + params['Source'] = source + + if destinations: + self._build_list_params(params, destinations, + 'Destinations.member') + + return self._make_request('SendRawEmail', params) + + def list_verified_email_addresses(self): + """Fetch a list of the email addresses that have been verified. 
+
+        :rtype: dict
+        :returns: A ListVerifiedEmailAddressesResponse structure. Note that
+                  keys must be unicode strings.
+        """
+        return self._make_request('ListVerifiedEmailAddresses')
+
+    def get_send_quota(self):
+        """Fetches the user's current activity limits.
+
+        :rtype: dict
+        :returns: A GetSendQuotaResponse structure. Note that keys must be
+                  unicode strings.
+        """
+        return self._make_request('GetSendQuota')
+
+    def get_send_statistics(self):
+        """Fetches the user's sending statistics. The result is a list of data
+        points, representing the last two weeks of sending activity.
+
+        Each data point in the list contains statistics for a 15-minute
+        interval.
+
+        :rtype: dict
+        :returns: A GetSendStatisticsResponse structure. Note that keys must be
+                  unicode strings.
+        """
+        return self._make_request('GetSendStatistics')
+
+    def delete_verified_email_address(self, email_address):
+        """Deletes the specified email address from the list of verified
+        addresses.
+
+        :type email_address: string
+        :param email_address: The email address to be removed from the list of
+                              verified addresses.
+
+        :rtype: dict
+        :returns: A DeleteVerifiedEmailAddressResponse structure. Note that
+                  keys must be unicode strings.
+        """
+        return self._make_request('DeleteVerifiedEmailAddress', {
+            'EmailAddress': email_address,
+        })
+
+    def verify_email_address(self, email_address):
+        """Verifies an email address. This action causes a confirmation email
+        message to be sent to the specified address.
+
+        :type email_address: string
+        :param email_address: The email address to be verified.
+
+        :rtype: dict
+        :returns: A VerifyEmailAddressResponse structure. Note that keys must
+                  be unicode strings.
+        """
+        return self._make_request('VerifyEmailAddress', {
+            'EmailAddress': email_address,
+        })
+
+    def verify_domain_dkim(self, domain):
+        """
+        Returns a set of DNS records, or tokens, that must be published in the
+        domain name's DNS to complete the DKIM verification process. These
+        tokens are DNS ``CNAME`` records that point to DKIM public keys hosted
+        by Amazon SES. To complete the DKIM verification process, these tokens
+        must be published in the domain's DNS. The tokens must remain
+        published in order for Easy DKIM signing to function correctly.
+
+        After the tokens are added to the domain's DNS, Amazon SES will be able
+        to DKIM-sign email originating from that domain. To enable or disable
+        Easy DKIM signing for a domain, use the ``SetIdentityDkimEnabled``
+        action. For more information about Easy DKIM, go to the `Amazon SES
+        Developer Guide
+        `_.
+
+        :type domain: string
+        :param domain: The domain name.
+
+        """
+        return self._make_request('VerifyDomainDkim', {
+            'Domain': domain,
+        })
+
+    def set_identity_dkim_enabled(self, identity, dkim_enabled):
+        """Enables or disables DKIM signing of email sent from an identity.
+
+        * If Easy DKIM signing is enabled for a domain name identity
+          (e.g., ``example.com``), then Amazon SES will DKIM-sign all email
+          sent by addresses under that domain name (e.g., ``user@example.com``)
+        * If Easy DKIM signing is enabled for an email address, then Amazon SES
+          will DKIM-sign all email sent by that email address.
+
+        For email addresses (e.g., ``user@example.com``), you can only enable
+        Easy DKIM signing if the corresponding domain (e.g., ``example.com``)
+        has been set up for Easy DKIM using the AWS Console or the
+        ``VerifyDomainDkim`` action.
+
+        :type identity: string
+        :param identity: An email address or domain name.
+
+        :type dkim_enabled: bool
+        :param dkim_enabled: Specifies whether or not to enable DKIM signing.
+
+        """
+        return self._make_request('SetIdentityDkimEnabled', {
+            'Identity': identity,
+            'DkimEnabled': 'true' if dkim_enabled else 'false'
+        })
+
+    def get_identity_dkim_attributes(self, identities):
+        """Get attributes associated with a list of verified identities.
+
+        Given a list of verified identities (email addresses and/or domains),
+        returns a structure describing identity notification attributes.
+
+        :type identities: list
+        :param identities: A list of verified identities (email addresses
+                           and/or domains).
+
+        """
+        params = {}
+        self._build_list_params(params, identities, 'Identities.member')
+        return self._make_request('GetIdentityDkimAttributes', params)
+
+    def list_identities(self):
+        """Returns a list containing all of the identities (email addresses
+        and domains) for a specific AWS Account, regardless of
+        verification status.
+
+        :rtype: dict
+        :returns: A ListIdentitiesResponse structure. Note that
+                  keys must be unicode strings.
+        """
+        return self._make_request('ListIdentities')
+
+    def get_identity_verification_attributes(self, identities):
+        """Given a list of identities (email addresses and/or domains),
+        returns the verification status and (for domain identities)
+        the verification token for each identity.
+
+        :type identities: list of strings or string
+        :param identities: List of identities.
+
+        :rtype: dict
+        :returns: A GetIdentityVerificationAttributesResponse structure.
+                  Note that keys must be unicode strings.
+        """
+        params = {}
+        self._build_list_params(params, identities,
+                                'Identities.member')
+        return self._make_request('GetIdentityVerificationAttributes', params)
+
+    def verify_domain_identity(self, domain):
+        """Verifies a domain.
+
+        :type domain: string
+        :param domain: The domain to be verified.
+
+        :rtype: dict
+        :returns: A VerifyDomainIdentityResponse structure. Note that
+                  keys must be unicode strings.
+        """
+        return self._make_request('VerifyDomainIdentity', {
+            'Domain': domain,
+        })
+
+    def verify_email_identity(self, email_address):
+        """Verifies an email address. This action causes a confirmation
+        email message to be sent to the specified address.
+
+        :type email_address: string
+        :param email_address: The email address to be verified.
+
+        :rtype: dict
+        :returns: A VerifyEmailIdentityResponse structure. Note that keys must
+                  be unicode strings.
+        """
+        return self._make_request('VerifyEmailIdentity', {
+            'EmailAddress': email_address,
+        })
+
+    def delete_identity(self, identity):
+        """Deletes the specified identity (email address or domain) from
+        the list of verified identities.
+
+        :type identity: string
+        :param identity: The identity to be deleted.
+
+        :rtype: dict
+        :returns: A DeleteIdentityResponse structure. Note that keys must
+                  be unicode strings.
+        """
+        return self._make_request('DeleteIdentity', {
+            'Identity': identity,
+        })
diff --git a/awx/lib/site-packages/boto/ses/exceptions.py b/awx/lib/site-packages/boto/ses/exceptions.py
new file mode 100644
index 0000000000..c3341ece0d
--- /dev/null
+++ b/awx/lib/site-packages/boto/ses/exceptions.py
@@ -0,0 +1,77 @@
+"""
+Various exceptions that are specific to the SES module.
+"""
+from boto.exception import BotoServerError
+
+class SESError(BotoServerError):
+    """
+    Sub-class all SES-related errors from here. Don't raise this error
+    directly from anywhere.
The only thing this gets us is the ability to + catch SESErrors separately from the more generic, top-level + BotoServerError exception. + """ + pass + + + +class SESAddressNotVerifiedError(SESError): + """ + Raised when a "Reply-To" address has not been validated in SES yet. + """ + pass + +class SESIdentityNotVerifiedError(SESError): + """ + Raised when an identity (domain or address) has not been verified in SES yet. + """ + pass + +class SESDomainNotConfirmedError(SESError): + """ + """ + pass + +class SESAddressBlacklistedError(SESError): + """ + After you attempt to send mail to an address, and delivery repeatedly + fails, said address is blacklisted for at least 24 hours. The blacklisting + eventually expires, and you are able to attempt delivery again. If you + attempt to send mail to a blacklisted email, this is raised. + """ + pass + + +class SESDailyQuotaExceededError(SESError): + """ + Your account's daily (rolling 24 hour total) allotment of outbound emails + has been exceeded. + """ + pass + + +class SESMaxSendingRateExceededError(SESError): + """ + Your account's requests/second limit has been exceeded. + """ + pass + + +class SESDomainEndsWithDotError(SESError): + """ + Recipient's email address' domain ends with a period/dot. + """ + pass + + +class SESLocalAddressCharacterError(SESError): + """ + An address contained a control or whitespace character. + """ + pass + + +class SESIllegalAddressError(SESError): + """ + Raised when an illegal address is encountered. + """ + pass diff --git a/awx/lib/site-packages/boto/sns/__init__.py b/awx/lib/site-packages/boto/sns/__init__.py new file mode 100644 index 0000000000..4ed0539ab5 --- /dev/null +++ b/awx/lib/site-packages/boto/sns/__init__.py @@ -0,0 +1,81 @@ +# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# this is here for backward compatibility +# originally, the SNSConnection class was defined here +from connection import SNSConnection +from boto.regioninfo import RegionInfo + + +def regions(): + """ + Get all available regions for the SNS service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + return [RegionInfo(name='us-east-1', + endpoint='sns.us-east-1.amazonaws.com', + connection_cls=SNSConnection), + RegionInfo(name='eu-west-1', + endpoint='sns.eu-west-1.amazonaws.com', + connection_cls=SNSConnection), + RegionInfo(name='us-gov-west-1', + endpoint='sns.us-gov-west-1.amazonaws.com', + connection_cls=SNSConnection), + RegionInfo(name='us-west-1', + endpoint='sns.us-west-1.amazonaws.com', + connection_cls=SNSConnection), + RegionInfo(name='sa-east-1', + endpoint='sns.sa-east-1.amazonaws.com', + connection_cls=SNSConnection), + RegionInfo(name='us-west-2', + endpoint='sns.us-west-2.amazonaws.com', + connection_cls=SNSConnection), + RegionInfo(name='ap-northeast-1', + endpoint='sns.ap-northeast-1.amazonaws.com', + connection_cls=SNSConnection), + RegionInfo(name='ap-southeast-1', + endpoint='sns.ap-southeast-1.amazonaws.com', + connection_cls=SNSConnection), + RegionInfo(name='ap-southeast-2', + endpoint='sns.ap-southeast-2.amazonaws.com', + connection_cls=SNSConnection), + ] + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.sns.connection.SNSConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.sns.connection.SNSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/sns/connection.py b/awx/lib/site-packages/boto/sns/connection.py new file mode 100644 index 0000000000..73f3d9e93f --- /dev/null +++ b/awx/lib/site-packages/boto/sns/connection.py @@ -0,0 +1,726 @@ +# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import uuid +import hashlib + +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.compat import json +import boto + + +class SNSConnection(AWSQueryConnection): + """ + Amazon Simple Notification Service + Amazon Simple Notification Service (Amazon SNS) is a web service + that enables you to build distributed web-enabled applications. 
Applications can use Amazon SNS to easily push real-time
+    notification messages to interested subscribers over multiple
+    delivery protocols. For more information about this product see
+    `http://aws.amazon.com/sns`_. For detailed information about
+    Amazon SNS features and their associated API calls, see the
+    `Amazon SNS Developer Guide`_.
+
+    We also provide SDKs that enable you to access Amazon SNS from
+    your preferred programming language. The SDKs contain
+    functionality that automatically takes care of tasks such as:
+    cryptographically signing your service requests, retrying
+    requests, and handling error responses. For a list of available
+    SDKs, go to `Tools for Amazon Web Services`_.
+    """
+    DefaultRegionName = 'us-east-1'
+    DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com'
+    APIVersion = '2010-03-31'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 security_token=None, validate_certs=True):
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint,
+                                connection_cls=SNSConnection)
+        self.region = region
+        AWSQueryConnection.__init__(self, aws_access_key_id,
+                                    aws_secret_access_key,
+                                    is_secure, port, proxy, proxy_port,
+                                    proxy_user, proxy_pass,
+                                    self.region.endpoint, debug,
+                                    https_connection_factory, path,
+                                    security_token=security_token,
+                                    validate_certs=validate_certs)
+
+    def _build_dict_as_list_params(self, params, dictionary, name):
+        """
+        Serialize a parameter 'name' whose value is a 'dictionary' into a
+        list of parameters.
+
+        See: http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html
+        For example::
+
+            dictionary = {'PlatformPrincipal': 'foo', 'PlatformCredential': 'bar'}
+            name = 'Attributes'
+
+        would result in params dict being populated with:
+            Attributes.entry.1.key = PlatformPrincipal
+            Attributes.entry.1.value = foo
+            Attributes.entry.2.key = PlatformCredential
+            Attributes.entry.2.value = bar
+
+        :param params: the resulting parameters will be added to this dict
+        :param dictionary: dict - value of the serialized parameter
+        :param name: name of the serialized parameter
+        """
+        items = sorted(dictionary.items(), key=lambda x:x[0])
+        for kv, index in zip(items, range(1, len(items)+1)):
+            key, value = kv
+            prefix = '%s.entry.%s' % (name, index)
+            params['%s.key' % prefix] = key
+            params['%s.value' % prefix] = value
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def get_all_topics(self, next_token=None):
+        """
+        :type next_token: string
+        :param next_token: Token returned by the previous call to
+                           this method.
+
+        """
+        params = {}
+        if next_token:
+            params['NextToken'] = next_token
+        return self._make_request('ListTopics', params)
+
+    def get_topic_attributes(self, topic):
+        """
+        Get attributes of a Topic
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        """
+        params = {'TopicArn': topic}
+        return self._make_request('GetTopicAttributes', params)
+
+    def set_topic_attributes(self, topic, attr_name, attr_value):
+        """
+        Set an attribute of a Topic
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        :type attr_name: string
+        :param attr_name: The name of the attribute you want to set.
+                          Only a subset of the topic's attributes are mutable.
+                          Valid values: Policy | DisplayName
+
+        :type attr_value: string
+        :param attr_value: The new value for the attribute.
+ + """ + params = {'TopicArn': topic, + 'AttributeName': attr_name, + 'AttributeValue': attr_value} + return self._make_request('SetTopicAttributes', params) + + def add_permission(self, topic, label, account_ids, actions): + """ + Adds a statement to a topic's access control policy, granting + access for the specified AWS accounts to the specified actions. + + :type topic: string + :param topic: The ARN of the topic. + + :type label: string + :param label: A unique identifier for the new policy statement. + + :type account_ids: list of strings + :param account_ids: The AWS account ids of the users who will be + give access to the specified actions. + + :type actions: list of strings + :param actions: The actions you want to allow for each of the + specified principal(s). + + """ + params = {'TopicArn': topic, + 'Label': label} + self.build_list_params(params, account_ids, 'AWSAccountId.member') + self.build_list_params(params, actions, 'ActionName.member') + return self._make_request('AddPermission', params) + + def remove_permission(self, topic, label): + """ + Removes a statement from a topic's access control policy. + + :type topic: string + :param topic: The ARN of the topic. + + :type label: string + :param label: A unique identifier for the policy statement + to be removed. + + """ + params = {'TopicArn': topic, + 'Label': label} + return self._make_request('RemovePermission', params) + + def create_topic(self, topic): + """ + Create a new Topic. + + :type topic: string + :param topic: The name of the new topic. + + """ + params = {'Name': topic} + return self._make_request('CreateTopic', params) + + def delete_topic(self, topic): + """ + Delete an existing topic + + :type topic: string + :param topic: The ARN of the topic + + """ + params = {'TopicArn': topic} + return self._make_request('DeleteTopic', params, '/', 'GET') + + def publish(self, topic=None, message=None, subject=None, target_arn=None, + message_structure=None): + """ + Get properties of a Topic + + :type topic: string + :param topic: The ARN of the new topic. + + :type message: string + :param message: The message you want to send to the topic. + Messages must be UTF-8 encoded strings and + be at most 4KB in size. + + :type message_structure: string + :param message_structure: Optional parameter. If left as ``None``, + plain text will be sent. If set to ``json``, + your message should be a JSON string that + matches the structure described at + http://docs.aws.amazon.com/sns/latest/dg/PublishTopic.html#sns-message-formatting-by-protocol + + :type subject: string + :param subject: Optional parameter to be used as the "Subject" + line of the email notifications. + + :type target_arn: string + :param target_arn: Optional parameter for either TopicArn or + EndpointArn, but not both. + + """ + if message is None: + # To be backwards compatible when message did not have + # a default value and topic and message were required + # args. + raise TypeError("'message' is a required parameter") + params = {'Message': message} + if subject is not None: + params['Subject'] = subject + if topic is not None: + params['TopicArn'] = topic + if target_arn is not None: + params['TargetArn'] = target_arn + if message_structure is not None: + params['MessageStructure'] = message_structure + return self._make_request('Publish', params, '/', 'POST') + + def subscribe(self, topic, protocol, endpoint): + """ + Subscribe to a Topic. + + :type topic: string + :param topic: The ARN of the new topic. 
+
+        :type protocol: string
+        :param protocol: The protocol used to communicate with
+            the subscriber. Current choices are:
+            email|email-json|http|https|sqs
+
+        :type endpoint: string
+        :param endpoint: The location of the endpoint for
+            the subscriber.
+
+            * For email, this would be a valid email address
+            * For email-json, this would be a valid email address
+            * For http, this would be a URL beginning with http
+            * For https, this would be a URL beginning with https
+            * For sqs, this would be the ARN of an SQS Queue
+        """
+        params = {'TopicArn': topic,
+                  'Protocol': protocol,
+                  'Endpoint': endpoint}
+        return self._make_request('Subscribe', params)
+
+    def subscribe_sqs_queue(self, topic, queue):
+        """
+        Subscribe an SQS queue to a topic.
+
+        This is a convenience method that handles most of the complexity
+        involved in using an SQS queue as an endpoint for an SNS topic.
+        To achieve this the following operations are performed:
+
+        * The correct ARN is constructed for the SQS queue and that ARN is
+          then subscribed to the topic.
+        * A JSON policy document is constructed that grants permission to
+          the SNS topic to send messages to the SQS queue.
+        * This JSON policy is then associated with the SQS queue using
+          the queue's set_attribute method. If the queue already has
+          a policy associated with it, this process will add a Statement to
+          that policy. If no policy exists, a new policy will be created.
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        :type queue: A boto Queue object
+        :param queue: The queue you wish to subscribe to the SNS Topic.
+        """
+        q_arn = queue.arn
+        sid = hashlib.md5(topic + q_arn).hexdigest()
+        sid_exists = False
+        resp = self.subscribe(topic, 'sqs', q_arn)
+        attr = queue.get_attributes('Policy')
+        if 'Policy' in attr:
+            policy = json.loads(attr['Policy'])
+        else:
+            policy = {}
+        if 'Version' not in policy:
+            policy['Version'] = '2008-10-17'
+        if 'Statement' not in policy:
+            policy['Statement'] = []
+        # See if a Statement with the Sid exists already.
+        for s in policy['Statement']:
+            if s['Sid'] == sid:
+                sid_exists = True
+        if not sid_exists:
+            statement = {'Action': 'SQS:SendMessage',
+                         'Effect': 'Allow',
+                         'Principal': {'AWS': '*'},
+                         'Resource': q_arn,
+                         'Sid': sid,
+                         'Condition': {'StringLike': {'aws:SourceArn': topic}}}
+            policy['Statement'].append(statement)
+        queue.set_attribute('Policy', json.dumps(policy))
+        return resp
+
+    def confirm_subscription(self, topic, token,
+                             authenticate_on_unsubscribe=False):
+        """
+        Confirm a pending subscription to a topic, using the token sent
+        to the endpoint by an earlier Subscribe operation.
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        :type token: string
+        :param token: Short-lived token sent to an endpoint during
+            the Subscribe operation.
+
+        :type authenticate_on_unsubscribe: bool
+        :param authenticate_on_unsubscribe: Optional parameter indicating
+            that you wish to disable unauthenticated unsubscription
+            of the subscription.
+
+        """
+        params = {'TopicArn': topic, 'Token': token}
+        if authenticate_on_unsubscribe:
+            params['AuthenticateOnUnsubscribe'] = 'true'
+        return self._make_request('ConfirmSubscription', params)
+
+    def unsubscribe(self, subscription):
+        """
+        Allows endpoint owner to delete subscription.
+        Confirmation message will be delivered.
+
+        :type subscription: string
+        :param subscription: The ARN of the subscription to be deleted.
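+
+        A minimal sketch of the subscribe/unsubscribe round trip (the
+        ARNs are placeholders, and the response is assumed to follow
+        the usual ``SubscribeResponse``/``SubscribeResult`` JSON
+        envelope)::
+
+            sns = SNSConnection()
+            resp = sns.subscribe(
+                'arn:aws:sns:us-east-1:123456789012:my-topic',
+                'sqs', 'arn:aws:sqs:us-east-1:123456789012:my-queue')
+            result = resp['SubscribeResponse']['SubscribeResult']
+            sns.unsubscribe(result['SubscriptionArn'])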
+ + """ + params = {'SubscriptionArn': subscription} + return self._make_request('Unsubscribe', params) + + def get_all_subscriptions(self, next_token=None): + """ + Get list of all subscriptions. + + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {} + if next_token: + params['NextToken'] = next_token + return self._make_request('ListSubscriptions', params) + + def get_all_subscriptions_by_topic(self, topic, next_token=None): + """ + Get list of all subscriptions to a specific topic. + + :type topic: string + :param topic: The ARN of the topic for which you wish to + find subscriptions. + + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {'TopicArn': topic} + if next_token: + params['NextToken'] = next_token + return self._make_request('ListSubscriptionsByTopic', params) + + def create_platform_application(self, name=None, platform=None, + attributes=None): + """ + The `CreatePlatformApplication` action creates a platform + application object for one of the supported push notification + services, such as APNS and GCM, to which devices and mobile + apps may register. You must specify PlatformPrincipal and + PlatformCredential attributes when using the + `CreatePlatformApplication` action. The PlatformPrincipal is + received from the notification service. For APNS/APNS_SANDBOX, + PlatformPrincipal is "SSL certificate". For GCM, + PlatformPrincipal is not applicable. For ADM, + PlatformPrincipal is "client id". The PlatformCredential is + also received from the notification service. For + APNS/APNS_SANDBOX, PlatformCredential is "private key". For + GCM, PlatformCredential is "API key". For ADM, + PlatformCredential is "client secret". The + PlatformApplicationArn that is returned when using + `CreatePlatformApplication` is then used as an attribute for + the `CreatePlatformEndpoint` action. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type name: string + :param name: Application names must be made up of only uppercase and + lowercase ASCII letters, numbers, underscores, hyphens, and + periods, and must be between 1 and 256 characters long. + + :type platform: string + :param platform: The following platforms are supported: ADM (Amazon + Device Messaging), APNS (Apple Push Notification Service), + APNS_SANDBOX, and GCM (Google Cloud Messaging). + + :type attributes: map + :param attributes: For a list of attributes, see + `SetPlatformApplicationAttributes`_ + + """ + params = {} + if name is not None: + params['Name'] = name + if platform is not None: + params['Platform'] = platform + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='CreatePlatformApplication', + params=params) + + def set_platform_application_attributes(self, + platform_application_arn=None, + attributes=None): + """ + The `SetPlatformApplicationAttributes` action sets the + attributes of the platform application object for the + supported push notification services, such as APNS and GCM. + For more information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn for + SetPlatformApplicationAttributes action. + + :type attributes: map + :param attributes: + A map of the platform application attributes. 
Attributes in this map + include the following: + + + + `PlatformCredential` -- The credential received from the notification + service. For APNS/APNS_SANDBOX, PlatformCredential is "private + key". For GCM, PlatformCredential is "API key". For ADM, + PlatformCredential is "client secret". + + `PlatformPrincipal` -- The principal received from the notification + service. For APNS/APNS_SANDBOX, PlatformPrincipal is "SSL + certificate". For GCM, PlatformPrincipal is not applicable. For + ADM, PlatformPrincipal is "client id". + + `EventEndpointCreated` -- Topic ARN to which EndpointCreated event + notifications should be sent. + + `EventEndpointDeleted` -- Topic ARN to which EndpointDeleted event + notifications should be sent. + + `EventEndpointUpdated` -- Topic ARN to which EndpointUpdate event + notifications should be sent. + + `EventDeliveryFailure` -- Topic ARN to which DeliveryFailure event + notifications should be sent upon Direct Publish delivery failure + (permanent) to one of the application's endpoints. + + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='SetPlatformApplicationAttributes', + params=params) + + def get_platform_application_attributes(self, + platform_application_arn=None): + """ + The `GetPlatformApplicationAttributes` action retrieves the + attributes of the platform application object for the + supported push notification services, such as APNS and GCM. + For more information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn for + GetPlatformApplicationAttributesInput. + + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + return self._make_request(action='GetPlatformApplicationAttributes', + params=params) + + def list_platform_applications(self, next_token=None): + """ + The `ListPlatformApplications` action lists the platform + application objects for the supported push notification + services, such as APNS and GCM. The results for + `ListPlatformApplications` are paginated and return a limited + list of applications, up to 100. If additional records are + available after the first page results, then a NextToken + string will be returned. To receive the next page, you call + `ListPlatformApplications` using the NextToken string received + from the previous call. When there are no more records to + return, NextToken will be null. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type next_token: string + :param next_token: NextToken string is used when calling + ListPlatformApplications action to retrieve additional records that + are available after the first page results. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self._make_request(action='ListPlatformApplications', + params=params) + + def list_endpoints_by_platform_application(self, + platform_application_arn=None, + next_token=None): + """ + The `ListEndpointsByPlatformApplication` action lists the + endpoints and endpoint attributes for devices in a supported + push notification service, such as GCM and APNS. The results + for `ListEndpointsByPlatformApplication` are paginated and + return a limited list of endpoints, up to 100. 
If additional + records are available after the first page results, then a + NextToken string will be returned. To receive the next page, + you call `ListEndpointsByPlatformApplication` again using the + NextToken string received from the previous call. When there + are no more records to return, NextToken will be null. For + more information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn for + ListEndpointsByPlatformApplicationInput action. + + :type next_token: string + :param next_token: NextToken string is used when calling + ListEndpointsByPlatformApplication action to retrieve additional + records that are available after the first page results. + + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + if next_token is not None: + params['NextToken'] = next_token + return self._make_request(action='ListEndpointsByPlatformApplication', + params=params) + + def delete_platform_application(self, platform_application_arn=None): + """ + The `DeletePlatformApplication` action deletes a platform + application object for one of the supported push notification + services, such as APNS and GCM. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn of platform + application object to delete. + + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + return self._make_request(action='DeletePlatformApplication', + params=params) + + def create_platform_endpoint(self, platform_application_arn=None, + token=None, custom_user_data=None, + attributes=None): + """ + The `CreatePlatformEndpoint` creates an endpoint for a device + and mobile app on one of the supported push notification + services, such as GCM and APNS. `CreatePlatformEndpoint` + requires the PlatformApplicationArn that is returned from + `CreatePlatformApplication`. The EndpointArn that is returned + when using `CreatePlatformEndpoint` can then be used by the + `Publish` action to send a message to a mobile app or by the + `Subscribe` action for subscription to a topic. For more + information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn returned from + CreatePlatformApplication is used to create a an endpoint. + + :type token: string + :param token: Unique identifier created by the notification service for + an app on a device. The specific name for Token will vary, + depending on which notification service is being used. For example, + when using APNS as the notification service, you need the device + token. Alternatively, when using GCM or ADM, the device token + equivalent is called the registration ID. + + :type custom_user_data: string + :param custom_user_data: Arbitrary user data to associate with the + endpoint. SNS does not use this data. The data must be in UTF-8 + format and less than 2KB. + + :type attributes: map + :param attributes: For a list of attributes, see + `SetEndpointAttributes`_. 
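+
+        A minimal sketch (the application ARN and token are
+        placeholders, and the response is assumed to follow the usual
+        ``CreatePlatformEndpointResponse`` JSON envelope)::
+
+            sns = SNSConnection()
+            resp = sns.create_platform_endpoint(
+                platform_application_arn='arn:aws:sns:us-east-1:'
+                                         '123456789012:app/GCM/MyApp',
+                token='device-registration-id')
+            result = resp['CreatePlatformEndpointResponse']
+            endpoint_arn = result['CreatePlatformEndpointResult']['EndpointArn']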
+ + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + if token is not None: + params['Token'] = token + if custom_user_data is not None: + params['CustomUserData'] = custom_user_data + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='CreatePlatformEndpoint', + params=params) + + def delete_endpoint(self, endpoint_arn=None): + """ + The `DeleteEndpoint` action, which is idempotent, deletes the + endpoint from SNS. For more information, see `Using Amazon SNS + Mobile Push Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn of endpoint to delete. + + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + return self._make_request(action='DeleteEndpoint', params=params) + + def set_endpoint_attributes(self, endpoint_arn=None, attributes=None): + """ + The `SetEndpointAttributes` action sets the attributes for an + endpoint for a device on one of the supported push + notification services, such as GCM and APNS. For more + information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn used for SetEndpointAttributes action. + + :type attributes: map + :param attributes: + A map of the endpoint attributes. Attributes in this map include the + following: + + + + `CustomUserData` -- arbitrary user data to associate with the + endpoint. SNS does not use this data. The data must be in UTF-8 + format and less than 2KB. + + `Enabled` -- flag that enables/disables delivery to the endpoint. + Message Processor will set this to false when a notification + service indicates to SNS that the endpoint is invalid. Users can + set it back to true, typically after updating Token. + + `Token` -- device token, also referred to as a registration id, for + an app and mobile device. This is returned from the notification + service when an app and mobile device are registered with the + notification service. + + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='SetEndpointAttributes', + params=params) + + def get_endpoint_attributes(self, endpoint_arn=None): + """ + The `GetEndpointAttributes` retrieves the endpoint attributes + for a device on one of the supported push notification + services, such as GCM and APNS. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn for GetEndpointAttributes input. 
+ + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + return self._make_request(action='GetEndpointAttributes', + params=params) + + def _make_request(self, action, params, path='/', verb='GET'): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb=verb, + path=path, params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) diff --git a/awx/lib/site-packages/boto/sqs/__init__.py b/awx/lib/site-packages/boto/sqs/__init__.py new file mode 100644 index 0000000000..973b8ba55e --- /dev/null +++ b/awx/lib/site-packages/boto/sqs/__init__.py @@ -0,0 +1,58 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from regioninfo import SQSRegionInfo + + +def regions(): + """ + Get all available regions for the SQS service. 
+ + :rtype: list + :return: A list of :class:`boto.sqs.regioninfo.RegionInfo` + """ + return [SQSRegionInfo(name='us-east-1', + endpoint='queue.amazonaws.com'), + SQSRegionInfo(name='us-gov-west-1', + endpoint='sqs.us-gov-west-1.amazonaws.com'), + SQSRegionInfo(name='eu-west-1', + endpoint='eu-west-1.queue.amazonaws.com'), + SQSRegionInfo(name='us-west-1', + endpoint='us-west-1.queue.amazonaws.com'), + SQSRegionInfo(name='us-west-2', + endpoint='us-west-2.queue.amazonaws.com'), + SQSRegionInfo(name='sa-east-1', + endpoint='sa-east-1.queue.amazonaws.com'), + SQSRegionInfo(name='ap-northeast-1', + endpoint='ap-northeast-1.queue.amazonaws.com'), + SQSRegionInfo(name='ap-southeast-1', + endpoint='ap-southeast-1.queue.amazonaws.com'), + SQSRegionInfo(name='ap-southeast-2', + endpoint='ap-southeast-2.queue.amazonaws.com') + ] + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/sqs/attributes.py b/awx/lib/site-packages/boto/sqs/attributes.py new file mode 100644 index 0000000000..26c720416f --- /dev/null +++ b/awx/lib/site-packages/boto/sqs/attributes.py @@ -0,0 +1,46 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an SQS Attribute Name/Value set +""" + +class Attributes(dict): + + def __init__(self, parent): + self.parent = parent + self.current_key = None + self.current_value = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Attribute': + self[self.current_key] = self.current_value + elif name == 'Name': + self.current_key = value + elif name == 'Value': + self.current_value = value + else: + setattr(self, name, value) + + diff --git a/awx/lib/site-packages/boto/sqs/batchresults.py b/awx/lib/site-packages/boto/sqs/batchresults.py new file mode 100644 index 0000000000..aa5f86b8be --- /dev/null +++ b/awx/lib/site-packages/boto/sqs/batchresults.py @@ -0,0 +1,95 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Eucalyptus Systems, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +A set of results returned by SendMessageBatch. +""" + +class ResultEntry(dict): + """ + The result (successful or unsuccessful) of a single + message within a send_message_batch request. + + In the case of a successful result, this dict-like + object will contain the following items: + + :ivar id: A string containing the user-supplied ID of the message. + :ivar message_id: A string containing the SQS ID of the new message. + :ivar message_md5: A string containing the MD5 hash of the message body. + + In the case of an error, this object will contain the following + items: + + :ivar id: A string containing the user-supplied ID of the message. + :ivar sender_fault: A boolean value. + :ivar error_code: A string containing a short description of the error. + :ivar error_message: A string containing a description of the error. + """ + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self['id'] = value + elif name == 'MessageId': + self['message_id'] = value + elif name == 'MD5OfMessageBody': + self['message_md5'] = value + elif name == 'SenderFault': + self['sender_fault'] = value + elif name == 'Code': + self['error_code'] = value + elif name == 'Message': + self['error_message'] = value + +class BatchResults(object): + """ + A container for the results of a send_message_batch request. + + :ivar results: A list of successful results. Each item in the + list will be an instance of :class:`ResultEntry`. + + :ivar errors: A list of unsuccessful results. Each item in the + list will be an instance of :class:`ResultEntry`. 
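+
+    A minimal sketch of consuming these results (``queue`` is assumed
+    to be an existing :class:`boto.sqs.queue.Queue`)::
+
+        br = queue.write_batch([('m1', 'hello', 0), ('m2', 'world', 0)])
+        for entry in br.results:
+            print 'sent %s as %s' % (entry['id'], entry['message_id'])
+        for entry in br.errors:
+            print 'failed %s: %s' % (entry['id'], entry['error_message'])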
+ """ + + def __init__(self, parent): + self.parent = parent + self.results = [] + self.errors = [] + + def startElement(self, name, attrs, connection): + if name.endswith('MessageBatchResultEntry'): + entry = ResultEntry() + self.results.append(entry) + return entry + if name == 'BatchResultErrorEntry': + entry = ResultEntry() + self.errors.append(entry) + return entry + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + diff --git a/awx/lib/site-packages/boto/sqs/connection.py b/awx/lib/site-packages/boto/sqs/connection.py new file mode 100644 index 0000000000..e076de124e --- /dev/null +++ b/awx/lib/site-packages/boto/sqs/connection.py @@ -0,0 +1,413 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.connection import AWSQueryConnection +from boto.sqs.regioninfo import SQSRegionInfo +from boto.sqs.queue import Queue +from boto.sqs.message import Message +from boto.sqs.attributes import Attributes +from boto.sqs.batchresults import BatchResults +from boto.exception import SQSError, BotoServerError + + +class SQSConnection(AWSQueryConnection): + """ + A Connection to the SQS Service. + """ + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'queue.amazonaws.com' + APIVersion = '2012-11-05' + DefaultContentType = 'text/plain' + ResponseError = SQSError + AuthServiceName = 'sqs' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True): + if not region: + region = SQSRegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, + proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs) + self.auth_region_name = self.region.name + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_queue(self, queue_name, visibility_timeout=None): + """ + Create an SQS Queue. + + :type queue_name: str or unicode + :param queue_name: The name of the new queue. Names are + scoped to an account and need to be unique within that + account. 
Calling this method on an existing queue name
+        will not return an error from SQS unless the value for
+        visibility_timeout is different than the value of the
+        existing queue of that name. This is still an expensive
+        operation, though, and not the preferred way to check for
+        the existence of a queue. See the
+        :func:`boto.sqs.connection.SQSConnection.lookup` method.
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The default visibility timeout for
+            all messages written in the queue. This can be overridden
+            on a per-message basis.
+
+        :rtype: :class:`boto.sqs.queue.Queue`
+        :return: The newly created queue.
+
+        """
+        params = {'QueueName': queue_name}
+        if visibility_timeout:
+            params['Attribute.1.Name'] = 'VisibilityTimeout'
+            params['Attribute.1.Value'] = int(visibility_timeout)
+        return self.get_object('CreateQueue', params, Queue)
+
+    def delete_queue(self, queue, force_deletion=False):
+        """
+        Delete an SQS Queue.
+
+        :type queue: A Queue object
+        :param queue: The SQS queue to be deleted
+
+        :type force_deletion: Boolean
+        :param force_deletion: A deprecated parameter that is no longer
+            used by SQS's API.
+
+        :rtype: bool
+        :return: True if the command succeeded, False otherwise
+        """
+        return self.get_status('DeleteQueue', None, queue.id)
+
+    def get_queue_attributes(self, queue, attribute='All'):
+        """
+        Gets one or all attributes of a Queue.
+
+        :type queue: A Queue object
+        :param queue: The SQS queue whose attributes are being retrieved
+
+        :type attribute: str
+        :param attribute: The specific attribute requested. If not
+            supplied, the default is to return all attributes. Valid
+            attributes are:
+
+            * ApproximateNumberOfMessages
+            * ApproximateNumberOfMessagesNotVisible
+            * VisibilityTimeout
+            * CreatedTimestamp
+            * LastModifiedTimestamp
+            * Policy
+            * ReceiveMessageWaitTimeSeconds
+
+        :rtype: :class:`boto.sqs.attributes.Attributes`
+        :return: An Attributes object containing request value(s).
+        """
+        params = {'AttributeName': attribute}
+        return self.get_object('GetQueueAttributes', params,
+                               Attributes, queue.id)
+
+    def set_queue_attribute(self, queue, attribute, value):
+        params = {'Attribute.Name': attribute, 'Attribute.Value': value}
+        return self.get_status('SetQueueAttributes', params, queue.id)
+
+    def receive_message(self, queue, number_messages=1,
+                        visibility_timeout=None, attributes=None,
+                        wait_time_seconds=None):
+        """
+        Read messages from an SQS Queue.
+
+        :type queue: A Queue object
+        :param queue: The Queue from which messages are read.
+
+        :type number_messages: int
+        :param number_messages: The maximum number of messages to read
+            (default=1)
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The number of seconds the message should
+            remain invisible to other queue readers
+            (default=None which uses the Queue's default)
+
+        :type attributes: str
+        :param attributes: The name of additional attribute to return
+            with response or All if you want all attributes. The
+            default is to return no additional attributes. Valid
+            values:
+
+            * All
+            * SenderId
+            * SentTimestamp
+            * ApproximateReceiveCount
+            * ApproximateFirstReceiveTimestamp
+
+        :type wait_time_seconds: int
+        :param wait_time_seconds: The duration (in seconds) for which the call
+            will wait for a message to arrive in the queue before returning.
+            If a message is available, the call will return sooner than
+            wait_time_seconds.
+
+        :rtype: list
+        :return: A list of :class:`boto.sqs.message.Message` objects.
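+
+        A minimal long-poll sketch (the queue name is a placeholder)::
+
+            conn = SQSConnection()
+            q = conn.create_queue('my-test-queue')
+            msgs = conn.receive_message(q, number_messages=5,
+                                        wait_time_seconds=10)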
+ + """ + params = {'MaxNumberOfMessages' : number_messages} + if visibility_timeout is not None: + params['VisibilityTimeout'] = visibility_timeout + if attributes is not None: + self.build_list_params(params, attributes, 'AttributeName') + if wait_time_seconds is not None: + params['WaitTimeSeconds'] = wait_time_seconds + return self.get_list('ReceiveMessage', params, + [('Message', queue.message_class)], + queue.id, queue) + + def delete_message(self, queue, message): + """ + Delete a message from a queue. + + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type message: A :class:`boto.sqs.message.Message` object + :param message: The Message to be deleted + + :rtype: bool + :return: True if successful, False otherwise. + """ + params = {'ReceiptHandle' : message.receipt_handle} + return self.get_status('DeleteMessage', params, queue.id) + + def delete_message_batch(self, queue, messages): + """ + Deletes a list of messages from a queue in a single request. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type messages: List of :class:`boto.sqs.message.Message` objects. + :param messages: A list of message objects. + """ + params = {} + for i, msg in enumerate(messages): + prefix = 'DeleteMessageBatchRequestEntry' + p_name = '%s.%i.Id' % (prefix, (i+1)) + params[p_name] = msg.id + p_name = '%s.%i.ReceiptHandle' % (prefix, (i+1)) + params[p_name] = msg.receipt_handle + return self.get_object('DeleteMessageBatch', params, BatchResults, + queue.id, verb='POST') + + def delete_message_from_handle(self, queue, receipt_handle): + """ + Delete a message from a queue, given a receipt handle. + + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type receipt_handle: str + :param receipt_handle: The receipt handle for the message + + :rtype: bool + :return: True if successful, False otherwise. + """ + params = {'ReceiptHandle' : receipt_handle} + return self.get_status('DeleteMessage', params, queue.id) + + def send_message(self, queue, message_content, delay_seconds=None): + params = {'MessageBody' : message_content} + if delay_seconds: + params['DelaySeconds'] = int(delay_seconds) + return self.get_object('SendMessage', params, Message, + queue.id, verb='POST') + + def send_message_batch(self, queue, messages): + """ + Delivers up to 10 messages to a queue in a single request. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type messages: List of lists. + :param messages: A list of lists or tuples. Each inner + tuple represents a single message to be written + and consists of and ID (string) that must be unique + within the list of messages, the message body itself + which can be a maximum of 64K in length, and an + integer which represents the delay time (in seconds) + for the message (0-900) before the message will + be delivered to the queue. 
+ """ + params = {} + for i, msg in enumerate(messages): + p_name = 'SendMessageBatchRequestEntry.%i.Id' % (i+1) + params[p_name] = msg[0] + p_name = 'SendMessageBatchRequestEntry.%i.MessageBody' % (i+1) + params[p_name] = msg[1] + p_name = 'SendMessageBatchRequestEntry.%i.DelaySeconds' % (i+1) + params[p_name] = msg[2] + return self.get_object('SendMessageBatch', params, BatchResults, + queue.id, verb='POST') + + def change_message_visibility(self, queue, receipt_handle, + visibility_timeout): + """ + Extends the read lock timeout for the specified message from + the specified queue to the specified value. + + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type receipt_handle: str + :param queue: The receipt handle associated with the message whose + visibility timeout will be changed. + + :type visibility_timeout: int + :param visibility_timeout: The new value of the message's visibility + timeout in seconds. + """ + params = {'ReceiptHandle' : receipt_handle, + 'VisibilityTimeout' : visibility_timeout} + return self.get_status('ChangeMessageVisibility', params, queue.id) + + def change_message_visibility_batch(self, queue, messages): + """ + A batch version of change_message_visibility that can act + on up to 10 messages at a time. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type messages: List of tuples. + :param messages: A list of tuples where each tuple consists + of a :class:`boto.sqs.message.Message` object and an integer + that represents the new visibility timeout for that message. + """ + params = {} + for i, t in enumerate(messages): + prefix = 'ChangeMessageVisibilityBatchRequestEntry' + p_name = '%s.%i.Id' % (prefix, (i+1)) + params[p_name] = t[0].id + p_name = '%s.%i.ReceiptHandle' % (prefix, (i+1)) + params[p_name] = t[0].receipt_handle + p_name = '%s.%i.VisibilityTimeout' % (prefix, (i+1)) + params[p_name] = t[1] + return self.get_object('ChangeMessageVisibilityBatch', + params, BatchResults, + queue.id, verb='POST') + + def get_all_queues(self, prefix=''): + """ + Retrieves all queues. + + :keyword str prefix: Optionally, only return queues that start with + this value. + :rtype: list + :returns: A list of :py:class:`boto.sqs.queue.Queue` instances. + """ + params = {} + if prefix: + params['QueueNamePrefix'] = prefix + return self.get_list('ListQueues', params, [('QueueUrl', Queue)]) + + def get_queue(self, queue_name): + """ + Retrieves the queue with the given name, or ``None`` if no match + was found. + + :param str queue_name: The name of the queue to retrieve. + :rtype: :py:class:`boto.sqs.queue.Queue` or ``None`` + :returns: The requested queue, or ``None`` if no match was found. + """ + params = {'QueueName': queue_name} + try: + return self.get_object('GetQueueUrl', params, Queue) + except SQSError: + return None + + lookup = get_queue + + # + # Permissions methods + # + + def add_permission(self, queue, label, aws_account_id, action_name): + """ + Add a permission to a queue. + + :type queue: :class:`boto.sqs.queue.Queue` + :param queue: The queue object + + :type label: str or unicode + :param label: A unique identification of the permission you are setting. + Maximum of 80 characters ``[0-9a-zA-Z_-]`` + Example, AliceSendMessage + + :type aws_account_id: str or unicode + :param principal_id: The AWS account number of the principal + who will be given permission. 
The principal must have an + AWS account, but does not need to be signed up for Amazon + SQS. For information about locating the AWS account + identification. + + :type action_name: str or unicode + :param action_name: The action. Valid choices are: + * * + * SendMessage + * ReceiveMessage + * DeleteMessage + * ChangeMessageVisibility + * GetQueueAttributes + + :rtype: bool + :return: True if successful, False otherwise. + + """ + params = {'Label': label, + 'AWSAccountId' : aws_account_id, + 'ActionName' : action_name} + return self.get_status('AddPermission', params, queue.id) + + def remove_permission(self, queue, label): + """ + Remove a permission from a queue. + + :type queue: :class:`boto.sqs.queue.Queue` + :param queue: The queue object + + :type label: str or unicode + :param label: The unique label associated with the permission + being removed. + + :rtype: bool + :return: True if successful, False otherwise. + """ + params = {'Label': label} + return self.get_status('RemovePermission', params, queue.id) diff --git a/awx/lib/site-packages/boto/sqs/jsonmessage.py b/awx/lib/site-packages/boto/sqs/jsonmessage.py new file mode 100644 index 0000000000..0eb3a13621 --- /dev/null +++ b/awx/lib/site-packages/boto/sqs/jsonmessage.py @@ -0,0 +1,43 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import base64 + +from boto.sqs.message import MHMessage +from boto.exception import SQSDecodeError +from boto.compat import json + + +class JSONMessage(MHMessage): + """ + Acts like a dictionary but encodes it's data as a Base64 encoded JSON payload. 
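+
+    A minimal round-trip sketch::
+
+        m = JSONMessage(body={'event': 'created', 'id': 42})
+        encoded = m.encode(m.get_body())
+        assert m.decode(encoded) == {'event': 'created', 'id': 42}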
+ """ + + def decode(self, value): + try: + value = base64.b64decode(value) + value = json.loads(value) + except: + raise SQSDecodeError('Unable to decode message', self) + return value + + def encode(self, value): + value = json.dumps(value) + return base64.b64encode(value) diff --git a/awx/lib/site-packages/boto/sqs/message.py b/awx/lib/site-packages/boto/sqs/message.py new file mode 100644 index 0000000000..f0666e5601 --- /dev/null +++ b/awx/lib/site-packages/boto/sqs/message.py @@ -0,0 +1,256 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +SQS Message + +A Message represents the data stored in an SQS queue. The rules for what is allowed within an SQS +Message are here: + + http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html + +So, at it's simplest level a Message just needs to allow a developer to store bytes in it and get the bytes +back out. However, to allow messages to have richer semantics, the Message class must support the +following interfaces: + +The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a +boto Queue object and represents the queue that the message will be stored in. The default value for +this parameter is None. + +The constructor for the Message class must accept a keyword parameter "body" which represents the +content or body of the message. The format of this parameter will depend on the behavior of the +particular Message subclass. For example, if the Message subclass provides dictionary-like behavior to the +user the body passed to the constructor should be a dict-like object that can be used to populate +the initial state of the message. + +The Message class must provide an encode method that accepts a value of the same type as the body +parameter of the constructor and returns a string of characters that are able to be stored in an +SQS message body (see rules above). + +The Message class must provide a decode method that accepts a string of characters that can be +stored (and probably were stored!) in an SQS message and return an object of a type that is consistent +with the "body" parameter accepted on the class constructor. + +The Message class must provide a __len__ method that will return the size of the encoded message +that would be stored in SQS based on the current state of the Message object. 
+ +The Message class must provide a get_body method that will return the body of the message in the +same format accepted in the constructor of the class. + +The Message class must provide a set_body method that accepts a message body in the same format +accepted by the constructor of the class. This method should alter to the internal state of the +Message object to reflect the state represented in the message body parameter. + +The Message class must provide a get_body_encoded method that returns the current body of the message +in the format in which it would be stored in SQS. +""" + +import base64 +import StringIO +from boto.sqs.attributes import Attributes +from boto.exception import SQSDecodeError +import boto + +class RawMessage: + """ + Base class for SQS messages. RawMessage does not encode the message + in any way. Whatever you store in the body of the message is what + will be written to SQS and whatever is returned from SQS is stored + directly into the body of the message. + """ + + def __init__(self, queue=None, body=''): + self.queue = queue + self.set_body(body) + self.id = None + self.receipt_handle = None + self.md5 = None + self.attributes = Attributes(self) + + def __len__(self): + return len(self.encode(self._body)) + + def startElement(self, name, attrs, connection): + if name == 'Attribute': + return self.attributes + return None + + def endElement(self, name, value, connection): + if name == 'Body': + self.set_body(self.decode(value)) + elif name == 'MessageId': + self.id = value + elif name == 'ReceiptHandle': + self.receipt_handle = value + elif name == 'MD5OfMessageBody': + self.md5 = value + else: + setattr(self, name, value) + + def encode(self, value): + """Transform body object into serialized byte array format.""" + return value + + def decode(self, value): + """Transform seralized byte array into any object.""" + return value + + def set_body(self, body): + """Override the current body for this object, using decoded format.""" + self._body = body + + def get_body(self): + return self._body + + def get_body_encoded(self): + """ + This method is really a semi-private method used by the Queue.write + method when writing the contents of the message to SQS. + You probably shouldn't need to call this method in the normal course of events. + """ + return self.encode(self.get_body()) + + def delete(self): + if self.queue: + return self.queue.delete_message(self) + + def change_visibility(self, visibility_timeout): + if self.queue: + self.queue.connection.change_message_visibility(self.queue, + self.receipt_handle, + visibility_timeout) + +class Message(RawMessage): + """ + The default Message class used for SQS queues. This class automatically + encodes/decodes the message body using Base64 encoding to avoid any + illegal characters in the message body. See: + + https://forums.aws.amazon.com/thread.jspa?threadID=13067 + + for details on why this is a good idea. The encode/decode is meant to + be transparent to the end-user. + """ + + def encode(self, value): + return base64.b64encode(value) + + def decode(self, value): + try: + value = base64.b64decode(value) + except: + boto.log.warning('Unable to decode message') + return value + return value + +class MHMessage(Message): + """ + The MHMessage class provides a message that provides RFC821-like + headers like this: + + HeaderName: HeaderValue + + The encoding/decoding of this is handled automatically and after + the message body has been read, the message instance can be treated + like a mapping object, i.e. 
m['HeaderName'] would return 'HeaderValue'. + """ + + def __init__(self, queue=None, body=None, xml_attrs=None): + if body == None or body == '': + body = {} + Message.__init__(self, queue, body) + + def decode(self, value): + try: + msg = {} + fp = StringIO.StringIO(value) + line = fp.readline() + while line: + delim = line.find(':') + key = line[0:delim] + value = line[delim+1:].strip() + msg[key.strip()] = value.strip() + line = fp.readline() + except: + raise SQSDecodeError('Unable to decode message', self) + return msg + + def encode(self, value): + s = '' + for item in value.items(): + s = s + '%s: %s\n' % (item[0], item[1]) + return s + + def __contains__(self, key): + return key in self._body + + def __getitem__(self, key): + if key in self._body: + return self._body[key] + else: + raise KeyError(key) + + def __setitem__(self, key, value): + self._body[key] = value + self.set_body(self._body) + + def keys(self): + return self._body.keys() + + def values(self): + return self._body.values() + + def items(self): + return self._body.items() + + def has_key(self, key): + return key in self._body + + def update(self, d): + self._body.update(d) + self.set_body(self._body) + + def get(self, key, default=None): + return self._body.get(key, default) + +class EncodedMHMessage(MHMessage): + """ + The EncodedMHMessage class provides a message that provides RFC821-like + headers like this: + + HeaderName: HeaderValue + + This variation encodes/decodes the body of the message in base64 automatically. + The message instance can be treated like a mapping object, + i.e. m['HeaderName'] would return 'HeaderValue'. + """ + + def decode(self, value): + try: + value = base64.b64decode(value) + except: + raise SQSDecodeError('Unable to decode message', self) + return MHMessage.decode(self, value) + + def encode(self, value): + value = MHMessage.encode(self, value) + return base64.b64encode(value) + diff --git a/awx/lib/site-packages/boto/sqs/queue.py b/awx/lib/site-packages/boto/sqs/queue.py new file mode 100644 index 0000000000..603faaaea6 --- /dev/null +++ b/awx/lib/site-packages/boto/sqs/queue.py @@ -0,0 +1,478 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an SQS Queue +""" + +import urlparse +from boto.sqs.message import Message + + +class Queue: + + def __init__(self, connection=None, url=None, message_class=Message): + self.connection = connection + self.url = url + self.message_class = message_class + self.visibility_timeout = None + + def __repr__(self): + return 'Queue(%s)' % self.url + + def _id(self): + if self.url: + val = urlparse.urlparse(self.url)[2] + else: + val = self.url + return val + id = property(_id) + + def _name(self): + if self.url: + val = urlparse.urlparse(self.url)[2].split('/')[2] + else: + val = self.url + return val + name = property(_name) + + def _arn(self): + parts = self.id.split('/') + return 'arn:aws:sqs:%s:%s:%s' % ( + self.connection.region.name, parts[1], parts[2]) + arn = property(_arn) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'QueueUrl': + self.url = value + elif name == 'VisibilityTimeout': + self.visibility_timeout = int(value) + else: + setattr(self, name, value) + + def set_message_class(self, message_class): + """ + Set the message class that should be used when instantiating + messages read from the queue. By default, the class + :class:`boto.sqs.message.Message` is used but this can be overriden + with any class that behaves like a message. + + :type message_class: Message-like class + :param message_class: The new Message class + """ + self.message_class = message_class + + def get_attributes(self, attributes='All'): + """ + Retrieves attributes about this queue object and returns + them in an Attribute instance (subclass of a Dictionary). + + :type attributes: string + :param attributes: String containing one of: + ApproximateNumberOfMessages, + ApproximateNumberOfMessagesNotVisible, + VisibilityTimeout, + CreatedTimestamp, + LastModifiedTimestamp, + Policy + ReceiveMessageWaitTimeSeconds + :rtype: Attribute object + :return: An Attribute object which is a mapping type holding the + requested name/value pairs + """ + return self.connection.get_queue_attributes(self, attributes) + + def set_attribute(self, attribute, value): + """ + Set a new value for an attribute of the Queue. + + :type attribute: String + :param attribute: The name of the attribute you want to set. The + only valid value at this time is: VisibilityTimeout + :type value: int + :param value: The new value for the attribute. + For VisibilityTimeout the value must be an + integer number of seconds from 0 to 86400. + + :rtype: bool + :return: True if successful, otherwise False. + """ + return self.connection.set_queue_attribute(self, attribute, value) + + def get_timeout(self): + """ + Get the visibility timeout for the queue. + + :rtype: int + :return: The number of seconds as an integer. + """ + a = self.get_attributes('VisibilityTimeout') + return int(a['VisibilityTimeout']) + + def set_timeout(self, visibility_timeout): + """ + Set the visibility timeout for the queue. + + :type visibility_timeout: int + :param visibility_timeout: The desired timeout in seconds + """ + retval = self.set_attribute('VisibilityTimeout', visibility_timeout) + if retval: + self.visibility_timeout = visibility_timeout + return retval + + def add_permission(self, label, aws_account_id, action_name): + """ + Add a permission to a queue. + + :type label: str or unicode + :param label: A unique identification of the permission you are setting. 
+ Maximum of 80 characters ``[0-9a-zA-Z_-]`` + Example, AliceSendMessage + + :type aws_account_id: str or unicode + :param principal_id: The AWS account number of the principal who + will be given permission. The principal must have an AWS account, + but does not need to be signed up for Amazon SQS. For information + about locating the AWS account identification. + + :type action_name: str or unicode + :param action_name: The action. Valid choices are: + SendMessage|ReceiveMessage|DeleteMessage| + ChangeMessageVisibility|GetQueueAttributes|* + + :rtype: bool + :return: True if successful, False otherwise. + + """ + return self.connection.add_permission(self, label, aws_account_id, + action_name) + + def remove_permission(self, label): + """ + Remove a permission from a queue. + + :type label: str or unicode + :param label: The unique label associated with the permission + being removed. + + :rtype: bool + :return: True if successful, False otherwise. + """ + return self.connection.remove_permission(self, label) + + def read(self, visibility_timeout=None, wait_time_seconds=None): + """ + Read a single message from the queue. + + :type visibility_timeout: int + :param visibility_timeout: The timeout for this message in seconds + + :type wait_time_seconds: int + :param wait_time_seconds: The duration (in seconds) for which the call + will wait for a message to arrive in the queue before returning. + If a message is available, the call will return sooner than + wait_time_seconds. + + :rtype: :class:`boto.sqs.message.Message` + :return: A single message or None if queue is empty + """ + rs = self.get_messages(1, visibility_timeout, + wait_time_seconds=wait_time_seconds) + if len(rs) == 1: + return rs[0] + else: + return None + + def write(self, message, delay_seconds=None): + """ + Add a single message to the queue. + + :type message: Message + :param message: The message to be written to the queue + + :rtype: :class:`boto.sqs.message.Message` + :return: The :class:`boto.sqs.message.Message` object that was written. + """ + new_msg = self.connection.send_message(self, + message.get_body_encoded(), + delay_seconds) + message.id = new_msg.id + message.md5 = new_msg.md5 + return message + + def write_batch(self, messages): + """ + Delivers up to 10 messages in a single request. + + :type messages: List of lists. + :param messages: A list of lists or tuples. Each inner + tuple represents a single message to be written + and consists of and ID (string) that must be unique + within the list of messages, the message body itself + which can be a maximum of 64K in length, and an + integer which represents the delay time (in seconds) + for the message (0-900) before the message will + be delivered to the queue. + """ + return self.connection.send_message_batch(self, messages) + + def new_message(self, body=''): + """ + Create new message of appropriate class. + + :type body: message body + :param body: The body of the newly created message (optional). + + :rtype: :class:`boto.sqs.message.Message` + :return: A new Message object + """ + m = self.message_class(self, body) + m.queue = self + return m + + # get a variable number of messages, returns a list of messages + def get_messages(self, num_messages=1, visibility_timeout=None, + attributes=None, wait_time_seconds=None): + """ + Get a variable number of messages. + + :type num_messages: int + :param num_messages: The maximum number of messages to read from + the queue. 
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The VisibilityTimeout for the messages read.
+
+        :type attributes: str
+        :param attributes: The name of an additional attribute to return
+            with the response, or All if you want all attributes. The
+            default is to return no additional attributes. Valid
+            values: All SenderId SentTimestamp ApproximateReceiveCount
+            ApproximateFirstReceiveTimestamp
+
+        :type wait_time_seconds: int
+        :param wait_time_seconds: The duration (in seconds) for which the call
+            will wait for a message to arrive in the queue before returning.
+            If a message is available, the call will return sooner than
+            wait_time_seconds.
+
+        :rtype: list
+        :return: A list of :class:`boto.sqs.message.Message` objects.
+        """
+        return self.connection.receive_message(
+            self, number_messages=num_messages,
+            visibility_timeout=visibility_timeout, attributes=attributes,
+            wait_time_seconds=wait_time_seconds)
+
+    def delete_message(self, message):
+        """
+        Delete a message from the queue.
+
+        :type message: :class:`boto.sqs.message.Message`
+        :param message: The :class:`boto.sqs.message.Message` object to delete.
+
+        :rtype: bool
+        :return: True if successful, False otherwise
+        """
+        return self.connection.delete_message(self, message)
+
+    def delete_message_batch(self, messages):
+        """
+        Deletes a list of messages in a single request.
+
+        :type messages: List of :class:`boto.sqs.message.Message` objects.
+        :param messages: A list of message objects.
+        """
+        return self.connection.delete_message_batch(self, messages)
+
+    def change_message_visibility_batch(self, messages):
+        """
+        A batch version of change_message_visibility that can act
+        on up to 10 messages at a time.
+
+        :type messages: List of tuples.
+        :param messages: A list of tuples where each tuple consists
+            of a :class:`boto.sqs.message.Message` object and an integer
+            that represents the new visibility timeout for that message.
+        """
+        return self.connection.change_message_visibility_batch(self, messages)
+
+    def delete(self):
+        """
+        Delete the queue.
+        """
+        return self.connection.delete_queue(self)
+
+    def clear(self, page_size=10, vtimeout=10):
+        """Utility function to remove all messages from a queue"""
+        n = 0
+        l = self.get_messages(page_size, vtimeout)
+        while l:
+            for m in l:
+                self.delete_message(m)
+                n += 1
+            l = self.get_messages(page_size, vtimeout)
+        return n
+
+    def count(self, page_size=10, vtimeout=10):
+        """
+        Utility function to count the number of messages in a queue.
+        Note: This function now calls GetQueueAttributes to obtain
+        an 'approximate' count of the number of messages in a queue.
+        """
+        a = self.get_attributes('ApproximateNumberOfMessages')
+        return int(a['ApproximateNumberOfMessages'])
+
+    def count_slow(self, page_size=10, vtimeout=10):
+        """
+        Deprecated. This is the old 'count' method that actually counts
+        the messages by reading them all. This gives an accurate count but
+        is very slow for queues with a non-trivial number of messages.
+        Instead, use get_attributes('ApproximateNumberOfMessages') to take
+        advantage of the new SQS capability. This is retained only for
+        the unit tests.
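+
+        A sketch of the preferred approach, assuming ``q`` is a
+        :class:`Queue` instance::
+
+            attrs = q.get_attributes('ApproximateNumberOfMessages')
+            approx = int(attrs['ApproximateNumberOfMessages'])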
+        """
+        n = 0
+        l = self.get_messages(page_size, vtimeout)
+        while l:
+            for m in l:
+                n += 1
+            l = self.get_messages(page_size, vtimeout)
+        return n
+
+    def dump(self, file_name, page_size=10, vtimeout=10, sep='\n'):
+        """Utility function to dump the messages in a queue to a file
+        NOTE: Page size must be <= 10 else SQS errors"""
+        fp = open(file_name, 'wb')
+        n = 0
+        l = self.get_messages(page_size, vtimeout)
+        while l:
+            for m in l:
+                fp.write(m.get_body())
+                if sep:
+                    fp.write(sep)
+                n += 1
+            l = self.get_messages(page_size, vtimeout)
+        fp.close()
+        return n
+
+    def save_to_file(self, fp, sep='\n'):
+        """
+        Read all messages from the queue and persist them to file-like object.
+        Messages are written to the file and the 'sep' string is written
+        in between messages. Messages are deleted from the queue after
+        being written to the file.
+        Returns the number of messages saved.
+        """
+        n = 0
+        m = self.read()
+        while m:
+            n += 1
+            fp.write(m.get_body())
+            if sep:
+                fp.write(sep)
+            self.delete_message(m)
+            m = self.read()
+        return n
+
+    def save_to_filename(self, file_name, sep='\n'):
+        """
+        Read all messages from the queue and persist them to local file.
+        Messages are written to the file and the 'sep' string is written
+        in between messages. Messages are deleted from the queue after
+        being written to the file.
+        Returns the number of messages saved.
+        """
+        fp = open(file_name, 'wb')
+        n = self.save_to_file(fp, sep)
+        fp.close()
+        return n
+
+    # for backwards compatibility
+    save = save_to_filename
+
+    def save_to_s3(self, bucket):
+        """
+        Read all messages from the queue and persist them to S3.
+        Messages are stored in the S3 bucket using a naming scheme of::
+
+            <queue_id>/<message_id>
+
+        Messages are deleted from the queue after being saved to S3.
+        Returns the number of messages saved.
+        """
+        n = 0
+        m = self.read()
+        while m:
+            n += 1
+            key = bucket.new_key('%s/%s' % (self.id, m.id))
+            key.set_contents_from_string(m.get_body())
+            self.delete_message(m)
+            m = self.read()
+        return n
+
+    def load_from_s3(self, bucket, prefix=None):
+        """
+        Load messages previously saved to S3.
+        """
+        n = 0
+        if prefix:
+            prefix = '%s/' % prefix
+        else:
+            prefix = '%s/' % self.id[1:]
+        rs = bucket.list(prefix=prefix)
+        for key in rs:
+            n += 1
+            m = self.new_message(key.get_contents_as_string())
+            self.write(m)
+        return n
+
+    def load_from_file(self, fp, sep='\n'):
+        """Utility function to load messages from a file-like object to a queue"""
+        n = 0
+        body = ''
+        l = fp.readline()
+        while l:
+            if l == sep:
+                m = Message(self, body)
+                self.write(m)
+                n += 1
+                print 'writing message %d' % n
+                body = ''
+            else:
+                body = body + l
+            l = fp.readline()
+        return n
+
+    def load_from_filename(self, file_name, sep='\n'):
+        """Utility function to load messages from a local filename to a queue"""
+        fp = open(file_name, 'rb')
+        n = self.load_from_file(fp, sep)
+        fp.close()
+        return n
+
+    # for backward compatibility
+    load = load_from_filename
+
diff --git a/awx/lib/site-packages/boto/sqs/regioninfo.py b/awx/lib/site-packages/boto/sqs/regioninfo.py
new file mode 100644
index 0000000000..66d6733629
--- /dev/null
+++ b/awx/lib/site-packages/boto/sqs/regioninfo.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo + +class SQSRegionInfo(RegionInfo): + + def __init__(self, connection=None, name=None, endpoint=None): + from boto.sqs.connection import SQSConnection + RegionInfo.__init__(self, connection, name, endpoint, + SQSConnection) diff --git a/awx/lib/site-packages/boto/storage_uri.py b/awx/lib/site-packages/boto/storage_uri.py new file mode 100644 index 0000000000..40fee47327 --- /dev/null +++ b/awx/lib/site-packages/boto/storage_uri.py @@ -0,0 +1,888 @@ +# Copyright 2010 Google Inc. +# Copyright (c) 2011, Nexenta Systems Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +import os +import sys +import textwrap +from boto.s3.deletemarker import DeleteMarker +from boto.exception import BotoClientError +from boto.exception import InvalidUriError + + +class StorageUri(object): + """ + Base class for representing storage provider-independent bucket and + object name with a shorthand URI-like syntax. + + This is an abstract class: the constructor cannot be called (throws an + exception if you try). + """ + + connection = None + # Optional args that can be set from one of the concrete subclass + # constructors, to change connection behavior (e.g., to override + # https_connection_factory). + connection_args = None + + # Map of provider scheme ('s3' or 'gs') to AWSAuthConnection object. 
We + # maintain a pool here in addition to the connection pool implemented + # in AWSAuthConnection because the latter re-creates its connection pool + # every time that class is instantiated (so the current pool is used to + # avoid re-instantiating AWSAuthConnection). + provider_pool = {} + + def __init__(self): + """Uncallable constructor on abstract base StorageUri class. + """ + raise BotoClientError('Attempt to instantiate abstract StorageUri ' + 'class') + + def __repr__(self): + """Returns string representation of URI.""" + return self.uri + + def equals(self, uri): + """Returns true if two URIs are equal.""" + return self.uri == uri.uri + + def check_response(self, resp, level, uri): + if resp is None: + raise InvalidUriError('\n'.join(textwrap.wrap( + 'Attempt to get %s for "%s" failed. This can happen if ' + 'the URI refers to a non-existent object or if you meant to ' + 'operate on a directory (e.g., leaving off -R option on gsutil ' + 'cp, mv, or ls of a bucket)' % (level, uri), 80))) + + def _check_bucket_uri(self, function_name): + if issubclass(type(self), BucketStorageUri) and not self.bucket_name: + raise InvalidUriError( + '%s on bucket-less URI (%s)' % (function_name, self.uri)) + + def _check_object_uri(self, function_name): + if issubclass(type(self), BucketStorageUri) and not self.object_name: + raise InvalidUriError('%s on object-less URI (%s)' % + (function_name, self.uri)) + + def _warn_about_args(self, function_name, **args): + for arg in args: + if args[arg]: + sys.stderr.write( + 'Warning: %s ignores argument: %s=%s\n' % + (function_name, arg, str(args[arg]))) + + def connect(self, access_key_id=None, secret_access_key=None, **kwargs): + """ + Opens a connection to appropriate provider, depending on provider + portion of URI. Requires Credentials defined in boto config file (see + boto/pyami/config.py). + @type storage_uri: StorageUri + @param storage_uri: StorageUri specifying a bucket or a bucket+object + @rtype: L{AWSAuthConnection} + @return: A connection to storage service provider of the given URI. + """ + connection_args = dict(self.connection_args or ()) + + if (hasattr(self, 'suppress_consec_slashes') and + 'suppress_consec_slashes' not in connection_args): + connection_args['suppress_consec_slashes'] = ( + self.suppress_consec_slashes) + connection_args.update(kwargs) + if not self.connection: + if self.scheme in self.provider_pool: + self.connection = self.provider_pool[self.scheme] + elif self.scheme == 's3': + from boto.s3.connection import S3Connection + self.connection = S3Connection(access_key_id, + secret_access_key, + **connection_args) + self.provider_pool[self.scheme] = self.connection + elif self.scheme == 'gs': + from boto.gs.connection import GSConnection + # Use OrdinaryCallingFormat instead of boto-default + # SubdomainCallingFormat because the latter changes the hostname + # that's checked during cert validation for HTTPS connections, + # which will fail cert validation (when cert validation is + # enabled). + # + # The same is not true for S3's HTTPS certificates. In fact, + # we don't want to do this for S3 because S3 requires the + # subdomain to match the location of the bucket. If the proper + # subdomain is not used, the server will return a 301 redirect + # with no Location header. + # + # Note: the following import can't be moved up to the + # start of this file else it causes a config import failure when + # run from the resumable upload/download tests. 
+ from boto.s3.connection import OrdinaryCallingFormat + connection_args['calling_format'] = OrdinaryCallingFormat() + self.connection = GSConnection(access_key_id, + secret_access_key, + **connection_args) + self.provider_pool[self.scheme] = self.connection + elif self.scheme == 'file': + from boto.file.connection import FileConnection + self.connection = FileConnection(self) + else: + raise InvalidUriError('Unrecognized scheme "%s"' % + self.scheme) + self.connection.debug = self.debug + return self.connection + + def has_version(self): + return (issubclass(type(self), BucketStorageUri) + and ((self.version_id is not None) + or (self.generation is not None))) + + def delete_key(self, validate=False, headers=None, version_id=None, + mfa_token=None): + self._check_object_uri('delete_key') + bucket = self.get_bucket(validate, headers) + return bucket.delete_key(self.object_name, headers, version_id, + mfa_token) + + def list_bucket(self, prefix='', delimiter='', headers=None, + all_versions=False): + self._check_bucket_uri('list_bucket') + bucket = self.get_bucket(headers=headers) + if all_versions: + return (v for v in bucket.list_versions( + prefix=prefix, delimiter=delimiter, headers=headers) + if not isinstance(v, DeleteMarker)) + else: + return bucket.list(prefix=prefix, delimiter=delimiter, + headers=headers) + + def get_all_keys(self, validate=False, headers=None, prefix=None): + bucket = self.get_bucket(validate, headers) + return bucket.get_all_keys(headers) + + def get_bucket(self, validate=False, headers=None): + self._check_bucket_uri('get_bucket') + conn = self.connect() + bucket = conn.get_bucket(self.bucket_name, validate, headers) + self.check_response(bucket, 'bucket', self.uri) + return bucket + + def get_key(self, validate=False, headers=None, version_id=None): + self._check_object_uri('get_key') + bucket = self.get_bucket(validate, headers) + key = bucket.get_key(self.object_name, headers, version_id) + self.check_response(key, 'key', self.uri) + return key + + def new_key(self, validate=False, headers=None): + self._check_object_uri('new_key') + bucket = self.get_bucket(validate, headers) + return bucket.new_key(self.object_name) + + def get_contents_to_stream(self, fp, headers=None, version_id=None): + self._check_object_uri('get_key') + self._warn_about_args('get_key', validate=False) + key = self.get_key(None, headers) + self.check_response(key, 'key', self.uri) + return key.get_contents_to_file(fp, headers, version_id=version_id) + + def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=10, + torrent=False, version_id=None, + res_download_handler=None, response_headers=None, + hash_algs=None): + self._check_object_uri('get_contents_to_file') + key = self.get_key(None, headers) + self.check_response(key, 'key', self.uri) + if hash_algs: + key.get_contents_to_file(fp, headers, cb, num_cb, torrent, + version_id, res_download_handler, + response_headers, + hash_algs=hash_algs) + else: + key.get_contents_to_file(fp, headers, cb, num_cb, torrent, + version_id, res_download_handler, + response_headers) + + def get_contents_as_string(self, validate=False, headers=None, cb=None, + num_cb=10, torrent=False, version_id=None): + self._check_object_uri('get_contents_as_string') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + return key.get_contents_as_string(headers, cb, num_cb, torrent, + version_id) + + def acl_class(self): + conn = self.connect() + acl_class = conn.provider.acl_class + self.check_response(acl_class, 
'acl_class', self.uri) + return acl_class + + def canned_acls(self): + conn = self.connect() + canned_acls = conn.provider.canned_acls + self.check_response(canned_acls, 'canned_acls', self.uri) + return canned_acls + + +class BucketStorageUri(StorageUri): + """ + StorageUri subclass that handles bucket storage providers. + Callers should instantiate this class by calling boto.storage_uri(). + """ + + delim = '/' + capabilities = set([]) # A set of additional capabilities. + + def __init__(self, scheme, bucket_name=None, object_name=None, + debug=0, connection_args=None, suppress_consec_slashes=True, + version_id=None, generation=None, is_latest=False): + """Instantiate a BucketStorageUri from scheme,bucket,object tuple. + + @type scheme: string + @param scheme: URI scheme naming the storage provider (gs, s3, etc.) + @type bucket_name: string + @param bucket_name: bucket name + @type object_name: string + @param object_name: object name, excluding generation/version. + @type debug: int + @param debug: debug level to pass in to connection (range 0..2) + @type connection_args: map + @param connection_args: optional map containing args to be + passed to {S3,GS}Connection constructor (e.g., to override + https_connection_factory). + @param suppress_consec_slashes: If provided, controls whether + consecutive slashes will be suppressed in key paths. + @param version_id: Object version id (S3-specific). + @param generation: Object generation number (GCS-specific). + @param is_latest: boolean indicating that a versioned object is the + current version + + After instantiation the components are available in the following + fields: scheme, bucket_name, object_name, version_id, generation, + is_latest, versionless_uri, version_specific_uri, uri. + Note: If instantiated without version info, the string representation + for a URI stays versionless; similarly, if instantiated with version + info, the string representation for a URI stays version-specific. If you + call one of the uri.set_contents_from_xyz() methods, a specific object + version will be created, and its version-specific URI string can be + retrieved from version_specific_uri even if the URI was instantiated + without version info. 
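+
+        A brief construction sketch (the bucket and object names are
+        made-up examples; most callers go through ``boto.storage_uri()``
+        instead of constructing this class directly)::
+
+            uri = BucketStorageUri('gs', bucket_name='mybucket',
+                                   object_name='myobject', generation=5)
+            print uri.version_specific_uri   # gs://mybucket/myobject#5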
+ """ + + self.scheme = scheme + self.bucket_name = bucket_name + self.object_name = object_name + self.debug = debug + if connection_args: + self.connection_args = connection_args + self.suppress_consec_slashes = suppress_consec_slashes + self.version_id = version_id + self.generation = generation and int(generation) + self.is_latest = is_latest + self.is_version_specific = bool(self.generation) or bool(version_id) + self._build_uri_strings() + + def _build_uri_strings(self): + if self.bucket_name and self.object_name: + self.versionless_uri = '%s://%s/%s' % (self.scheme, self.bucket_name, + self.object_name) + if self.generation: + self.version_specific_uri = '%s#%s' % (self.versionless_uri, + self.generation) + elif self.version_id: + self.version_specific_uri = '%s#%s' % ( + self.versionless_uri, self.version_id) + if self.is_version_specific: + self.uri = self.version_specific_uri + else: + self.uri = self.versionless_uri + elif self.bucket_name: + self.uri = ('%s://%s/' % (self.scheme, self.bucket_name)) + else: + self.uri = ('%s://' % self.scheme) + + def _update_from_key(self, key): + self._update_from_values( + getattr(key, 'version_id', None), + getattr(key, 'generation', None), + getattr(key, 'is_latest', None), + getattr(key, 'md5', None)) + + def _update_from_values(self, version_id, generation, is_latest, md5): + self.version_id = version_id + self.generation = generation + self.is_latest = is_latest + self._build_uri_strings() + self.md5 = md5 + + def get_key(self, validate=False, headers=None, version_id=None): + self._check_object_uri('get_key') + bucket = self.get_bucket(validate, headers) + if self.get_provider().name == 'aws': + key = bucket.get_key(self.object_name, headers, + version_id=(version_id or self.version_id)) + elif self.get_provider().name == 'google': + key = bucket.get_key(self.object_name, headers, + generation=self.generation) + self.check_response(key, 'key', self.uri) + return key + + def delete_key(self, validate=False, headers=None, version_id=None, + mfa_token=None): + self._check_object_uri('delete_key') + bucket = self.get_bucket(validate, headers) + if self.get_provider().name == 'aws': + version_id = version_id or self.version_id + return bucket.delete_key(self.object_name, headers, version_id, + mfa_token) + elif self.get_provider().name == 'google': + return bucket.delete_key(self.object_name, headers, + generation=self.generation) + + def clone_replace_name(self, new_name): + """Instantiate a BucketStorageUri from the current BucketStorageUri, + but replacing the object_name. + + @type new_name: string + @param new_name: new object name + """ + self._check_bucket_uri('clone_replace_name') + return BucketStorageUri( + self.scheme, bucket_name=self.bucket_name, object_name=new_name, + debug=self.debug, + suppress_consec_slashes=self.suppress_consec_slashes) + + def clone_replace_key(self, key): + """Instantiate a BucketStorageUri from the current BucketStorageUri, by + replacing the object name with the object name and other metadata found + in the given Key object (including generation). 
+ + @type key: Key + @param key: key for the new StorageUri to represent + """ + self._check_bucket_uri('clone_replace_key') + version_id = None + generation = None + is_latest = False + if hasattr(key, 'version_id'): + version_id = key.version_id + if hasattr(key, 'generation'): + generation = key.generation + if hasattr(key, 'is_latest'): + is_latest = key.is_latest + + return BucketStorageUri( + key.provider.get_provider_name(), + bucket_name=key.bucket.name, + object_name=key.name, + debug=self.debug, + suppress_consec_slashes=self.suppress_consec_slashes, + version_id=version_id, + generation=generation, + is_latest=is_latest) + + def get_acl(self, validate=False, headers=None, version_id=None): + """returns a bucket's acl""" + self._check_bucket_uri('get_acl') + bucket = self.get_bucket(validate, headers) + # This works for both bucket- and object- level ACLs (former passes + # key_name=None): + key_name = self.object_name or '' + if self.get_provider().name == 'aws': + version_id = version_id or self.version_id + acl = bucket.get_acl(key_name, headers, version_id) + else: + acl = bucket.get_acl(key_name, headers, generation=self.generation) + self.check_response(acl, 'acl', self.uri) + return acl + + def get_def_acl(self, validate=False, headers=None): + """returns a bucket's default object acl""" + self._check_bucket_uri('get_def_acl') + bucket = self.get_bucket(validate, headers) + acl = bucket.get_def_acl(headers) + self.check_response(acl, 'acl', self.uri) + return acl + + def get_cors(self, validate=False, headers=None): + """returns a bucket's CORS XML""" + self._check_bucket_uri('get_cors') + bucket = self.get_bucket(validate, headers) + cors = bucket.get_cors(headers) + self.check_response(cors, 'cors', self.uri) + return cors + + def set_cors(self, cors, validate=False, headers=None): + """sets or updates a bucket's CORS XML""" + self._check_bucket_uri('set_cors ') + bucket = self.get_bucket(validate, headers) + bucket.set_cors(cors.to_xml(), headers) + + def get_location(self, validate=False, headers=None): + self._check_bucket_uri('get_location') + bucket = self.get_bucket(validate, headers) + return bucket.get_location() + + def get_storage_class(self, validate=False, headers=None): + self._check_bucket_uri('get_storage_class') + # StorageClass is defined as a bucket param for GCS, but as a key + # param for S3. + if self.scheme != 'gs': + raise ValueError('get_storage_class() not supported for %s ' + 'URIs.' % self.scheme) + bucket = self.get_bucket(validate, headers) + return bucket.get_storage_class() + + def get_subresource(self, subresource, validate=False, headers=None, + version_id=None): + self._check_bucket_uri('get_subresource') + bucket = self.get_bucket(validate, headers) + return bucket.get_subresource(subresource, self.object_name, headers, + version_id) + + def add_group_email_grant(self, permission, email_address, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_group_email_grant') + if self.scheme != 'gs': + raise ValueError('add_group_email_grant() not supported for %s ' + 'URIs.' 
% self.scheme) + if self.object_name: + if recursive: + raise ValueError('add_group_email_grant() on key-ful URI cannot ' + 'specify recursive=True') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_group_email_grant(permission, email_address, headers) + elif self.bucket_name: + bucket = self.get_bucket(validate, headers) + bucket.add_group_email_grant(permission, email_address, recursive, + headers) + else: + raise InvalidUriError('add_group_email_grant() on bucket-less URI ' + '%s' % self.uri) + + def add_email_grant(self, permission, email_address, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_email_grant') + if not self.object_name: + bucket = self.get_bucket(validate, headers) + bucket.add_email_grant(permission, email_address, recursive, + headers) + else: + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_email_grant(permission, email_address) + + def add_user_grant(self, permission, user_id, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_user_grant') + if not self.object_name: + bucket = self.get_bucket(validate, headers) + bucket.add_user_grant(permission, user_id, recursive, headers) + else: + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_user_grant(permission, user_id) + + def list_grants(self, headers=None): + self._check_bucket_uri('list_grants ') + bucket = self.get_bucket(headers) + return bucket.list_grants(headers) + + def is_file_uri(self): + """Returns True if this URI names a file or directory.""" + return False + + def is_cloud_uri(self): + """Returns True if this URI names a bucket or object.""" + return True + + def names_container(self): + """ + Returns True if this URI names a directory or bucket. Will return + False for bucket subdirs; providing bucket subdir semantics needs to + be done by the caller (like gsutil does). + """ + return bool(not self.object_name) + + def names_singleton(self): + """Returns True if this URI names a file or object.""" + return bool(self.object_name) + + def names_directory(self): + """Returns True if this URI names a directory.""" + return False + + def names_provider(self): + """Returns True if this URI names a provider.""" + return bool(not self.bucket_name) + + def names_bucket(self): + """Returns True if this URI names a bucket.""" + return bool(self.bucket_name) and bool(not self.object_name) + + def names_file(self): + """Returns True if this URI names a file.""" + return False + + def names_object(self): + """Returns True if this URI names an object.""" + return self.names_singleton() + + def is_stream(self): + """Returns True if this URI represents input/output stream.""" + return False + + def create_bucket(self, headers=None, location='', policy=None, + storage_class=None): + self._check_bucket_uri('create_bucket ') + conn = self.connect() + # Pass storage_class param only if this is a GCS bucket. (In S3 the + # storage class is specified on the key object.) 
+ if self.scheme == 'gs': + return conn.create_bucket(self.bucket_name, headers, location, policy, + storage_class) + else: + return conn.create_bucket(self.bucket_name, headers, location, policy) + + def delete_bucket(self, headers=None): + self._check_bucket_uri('delete_bucket') + conn = self.connect() + return conn.delete_bucket(self.bucket_name, headers) + + def get_all_buckets(self, headers=None): + conn = self.connect() + return conn.get_all_buckets(headers) + + def get_provider(self): + conn = self.connect() + provider = conn.provider + self.check_response(provider, 'provider', self.uri) + return provider + + def set_acl(self, acl_or_str, key_name='', validate=False, headers=None, + version_id=None, if_generation=None, if_metageneration=None): + """Sets or updates a bucket's ACL.""" + self._check_bucket_uri('set_acl') + key_name = key_name or self.object_name or '' + bucket = self.get_bucket(validate, headers) + if self.generation: + bucket.set_acl( + acl_or_str, key_name, headers, generation=self.generation, + if_generation=if_generation, if_metageneration=if_metageneration) + else: + version_id = version_id or self.version_id + bucket.set_acl(acl_or_str, key_name, headers, version_id) + + def set_xml_acl(self, xmlstring, key_name='', validate=False, headers=None, + version_id=None, if_generation=None, if_metageneration=None): + """Sets or updates a bucket's ACL with an XML string.""" + self._check_bucket_uri('set_xml_acl') + key_name = key_name or self.object_name or '' + bucket = self.get_bucket(validate, headers) + if self.generation: + bucket.set_xml_acl( + xmlstring, key_name, headers, generation=self.generation, + if_generation=if_generation, if_metageneration=if_metageneration) + else: + version_id = version_id or self.version_id + bucket.set_xml_acl(xmlstring, key_name, headers, + version_id=version_id) + + def set_def_xml_acl(self, xmlstring, validate=False, headers=None): + """Sets or updates a bucket's default object ACL with an XML string.""" + self._check_bucket_uri('set_def_xml_acl') + self.get_bucket(validate, headers).set_def_xml_acl(xmlstring, headers) + + def set_def_acl(self, acl_or_str, validate=False, headers=None, + version_id=None): + """Sets or updates a bucket's default object ACL.""" + self._check_bucket_uri('set_def_acl') + self.get_bucket(validate, headers).set_def_acl(acl_or_str, headers) + + def set_canned_acl(self, acl_str, validate=False, headers=None, + version_id=None): + """Sets or updates a bucket's acl to a predefined (canned) value.""" + self._check_object_uri('set_canned_acl') + self._warn_about_args('set_canned_acl', version_id=version_id) + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.set_canned_acl(acl_str, headers) + + def set_def_canned_acl(self, acl_str, validate=False, headers=None, + version_id=None): + """Sets or updates a bucket's default object acl to a predefined + (canned) value.""" + self._check_bucket_uri('set_def_canned_acl ') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.set_def_canned_acl(acl_str, headers, version_id) + + def set_subresource(self, subresource, value, validate=False, headers=None, + version_id=None): + self._check_bucket_uri('set_subresource') + bucket = self.get_bucket(validate, headers) + bucket.set_subresource(subresource, value, self.object_name, headers, + version_id) + + def set_contents_from_string(self, s, headers=None, replace=True, + cb=None, num_cb=10, policy=None, md5=None, + reduced_redundancy=False): + 
self._check_object_uri('set_contents_from_string') + key = self.new_key(headers=headers) + if self.scheme == 'gs': + if reduced_redundancy: + sys.stderr.write('Warning: GCS does not support ' + 'reduced_redundancy; argument ignored by ' + 'set_contents_from_string') + result = key.set_contents_from_string( + s, headers, replace, cb, num_cb, policy, md5) + else: + result = key.set_contents_from_string( + s, headers, replace, cb, num_cb, policy, md5, + reduced_redundancy) + self._update_from_key(key) + return result + + def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, + num_cb=10, policy=None, md5=None, size=None, + rewind=False, res_upload_handler=None): + self._check_object_uri('set_contents_from_file') + key = self.new_key(headers=headers) + if self.scheme == 'gs': + result = key.set_contents_from_file( + fp, headers, replace, cb, num_cb, policy, md5, size=size, + rewind=rewind, res_upload_handler=res_upload_handler) + if res_upload_handler: + self._update_from_values(None, res_upload_handler.generation, + None, md5) + else: + self._warn_about_args('set_contents_from_file', + res_upload_handler=res_upload_handler) + result = key.set_contents_from_file( + fp, headers, replace, cb, num_cb, policy, md5, size=size, + rewind=rewind) + self._update_from_key(key) + return result + + def set_contents_from_stream(self, fp, headers=None, replace=True, cb=None, + policy=None, reduced_redundancy=False): + self._check_object_uri('set_contents_from_stream') + dst_key = self.new_key(False, headers) + result = dst_key.set_contents_from_stream( + fp, headers, replace, cb, policy=policy, + reduced_redundancy=reduced_redundancy) + self._update_from_key(dst_key) + return result + + def copy_key(self, src_bucket_name, src_key_name, metadata=None, + src_version_id=None, storage_class='STANDARD', + preserve_acl=False, encrypt_key=False, headers=None, + query_args=None, src_generation=None): + """Returns newly created key.""" + self._check_object_uri('copy_key') + dst_bucket = self.get_bucket(validate=False, headers=headers) + if src_generation: + return dst_bucket.copy_key(new_key_name=self.object_name, + src_bucket_name=src_bucket_name, + src_key_name=src_key_name, metadata=metadata, + storage_class=storage_class, preserve_acl=preserve_acl, + encrypt_key=encrypt_key, headers=headers, query_args=query_args, + src_generation=src_generation) + else: + return dst_bucket.copy_key(new_key_name=self.object_name, + src_bucket_name=src_bucket_name, src_key_name=src_key_name, + metadata=metadata, src_version_id=src_version_id, + storage_class=storage_class, preserve_acl=preserve_acl, + encrypt_key=encrypt_key, headers=headers, query_args=query_args) + + def enable_logging(self, target_bucket, target_prefix=None, validate=False, + headers=None, version_id=None): + self._check_bucket_uri('enable_logging') + bucket = self.get_bucket(validate, headers) + bucket.enable_logging(target_bucket, target_prefix, headers=headers) + + def disable_logging(self, validate=False, headers=None, version_id=None): + self._check_bucket_uri('disable_logging') + bucket = self.get_bucket(validate, headers) + bucket.disable_logging(headers=headers) + + def get_logging_config(self, validate=False, headers=None, version_id=None): + self._check_bucket_uri('get_logging_config') + bucket = self.get_bucket(validate, headers) + return bucket.get_logging_config(headers=headers) + + def set_website_config(self, main_page_suffix=None, error_key=None, + validate=False, headers=None): + self._check_bucket_uri('set_website_config') 
+ bucket = self.get_bucket(validate, headers) + if not (main_page_suffix or error_key): + bucket.delete_website_configuration(headers) + else: + bucket.configure_website(main_page_suffix, error_key, headers) + + def get_website_config(self, validate=False, headers=None): + self._check_bucket_uri('get_website_config') + bucket = self.get_bucket(validate, headers) + return bucket.get_website_configuration(headers) + + def get_versioning_config(self, headers=None): + self._check_bucket_uri('get_versioning_config') + bucket = self.get_bucket(False, headers) + return bucket.get_versioning_status(headers) + + def configure_versioning(self, enabled, headers=None): + self._check_bucket_uri('configure_versioning') + bucket = self.get_bucket(False, headers) + return bucket.configure_versioning(enabled, headers) + + def set_metadata(self, metadata_plus, metadata_minus, preserve_acl, + headers=None): + return self.get_key(False).set_remote_metadata(metadata_plus, + metadata_minus, + preserve_acl, + headers=headers) + + def compose(self, components, content_type=None, headers=None): + self._check_object_uri('compose') + component_keys = [] + for suri in components: + component_keys.append(suri.new_key()) + component_keys[-1].generation = suri.generation + self.generation = self.new_key().compose( + component_keys, content_type=content_type, headers=headers) + self._build_uri_strings() + return self + + def get_lifecycle_config(self, validate=False, headers=None): + """Returns a bucket's lifecycle configuration.""" + self._check_bucket_uri('get_lifecycle_config') + bucket = self.get_bucket(validate, headers) + lifecycle_config = bucket.get_lifecycle_config(headers) + self.check_response(lifecycle_config, 'lifecycle', self.uri) + return lifecycle_config + + def configure_lifecycle(self, lifecycle_config, validate=False, + headers=None): + """Sets or updates a bucket's lifecycle configuration.""" + self._check_bucket_uri('configure_lifecycle') + bucket = self.get_bucket(validate, headers) + bucket.configure_lifecycle(lifecycle_config, headers) + + def exists(self, headers=None): + """Returns True if the object exists or False if it doesn't""" + if not self.object_name: + raise InvalidUriError('exists on object-less URI (%s)' % self.uri) + bucket = self.get_bucket() + key = bucket.get_key(self.object_name, headers=headers) + return bool(key) + +class FileStorageUri(StorageUri): + """ + StorageUri subclass that handles files in the local file system. + Callers should instantiate this class by calling boto.storage_uri(). + + See file/README about how we map StorageUri operations onto a file system. + """ + + delim = os.sep + + def __init__(self, object_name, debug, is_stream=False): + """Instantiate a FileStorageUri from a path name. + + @type object_name: string + @param object_name: object name + @type debug: boolean + @param debug: whether to enable debugging on this StorageUri + + After instantiation the components are available in the following + fields: uri, scheme, bucket_name (always blank for this "anonymous" + bucket), object_name. + """ + + self.scheme = 'file' + self.bucket_name = '' + self.object_name = object_name + self.uri = 'file://' + object_name + self.debug = debug + self.stream = is_stream + + def clone_replace_name(self, new_name): + """Instantiate a FileStorageUri from the current FileStorageUri, + but replacing the object_name. 
+ + @type new_name: string + @param new_name: new object name + """ + return FileStorageUri(new_name, self.debug, self.stream) + + def is_file_uri(self): + """Returns True if this URI names a file or directory.""" + return True + + def is_cloud_uri(self): + """Returns True if this URI names a bucket or object.""" + return False + + def names_container(self): + """Returns True if this URI names a directory or bucket.""" + return self.names_directory() + + def names_singleton(self): + """Returns True if this URI names a file (or stream) or object.""" + return not self.names_container() + + def names_directory(self): + """Returns True if this URI names a directory.""" + if self.stream: + return False + return os.path.isdir(self.object_name) + + def names_provider(self): + """Returns True if this URI names a provider.""" + return False + + def names_bucket(self): + """Returns True if this URI names a bucket.""" + return False + + def names_file(self): + """Returns True if this URI names a file.""" + return self.names_singleton() + + def names_object(self): + """Returns True if this URI names an object.""" + return False + + def is_stream(self): + """Returns True if this URI represents input/output stream. + """ + return bool(self.stream) + + def close(self): + """Closes the underlying file. + """ + self.get_key().close() + + def exists(self, _headers_not_used=None): + """Returns True if the file exists or False if it doesn't""" + # The _headers_not_used parameter is ignored. It is only there to ensure + # that this method's signature is identical to the exists method on the + # BucketStorageUri class. + return os.path.exists(self.object_name) diff --git a/awx/lib/site-packages/boto/sts/__init__.py b/awx/lib/site-packages/boto/sts/__init__.py new file mode 100644 index 0000000000..0b7a8de23c --- /dev/null +++ b/awx/lib/site-packages/boto/sts/__init__.py @@ -0,0 +1,59 @@ +# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from connection import STSConnection +from boto.regioninfo import RegionInfo + + +def regions(): + """ + Get all available regions for the STS service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + return [RegionInfo(name='us-east-1', + endpoint='sts.amazonaws.com', + connection_cls=STSConnection), + RegionInfo(name='us-gov-west-1', + endpoint='sts.us-gov-west-1.amazonaws.com', + connection_cls=STSConnection) + + ] + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.sts.connection.STSConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.sts.connection.STSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/sts/connection.py b/awx/lib/site-packages/boto/sts/connection.py new file mode 100644 index 0000000000..5f488e2631 --- /dev/null +++ b/awx/lib/site-packages/boto/sts/connection.py @@ -0,0 +1,498 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from credentials import Credentials, FederationToken, AssumedRole +from credentials import DecodeAuthorizationMessage +import boto +import boto.utils +import datetime +import threading + +_session_token_cache = {} + + +class STSConnection(AWSQueryConnection): + """ + AWS Security Token Service + The AWS Security Token Service is a web service that enables you + to request temporary, limited-privilege credentials for AWS + Identity and Access Management (IAM) users or for users that you + authenticate (federated users). This guide provides descriptions + of the AWS Security Token Service API. + + For more detailed information about using this service, go to + `Using Temporary Security Credentials`_. + + For information about setting up signatures and authorization + through the API, go to `Signing AWS API Requests`_ in the AWS + General Reference . For general information about the Query API, + go to `Making Query Requests`_ in Using IAM . For information + about using security tokens with other AWS products, go to `Using + Temporary Security Credentials to Access AWS`_ in Using Temporary + Security Credentials . 
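+
+    A minimal usage sketch (the region name is an example; credentials
+    are assumed to come from the usual boto config or environment
+    lookup)::
+
+        import boto.sts
+        conn = boto.sts.connect_to_region('us-east-1')
+        token = conn.get_session_token(duration=900)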
+
+    If you're new to AWS and need additional technical information
+    about a specific AWS product, you can find the product's technical
+    documentation at `http://aws.amazon.com/documentation/`_.
+
+    We will refer to Amazon Identity and Access Management using the
+    abbreviated form IAM. All copyrights and legal protections still
+    apply.
+    """
+    DefaultRegionName = 'us-east-1'
+    DefaultRegionEndpoint = 'sts.amazonaws.com'
+    APIVersion = '2011-06-15'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 converter=None, validate_certs=True, anon=False):
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint,
+                                connection_cls=STSConnection)
+        self.region = region
+        self.anon = anon
+        self._mutex = threading.Semaphore()
+        AWSQueryConnection.__init__(self, aws_access_key_id,
+                                    aws_secret_access_key,
+                                    is_secure, port, proxy, proxy_port,
+                                    proxy_user, proxy_pass,
+                                    self.region.endpoint, debug,
+                                    https_connection_factory, path,
+                                    validate_certs=validate_certs)
+
+    def _required_auth_capability(self):
+        if self.anon:
+            return ['pure-query']
+        else:
+            return ['sign-v2']
+
+    def _check_token_cache(self, token_key, duration=None, window_seconds=60):
+        token = _session_token_cache.get(token_key, None)
+        if token:
+            now = datetime.datetime.utcnow()
+            expires = boto.utils.parse_ts(token.expiration)
+            delta = expires - now
+            if delta < datetime.timedelta(seconds=window_seconds):
+                msg = 'Cached session token %s is expired' % token_key
+                boto.log.debug(msg)
+                token = None
+        return token
+
+    def _get_session_token(self, duration=None,
+                           mfa_serial_number=None, mfa_token=None):
+        params = {}
+        if duration:
+            params['DurationSeconds'] = duration
+        if mfa_serial_number:
+            params['SerialNumber'] = mfa_serial_number
+        if mfa_token:
+            params['TokenCode'] = mfa_token
+        return self.get_object('GetSessionToken', params,
+                               Credentials, verb='POST')
+
+    def get_session_token(self, duration=None, force_new=False,
+                          mfa_serial_number=None, mfa_token=None):
+        """
+        Return a valid session token. Because retrieving new tokens
+        from the Secure Token Service is a fairly heavyweight operation,
+        this module caches previously retrieved tokens and returns
+        them when appropriate. Each token is cached with a key
+        consisting of the region name of the STS endpoint
+        concatenated with the requesting user's access id. If there
+        is a token in the cache matching this key, the session
+        expiration is checked to make sure it is still valid and if
+        so, the cached token is returned. Otherwise, a new session
+        token is requested from STS and it is placed into the cache
+        and returned.
+
+        :type duration: int
+        :param duration: The number of seconds the credentials should
+            remain valid.
+
+        :type force_new: bool
+        :param force_new: If this parameter is True, a new session token
+            will be retrieved from the Secure Token Service regardless
+            of whether there is a valid cached token or not.
+
+        :type mfa_serial_number: str
+        :param mfa_serial_number: The serial number of an MFA device.
+            If this is provided and if the mfa_token provided is
+            valid, the temporary session token will be authorized to
+            perform operations requiring the MFA device authentication.
+
+        :type mfa_token: str
+        :param mfa_token: The 6 digit token associated with the
+            MFA device.
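+
+        A short usage sketch, assuming ``conn`` is an existing
+        :class:`STSConnection`::
+
+            token = conn.get_session_token(duration=3600)
+            print token.access_key, token.expiration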
+        """
+        token_key = '%s:%s' % (self.region.name, self.provider.access_key)
+        token = self._check_token_cache(token_key, duration)
+        if force_new or not token:
+            boto.log.debug('fetching a new token for %s' % token_key)
+            try:
+                self._mutex.acquire()
+                token = self._get_session_token(duration,
+                                                mfa_serial_number,
+                                                mfa_token)
+                _session_token_cache[token_key] = token
+            finally:
+                self._mutex.release()
+        return token
+
+    def get_federation_token(self, name, duration=None, policy=None):
+        """
+        Returns a set of temporary security credentials (consisting of
+        an access key ID, a secret access key, and a security token)
+        for a federated user. A typical use is in a proxy application
+        that is getting temporary security credentials on behalf of
+        distributed applications inside a corporate network. Because
+        you must call the `GetFederationToken` action using the long-
+        term security credentials of an IAM user, this call is
+        appropriate in contexts where those credentials can be safely
+        stored, usually in a server-based application.
+
+        **Note:** Do not use this call in mobile applications or
+        client-based web applications that directly get temporary
+        security credentials. For those types of applications, use
+        `AssumeRoleWithWebIdentity`.
+
+        The `GetFederationToken` action must be called by using the
+        long-term AWS security credentials of the AWS account or an
+        IAM user. Credentials that are created by IAM users are valid
+        for the specified duration, between 900 seconds (15 minutes)
+        and 129600 seconds (36 hours); credentials that are created by
+        using account credentials have a maximum duration of 3600
+        seconds (1 hour).
+
+        The permissions that are granted to the federated user are the
+        intersection of the policy that is passed with the
+        `GetFederationToken` request and the policies that are
+        associated with the entity making the `GetFederationToken`
+        call.
+
+        For more information about how permissions work, see
+        `Controlling Permissions in Temporary Credentials`_ in Using
+        Temporary Security Credentials . For information about using
+        `GetFederationToken` to create temporary security credentials,
+        see `Creating Temporary Credentials to Enable Access for
+        Federated Users`_ in Using Temporary Security Credentials .
+
+        :type name: string
+        :param name: The name of the federated user. The name is used as an
+            identifier for the temporary security credentials (such as `Bob`).
+            For example, you can reference the federated user name in a
+            resource-based policy, such as in an Amazon S3 bucket policy.
+
+        :type policy: string
+        :param policy: A policy that specifies the permissions that are granted
+            to the federated user. By default, federated users have no
+            permissions; they do not inherit any from the IAM user. When you
+            specify a policy, the federated user's permissions are the
+            intersection of the specified policy and the IAM user's policy. If
+            you don't specify a policy, federated users can only access AWS
+            resources that explicitly allow those federated users in a resource
+            policy, such as in an Amazon S3 bucket policy.
+
+        :type duration: integer
+        :param duration: The duration, in seconds, that the session
+            should last. Acceptable durations for federation sessions range
+            from 900 seconds (15 minutes) to 129600 seconds (36 hours), with
+            43200 seconds (12 hours) as the default. Sessions for AWS account
+            owners are restricted to a maximum of 3600 seconds (one hour). If
+            the duration is longer than one hour, the session for AWS account
+            owners defaults to one hour.
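+
+        A minimal sketch, assuming ``conn`` is an :class:`STSConnection`
+        (the name and duration are example values)::
+
+            fed = conn.get_federation_token('Bob', duration=3600)
+            print fed.credentials.access_key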
+
+        """
+        params = {'Name': name}
+        if duration:
+            params['DurationSeconds'] = duration
+        if policy:
+            params['Policy'] = policy
+        return self.get_object('GetFederationToken', params,
+                               FederationToken, verb='POST')
+
+    def assume_role(self, role_arn, role_session_name, policy=None,
+                    duration_seconds=None, external_id=None):
+        """
+        Returns a set of temporary security credentials (consisting of
+        an access key ID, a secret access key, and a security token)
+        that you can use to access AWS resources that you might not
+        normally have access to. Typically, you use `AssumeRole` for
+        cross-account access or federation.
+
+        For cross-account access, imagine that you own multiple
+        accounts and need to access resources in each account. You
+        could create long-term credentials in each account to access
+        those resources. However, managing all those credentials and
+        remembering which one can access which account can be time
+        consuming. Instead, you can create one set of long-term
+        credentials in one account and then use temporary security
+        credentials to access all the other accounts by assuming roles
+        in those accounts. For more information about roles, see
+        `Roles`_ in Using IAM .
+
+        For federation, you can, for example, grant single sign-on
+        access to the AWS Management Console. If you already have an
+        identity and authentication system in your corporate network,
+        you don't have to recreate user identities in AWS in order to
+        grant those user identities access to AWS. Instead, after a
+        user has been authenticated, you call `AssumeRole` (and
+        specify the role with the appropriate permissions) to get
+        temporary security credentials for that user. With those
+        temporary security credentials, you construct a sign-in URL
+        that users can use to access the console. For more
+        information, see `Scenarios for Granting Temporary Access`_ in
+        AWS Security Token Service .
+
+        The temporary security credentials are valid for the duration
+        that you specified when calling `AssumeRole`, which can be
+        from 900 seconds (15 minutes) to 3600 seconds (1 hour). The
+        default is 1 hour.
+
+        The temporary security credentials that are returned from the
+        `AssumeRole` response have the permissions that are associated
+        with the access policy of the role being assumed and any
+        policies that are associated with the AWS resource being
+        accessed. You can further restrict the permissions of the
+        temporary security credentials by passing a policy in the
+        request. The resulting permissions are an intersection of the
+        role's access policy and the policy that you passed. These
+        policies and any applicable resource-based policies are
+        evaluated when calls to AWS service APIs are made using the
+        temporary security credentials.
+
+        To assume a role, your AWS account must be trusted by the
+        role. The trust relationship is defined in the role's trust
+        policy when the IAM role is created. You must also have a
+        policy that allows you to call `sts:AssumeRole`.
+
+        **Important:** You cannot call `AssumeRole` by using AWS
+        account credentials; access will be denied. You must use IAM
+        user credentials to call `AssumeRole`.
+
+        :type role_arn: string
+        :param role_arn: The Amazon Resource Name (ARN) of the role that the
+            caller is assuming.
+
+        :type role_session_name: string
+        :param role_session_name: An identifier for the assumed role session.
+            The session name is included as part of the `AssumedRoleUser`.
+ + :type policy: string + :param policy: A supplemental policy that is associated with the + temporary security credentials from the `AssumeRole` call. The + resulting permissions of the temporary security credentials are an + intersection of this policy and the access policy that is + associated with the role. Use this policy to further restrict the + permissions of the temporary security credentials. + + :type duration_seconds: integer + :param duration_seconds: The duration, in seconds, of the role session. + The value can range from 900 seconds (15 minutes) to 3600 seconds + (1 hour). By default, the value is set to 3600 seconds. + + :type external_id: string + :param external_id: A unique identifier that is used by third parties + to assume a role in their customers' accounts. For each role that + the third party can assume, they should instruct their customers to + create a role with the external ID that the third party generated. + Each time the third party assumes the role, they must pass the + customer's external ID. The external ID is useful in order to help + third parties bind a role to the customer who created it. For more + information about the external ID, see `About the External ID`_ in + Using Temporary Security Credentials . + + """ + params = { + 'RoleArn': role_arn, + 'RoleSessionName': role_session_name + } + if policy is not None: + params['Policy'] = policy + if duration_seconds is not None: + params['DurationSeconds'] = duration_seconds + if external_id is not None: + params['ExternalId'] = external_id + return self.get_object('AssumeRole', params, AssumedRole, verb='POST') + + def assume_role_with_web_identity(self, role_arn, role_session_name, + web_identity_token, provider_id=None, + policy=None, duration_seconds=None): + """ + Returns a set of temporary security credentials for users who + have been authenticated in a mobile or web application with a + web identity provider, such as Login with Amazon, Facebook, or + Google. `AssumeRoleWithWebIdentity` is an API call that does + not require the use of AWS security credentials. Therefore, + you can distribute an application (for example, on mobile + devices) that requests temporary security credentials without + including long-term AWS credentials in the application or by + deploying server-based proxy services that use long-term AWS + credentials. For more information, see `Creating a Mobile + Application with Third-Party Sign-In`_ in AWS Security Token + Service . + + The temporary security credentials consist of an access key + ID, a secret access key, and a security token. Applications + can use these temporary security credentials to sign calls to + AWS service APIs. The credentials are valid for the duration + that you specified when calling `AssumeRoleWithWebIdentity`, + which can be from 900 seconds (15 minutes) to 3600 seconds (1 + hour). By default, the temporary security credentials are + valid for 1 hour. + + The temporary security credentials that are returned from the + `AssumeRoleWithWebIdentity` response have the permissions that + are associated with the access policy of the role being + assumed. You can further restrict the permissions of the + temporary security credentials by passing a policy in the + request. The resulting permissions are an intersection of the + role's access policy and the policy that you passed. These + policies and any applicable resource-based policies are + evaluated when calls to AWS service APIs are made using the + temporary security credentials. 
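+
+        A hedged sketch of a typical call, assuming ``conn`` is an
+        :class:`STSConnection` (the ARN, session name, and token below
+        are placeholder values)::
+
+            role = conn.assume_role_with_web_identity(
+                'arn:aws:iam::123456789012:role/WebIdentityRole',
+                'app-session', web_identity_token)
+            creds = role.credentials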
+ + Before your application can call `AssumeRoleWithWebIdentity`, + you must have an identity token from a supported identity + provider and create a role that the application can assume. + The role that your application assumes must trust the identity + provider that is associated with the identity token. In other + words, the identity provider must be specified in the role's + trust policy. For more information, see ` Creating Temporary + Security Credentials for Mobile Apps Using Third-Party + Identity Providers`_. + + :type role_arn: string + :param role_arn: The Amazon Resource Name (ARN) of the role that the + caller is assuming. + + :type role_session_name: string + :param role_session_name: An identifier for the assumed role session. + Typically, you pass the name or identifier that is associated with + the user who is using your application. That way, the temporary + security credentials that your application will use are associated + with that user. This session name is included as part of the ARN + and assumed role ID in the `AssumedRoleUser` response element. + + :type web_identity_token: string + :param web_identity_token: The OAuth 2.0 access token or OpenID Connect + ID token that is provided by the identity provider. Your + application must get this token by authenticating the user who is + using your application with a web identity provider before the + application makes an `AssumeRoleWithWebIdentity` call. + + :type provider_id: string + :param provider_id: Specify this value only for OAuth access tokens. Do + not specify this value for OpenID Connect ID tokens, such as + `accounts.google.com`. This is the fully-qualified host component + of the domain name of the identity provider. Do not include URL + schemes and port numbers. Currently, `www.amazon.com` and + `graph.facebook.com` are supported. + + :type policy: string + :param policy: A supplemental policy that is associated with the + temporary security credentials from the `AssumeRoleWithWebIdentity` + call. The resulting permissions of the temporary security + credentials are an intersection of this policy and the access + policy that is associated with the role. Use this policy to further + restrict the permissions of the temporary security credentials. + + :type duration_seconds: integer + :param duration_seconds: The duration, in seconds, of the role session. + The value can range from 900 seconds (15 minutes) to 3600 seconds + (1 hour). By default, the value is set to 3600 seconds. + + """ + params = { + 'RoleArn': role_arn, + 'RoleSessionName': role_session_name, + 'WebIdentityToken': web_identity_token, + } + if provider_id is not None: + params['ProviderId'] = provider_id + if policy is not None: + params['Policy'] = policy + if duration_seconds is not None: + params['DurationSeconds'] = duration_seconds + return self.get_object( + 'AssumeRoleWithWebIdentity', + params, + AssumedRole, + verb='POST' + ) + + def decode_authorization_message(self, encoded_message): + """ + Decodes additional information about the authorization status + of a request from an encoded message returned in response to + an AWS request. + + For example, if a user is not authorized to perform an action + that he or she has requested, the request returns a + `Client.UnauthorizedOperation` response (an HTTP 403 + response). Some AWS actions additionally return an encoded + message that can provide details about this authorization + failure. + Only certain AWS actions return an encoded authorization + message. 
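Likewise, a hedged sketch of assume_role_with_web_identity(); the role ARN is hypothetical, the token placeholder stands in for a real provider token, and the anon flag of boto's STSConnection (which skips request signing) is assumed to be available in this version:

import boto.sts

# No AWS credentials are needed for this call, so build an unsigned
# (anonymous) connection.
sts = boto.sts.connect_to_region('us-east-1', anon=True)
token_from_provider = '...token from Login with Amazon, Facebook, or Google...'
assumed = sts.assume_role_with_web_identity(
    role_arn='arn:aws:iam::123456789012:role/mobile-app',  # hypothetical
    role_session_name='app-user-42',
    web_identity_token=token_from_provider,
    provider_id='www.amazon.com',  # omit for OpenID Connect ID tokens
)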
The documentation for an individual action indicates + whether that action returns an encoded message in addition to + returning an HTTP code. + The message is encoded because the details of the + authorization status can constitute privileged information + that the user who requested the action should not see. To + decode an authorization status message, a user must be granted + permissions via an IAM policy to request the + `DecodeAuthorizationMessage` ( + `sts:DecodeAuthorizationMessage`) action. + + The decoded message includes the following type of + information: + + + + Whether the request was denied due to an explicit deny or + due to the absence of an explicit allow. For more information, + see `Determining Whether a Request is Allowed or Denied`_ in + Using IAM . + + The principal who made the request. + + The requested action. + + The requested resource. + + The values of condition keys in the context of the user's + request. + + :type encoded_message: string + :param encoded_message: The encoded message that was returned with the + response. + + """ + params = { + 'EncodedMessage': encoded_message, + } + return self.get_object( + 'DecodeAuthorizationMessage', + params, + DecodeAuthorizationMessage, + verb='POST' + ) diff --git a/awx/lib/site-packages/boto/sts/credentials.py b/awx/lib/site-packages/boto/sts/credentials.py new file mode 100644 index 0000000000..21828db77c --- /dev/null +++ b/awx/lib/site-packages/boto/sts/credentials.py @@ -0,0 +1,237 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import os +import datetime + +import boto.utils +from boto.compat import json + + +class Credentials(object): + """ + :ivar access_key: The AccessKeyID. + :ivar secret_key: The SecretAccessKey. + :ivar session_token: The session token that must be passed with + requests to use the temporary credentials + :ivar expiration: The timestamp for when the credentials will expire + """ + + def __init__(self, parent=None): + self.parent = parent + self.access_key = None + self.secret_key = None + self.session_token = None + self.expiration = None + self.request_id = None + + @classmethod + def from_json(cls, json_doc): + """ + Create and return a new Session Token based on the contents + of a JSON document. + + :type json_doc: str + :param json_doc: A string containing a JSON document with a + previously saved Credentials object. 
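A short sketch of decode_authorization_message() as defined above; the encoded blob is a placeholder for the message attached to a real Client.UnauthorizedOperation error, and the caller is assumed to hold the sts:DecodeAuthorizationMessage permission:

import boto.sts

sts = boto.sts.connect_to_region('us-east-1')
# In practice this string is copied out of a failed request's error body.
encoded_msg = '...opaque encoded message from the HTTP 403 response...'
decoded = sts.decode_authorization_message(encoded_msg)
# decoded_message is typically a JSON document describing the denial.
print(decoded.decoded_message)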
+ """ + d = json.loads(json_doc) + token = cls() + token.__dict__.update(d) + return token + + @classmethod + def load(cls, file_path): + """ + Create and return a new Session Token based on the contents + of a previously saved JSON-format file. + + :type file_path: str + :param file_path: The fully qualified path to the JSON-format + file containing the previously saved Session Token information. + """ + fp = open(file_path) + json_doc = fp.read() + fp.close() + return cls.from_json(json_doc) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'AccessKeyId': + self.access_key = value + elif name == 'SecretAccessKey': + self.secret_key = value + elif name == 'SessionToken': + self.session_token = value + elif name == 'Expiration': + self.expiration = value + elif name == 'RequestId': + self.request_id = value + else: + pass + + def to_dict(self): + """ + Return a Python dict containing the important information + about this Session Token. + """ + return {'access_key': self.access_key, + 'secret_key': self.secret_key, + 'session_token': self.session_token, + 'expiration': self.expiration, + 'request_id': self.request_id} + + def save(self, file_path): + """ + Persist a Session Token to a file in JSON format. + + :type path: str + :param path: The fully qualified path to the file where the + the Session Token data should be written. Any previous + data in the file will be overwritten. To help protect + the credentials contained in the file, the permissions + of the file will be set to readable/writable by owner only. + """ + fp = open(file_path, 'wb') + json.dump(self.to_dict(), fp) + fp.close() + os.chmod(file_path, 0600) + + def is_expired(self, time_offset_seconds=0): + """ + Checks to see if the Session Token is expired or not. By default + it will check to see if the Session Token is expired as of the + moment the method is called. However, you can supply an + optional parameter which is the number of seconds of offset + into the future for the check. For example, if you supply + a value of 5, this method will return a True if the Session + Token will be expired 5 seconds from this moment. + + :type time_offset_seconds: int + :param time_offset_seconds: The number of seconds into the future + to test the Session Token for expiration. + """ + now = datetime.datetime.utcnow() + if time_offset_seconds: + now = now + datetime.timedelta(seconds=time_offset_seconds) + ts = boto.utils.parse_ts(self.expiration) + delta = ts - now + return delta.total_seconds() <= 0 + + +class FederationToken(object): + """ + :ivar credentials: A Credentials object containing the credentials. + :ivar federated_user_arn: ARN specifying federated user using credentials. + :ivar federated_user_id: The ID of the federated user using credentials. 
+ :ivar packed_policy_size: A percentage value indicating the size of + the policy in packed form + """ + + def __init__(self, parent=None): + self.parent = parent + self.credentials = None + self.federated_user_arn = None + self.federated_user_id = None + self.packed_policy_size = None + self.request_id = None + + def startElement(self, name, attrs, connection): + if name == 'Credentials': + self.credentials = Credentials() + return self.credentials + else: + return None + + def endElement(self, name, value, connection): + if name == 'Arn': + self.federated_user_arn = value + elif name == 'FederatedUserId': + self.federated_user_id = value + elif name == 'PackedPolicySize': + self.packed_policy_size = int(value) + elif name == 'RequestId': + self.request_id = value + else: + pass + + +class AssumedRole(object): + """ + :ivar user: The assumed role user. + :ivar credentials: A Credentials object containing the credentials. + """ + def __init__(self, connection=None, credentials=None, user=None): + self._connection = connection + self.credentials = credentials + self.user = user + + def startElement(self, name, attrs, connection): + if name == 'Credentials': + self.credentials = Credentials() + return self.credentials + elif name == 'AssumedRoleUser': + self.user = User() + return self.user + + def endElement(self, name, value, connection): + pass + + +class User(object): + """ + :ivar arn: The arn of the user assuming the role. + :ivar assume_role_id: The identifier of the assumed role. + """ + def __init__(self, arn=None, assume_role_id=None): + self.arn = arn + self.assume_role_id = assume_role_id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Arn': + self.arn = value + elif name == 'AssumedRoleId': + self.assume_role_id = value + + +class DecodeAuthorizationMessage(object): + """ + :ivar request_id: The request ID. + :ivar decoded_message: The decoded authorization message (may be JSON). + """ + def __init__(self, request_id=None, decoded_message=None): + self.request_id = request_id + self.decoded_message = decoded_message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'requestId': + self.request_id = value + elif name == 'DecodedMessage': + self.decoded_message = value diff --git a/awx/lib/site-packages/boto/support/__init__.py b/awx/lib/site-packages/boto/support/__init__.py new file mode 100644 index 0000000000..6d59b375e0 --- /dev/null +++ b/awx/lib/site-packages/boto/support/__init__.py @@ -0,0 +1,47 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo + + +def regions(): + """ + Get all available regions for the Amazon Support service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.support.layer1 import SupportConnection + return [ + RegionInfo( + name='us-east-1', + endpoint='support.us-east-1.amazonaws.com', + connection_cls=SupportConnection + ), + ] + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/support/exceptions.py b/awx/lib/site-packages/boto/support/exceptions.py new file mode 100644 index 0000000000..f4e33d016d --- /dev/null +++ b/awx/lib/site-packages/boto/support/exceptions.py @@ -0,0 +1,34 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class CaseIdNotFound(JSONResponseError): + pass + + +class CaseCreationLimitExceeded(JSONResponseError): + pass + + +class InternalServerError(JSONResponseError): + pass diff --git a/awx/lib/site-packages/boto/support/layer1.py b/awx/lib/site-packages/boto/support/layer1.py new file mode 100644 index 0000000000..5e73db262a --- /dev/null +++ b/awx/lib/site-packages/boto/support/layer1.py @@ -0,0 +1,529 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import json
+import boto
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.support import exceptions
+
+
+class SupportConnection(AWSQueryConnection):
+    """
+    AWS Support
+    The AWS Support API reference is intended for programmers who need
+    detailed information about the AWS Support actions and data types.
+    This service enables you to manage your AWS Support cases
+    programmatically. It is built on the AWS Query API programming
+    model and provides HTTP methods that take parameters and return
+    results in JSON format.
+
+    The AWS Support service also exposes a set of `Trusted Advisor`_
+    features. You can retrieve a list of checks you can run on your
+    resources, specify checks to run and refresh, and check the status
+    of checks you have submitted.
+
+    The following list describes the AWS Support case management
+    actions:
+
+
+    + **Service names, issue categories, and available severity
+      levels.** The actions `DescribeServices`_ and
+      `DescribeSeverityLevels`_ enable you to obtain AWS service names,
+      service codes, service categories, and problem severity levels.
+      You use these values when you call the `CreateCase`_ action.
+    + **Case creation, case details, and case resolution.** The
+      actions `CreateCase`_, `DescribeCases`_, and `ResolveCase`_ enable
+      you to create AWS Support cases, retrieve them, and resolve them.
+    + **Case communication.** The actions
+      `DescribeCaseCommunications`_ and `AddCommunicationToCase`_ enable
+      you to retrieve and add communication to AWS Support cases.
+
+
+    The following list describes the actions available from the AWS
+    Support service for Trusted Advisor:
+
+
+    + `DescribeTrustedAdvisorChecks`_ returns the list of checks that
+      you can run against your AWS resources.
+    + Using the CheckId for a specific check returned by
+      `DescribeTrustedAdvisorChecks`_, you can call
+      `DescribeTrustedAdvisorCheckResult`_ and obtain a new result for
+      the check you specified.
+    + Using `DescribeTrustedAdvisorCheckSummaries`_, you can get
+      summaries for a set of Trusted Advisor checks.
+    + `RefreshTrustedAdvisorCheck`_ enables you to request that
+      Trusted Advisor run the check again.
+    + `DescribeTrustedAdvisorCheckRefreshStatuses`_ gets statuses on
+      the checks you are running.
+
+
+    For authentication of requests, AWS Support uses the `Signature
+    Version 4 Signing Process`_.
+
+    See the AWS Support Developer Guide for information about how to
+    use this service to create and manage your support cases, and how
+    to call Trusted Advisor for results of checks on your resources.
+    """
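A minimal connection sketch for this class, using the connect_to_region() helper added in boto/support/__init__.py above; us-east-1 is the only endpoint that helper registers:

from boto.support import connect_to_region

support = connect_to_region('us-east-1')
# Responses are plain dicts decoded from the JSON body; the 'services'
# key below follows the AWS Support API response shape.
services = support.describe_services(language='en')
for svc in services['services']:
    print(svc['code'], svc['name'])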
+ """ + APIVersion = "2013-04-15" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "support.us-east-1.amazonaws.com" + ServiceName = "Support" + TargetPrefix = "AWSSupport_20130415" + ResponseError = JSONResponseError + + _faults = { + "CaseIdNotFound": exceptions.CaseIdNotFound, + "CaseCreationLimitExceeded": exceptions.CaseCreationLimitExceeded, + "InternalServerError": exceptions.InternalServerError, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + kwargs['host'] = region.endpoint + AWSQueryConnection.__init__(self, **kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_communication_to_case(self, communication_body, case_id=None, + cc_email_addresses=None): + """ + This action adds additional customer communication to an AWS + Support case. You use the CaseId value to identify the case to + which you want to add communication. You can list a set of + email addresses to copy on the communication using the + CcEmailAddresses value. The CommunicationBody value contains + the text of the communication. + + This action's response indicates the success or failure of the + request. + + This action implements a subset of the behavior on the AWS + Support `Your Support Cases`_ web form. + + :type case_id: string + :param case_id: + + :type communication_body: string + :param communication_body: + + :type cc_email_addresses: list + :param cc_email_addresses: + + """ + params = {'communicationBody': communication_body, } + if case_id is not None: + params['caseId'] = case_id + if cc_email_addresses is not None: + params['ccEmailAddresses'] = cc_email_addresses + return self.make_request(action='AddCommunicationToCase', + body=json.dumps(params)) + + def create_case(self, subject, service_code, category_code, + communication_body, severity_code=None, + cc_email_addresses=None, language=None, issue_type=None): + """ + Creates a new case in the AWS Support Center. This action is + modeled on the behavior of the AWS Support Center `Open a new + case`_ page. Its parameters require you to specify the + following information: + + + #. **ServiceCode.** Represents a code for an AWS service. You + obtain the ServiceCode by calling `DescribeServices`_. + #. **CategoryCode**. Represents a category for the service + defined for the ServiceCode value. You also obtain the + cateogory code for a service by calling `DescribeServices`_. + Each AWS service defines its own set of category codes. + #. **SeverityCode**. Represents a value that specifies the + urgency of the case, and the time interval in which your + service level agreement specifies a response from AWS Support. + You obtain the SeverityCode by calling + `DescribeSeverityLevels`_. + #. **Subject**. Represents the **Subject** field on the AWS + Support Center `Open a new case`_ page. + #. **CommunicationBody**. Represents the **Description** field + on the AWS Support Center `Open a new case`_ page. + #. **Language**. Specifies the human language in which AWS + Support handles the case. The API currently supports English + and Japanese. + #. **CcEmailAddresses**. Represents the AWS Support Center + **CC** field on the `Open a new case`_ page. You can list + email addresses to be copied on any correspondence about the + case. 
+
+
+        The AWS Support API does not currently support the ability to
+        add attachments to cases. You can, however, call
+        `AddCommunicationToCase`_ to add information to an open case.
+
+        A successful `CreateCase`_ request returns an AWS Support case
+        number. Case numbers are used by the `DescribeCases`_ request to
+        retrieve existing AWS Support cases.
+
+        :type subject: string
+        :param subject:
+
+        :type service_code: string
+        :param service_code:
+
+        :type severity_code: string
+        :param severity_code:
+
+        :type category_code: string
+        :param category_code:
+
+        :type communication_body: string
+        :param communication_body:
+
+        :type cc_email_addresses: list
+        :param cc_email_addresses:
+
+        :type language: string
+        :param language:
+
+        :type issue_type: string
+        :param issue_type:
+
+        """
+        params = {
+            'subject': subject,
+            'serviceCode': service_code,
+            'categoryCode': category_code,
+            'communicationBody': communication_body,
+        }
+        if severity_code is not None:
+            params['severityCode'] = severity_code
+        if cc_email_addresses is not None:
+            params['ccEmailAddresses'] = cc_email_addresses
+        if language is not None:
+            params['language'] = language
+        if issue_type is not None:
+            params['issueType'] = issue_type
+        return self.make_request(action='CreateCase',
+                                 body=json.dumps(params))
+
+    def describe_cases(self, case_id_list=None, display_id=None,
+                       after_time=None, before_time=None,
+                       include_resolved_cases=None, next_token=None,
+                       max_results=None, language=None):
+        """
+        This action returns a list of cases that you specify by
+        passing one or more CaseIds. In addition, you can filter the
+        cases by date by setting values for the AfterTime and
+        BeforeTime request parameters.
+        The response returns the following in JSON format:
+
+        #. One or more `CaseDetails`_ data types.
+        #. One or more NextToken objects, strings that specify where
+           to paginate the returned records represented by CaseDetails.
+
+        :type case_id_list: list
+        :param case_id_list:
+
+        :type display_id: string
+        :param display_id:
+
+        :type after_time: string
+        :param after_time:
+
+        :type before_time: string
+        :param before_time:
+
+        :type include_resolved_cases: boolean
+        :param include_resolved_cases:
+
+        :type next_token: string
+        :param next_token:
+
+        :type max_results: integer
+        :param max_results:
+
+        :type language: string
+        :param language:
+
+        """
+        params = {}
+        if case_id_list is not None:
+            params['caseIdList'] = case_id_list
+        if display_id is not None:
+            params['displayId'] = display_id
+        if after_time is not None:
+            params['afterTime'] = after_time
+        if before_time is not None:
+            params['beforeTime'] = before_time
+        if include_resolved_cases is not None:
+            params['includeResolvedCases'] = include_resolved_cases
+        if next_token is not None:
+            params['nextToken'] = next_token
+        if max_results is not None:
+            params['maxResults'] = max_results
+        if language is not None:
+            params['language'] = language
+        return self.make_request(action='DescribeCases',
+                                 body=json.dumps(params))
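Pulling the case-management calls together, a hedged sketch; every code below is a placeholder, since real service, category, and severity codes must come from describe_services() and describe_severity_levels():

from boto.support import connect_to_region

support = connect_to_region('us-east-1')
case = support.create_case(
    subject='Instance will not boot',
    service_code='amazon-elastic-compute-cloud-linux',  # placeholder
    category_code='instance-issue',                     # placeholder
    communication_body='Details of the problem go here.',
    severity_code='low',                                # placeholder
)
print(case['caseId'])

# DescribeCases paginates with nextToken/maxResults.
cases = support.describe_cases(max_results=10)
for detail in cases['cases']:
    print(detail['caseId'], detail['status'])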
+
+    def describe_communications(self, case_id, before_time=None,
+                                after_time=None, next_token=None,
+                                max_results=None):
+        """
+        This action returns communications regarding the support case.
+        You can use the AfterTime and BeforeTime parameters to filter
+        by date. The CaseId parameter enables you to identify a
+        specific case by its CaseId number.
+
+        The MaxResults and NextToken parameters enable you to control
+        the pagination of the result set. Set MaxResults to the number
+        of cases you want displayed on each page, and use NextToken to
+        specify the resumption of pagination.
+
+        :type case_id: string
+        :param case_id:
+
+        :type before_time: string
+        :param before_time:
+
+        :type after_time: string
+        :param after_time:
+
+        :type next_token: string
+        :param next_token:
+
+        :type max_results: integer
+        :param max_results:
+
+        """
+        params = {'caseId': case_id, }
+        if before_time is not None:
+            params['beforeTime'] = before_time
+        if after_time is not None:
+            params['afterTime'] = after_time
+        if next_token is not None:
+            params['nextToken'] = next_token
+        if max_results is not None:
+            params['maxResults'] = max_results
+        return self.make_request(action='DescribeCommunications',
+                                 body=json.dumps(params))
+
+    def describe_services(self, service_code_list=None, language=None):
+        """
+        Returns the current list of AWS services and a list of service
+        categories that apply to each one. You then use service
+        names and categories in your `CreateCase`_ requests. Each AWS
+        service has its own set of categories.
+
+        The service codes and category codes correspond to the values
+        that are displayed in the **Service** and **Category** drop-
+        down lists on the AWS Support Center `Open a new case`_ page.
+        The values in those fields, however, do not necessarily match
+        the service codes and categories returned by the
+        `DescribeServices` request. Always use the service codes and
+        categories obtained programmatically. This practice ensures
+        that you always have the most recent set of service and
+        category codes.
+
+        :type service_code_list: list
+        :param service_code_list:
+
+        :type language: string
+        :param language:
+
+        """
+        params = {}
+        if service_code_list is not None:
+            params['serviceCodeList'] = service_code_list
+        if language is not None:
+            params['language'] = language
+        return self.make_request(action='DescribeServices',
+                                 body=json.dumps(params))
+
+    def describe_severity_levels(self, language=None):
+        """
+        This action returns the list of severity levels that you can
+        assign to an AWS Support case. The severity level for a case
+        is also a field in the `CaseDetails`_ data type included in
+        any `CreateCase`_ request.
+
+        :type language: string
+        :param language:
+
+        """
+        params = {}
+        if language is not None:
+            params['language'] = language
+        return self.make_request(action='DescribeSeverityLevels',
+                                 body=json.dumps(params))
+
+    def resolve_case(self, case_id=None):
+        """
+        Takes a CaseId and returns the initial state of the case along
+        with the state of the case after the call to `ResolveCase`_
+        completed.
+
+        :type case_id: string
+        :param case_id:
+
+        """
+        params = {}
+        if case_id is not None:
+            params['caseId'] = case_id
+        return self.make_request(action='ResolveCase',
+                                 body=json.dumps(params))
+
+    def describe_trusted_advisor_check_refresh_statuses(self, check_ids):
+        """
+        Returns the status of all refresh requests for Trusted Advisor
+        checks called using `RefreshTrustedAdvisorCheck`_.
+
+        :type check_ids: list
+        :param check_ids:
+
+        """
+        params = {'checkIds': check_ids, }
+        return self.make_request(action='DescribeTrustedAdvisorCheckRefreshStatuses',
+                                 body=json.dumps(params))
+
+    def describe_trusted_advisor_check_result(self, check_id, language=None):
+        """
+        This action responds with the results of a Trusted Advisor
+        check.
+        Once you have obtained the list of available Trusted
+        Advisor checks by calling `DescribeTrustedAdvisorChecks`_, you
+        specify the CheckId for the check you want to retrieve from
+        AWS Support.
+
+        The response for this action contains a JSON-formatted
+        `TrustedAdvisorCheckResult`_ object, which is a container for
+        the following three objects:
+
+
+        #. `TrustedAdvisorCategorySpecificSummary`_
+        #. `TrustedAdvisorResourceDetail`_
+        #. `TrustedAdvisorResourcesSummary`_
+
+
+        In addition, the response contains the following fields:
+
+
+        #. **Status**. Overall status of the check.
+        #. **Timestamp**. Time at which Trusted Advisor last ran the
+           check.
+        #. **CheckId**. Unique identifier for the specific check
+           returned by the request.
+
+        :type check_id: string
+        :param check_id:
+
+        :type language: string
+        :param language:
+
+        """
+        params = {'checkId': check_id, }
+        if language is not None:
+            params['language'] = language
+        return self.make_request(action='DescribeTrustedAdvisorCheckResult',
+                                 body=json.dumps(params))
+
+    def describe_trusted_advisor_check_summaries(self, check_ids):
+        """
+        This action enables you to get the latest summaries for
+        Trusted Advisor checks that you specify in your request. You
+        submit the list of Trusted Advisor checks for which you want
+        summaries. You obtain these CheckIds by submitting a
+        `DescribeTrustedAdvisorChecks`_ request.
+
+        The response body contains an array of
+        `TrustedAdvisorCheckSummary`_ objects.
+
+        :type check_ids: list
+        :param check_ids:
+
+        """
+        params = {'checkIds': check_ids, }
+        return self.make_request(action='DescribeTrustedAdvisorCheckSummaries',
+                                 body=json.dumps(params))
+
+    def describe_trusted_advisor_checks(self, language):
+        """
+        This action enables you to get a list of the available Trusted
+        Advisor checks. You must specify a language code. English
+        ("en") and Japanese ("ja") are currently supported. The
+        response contains a list of `TrustedAdvisorCheckDescription`_
+        objects.
+
+        :type language: string
+        :param language:
+
+        """
+        params = {'language': language, }
+        return self.make_request(action='DescribeTrustedAdvisorChecks',
+                                 body=json.dumps(params))
+
+    def refresh_trusted_advisor_check(self, check_id):
+        """
+        This action enables you to query the service to request a
+        refresh for a specific Trusted Advisor check. Your request
+        body contains a CheckId for which you are querying. The
+        response body contains a `RefreshTrustedAdvisorCheckResult`_
+        object containing Status and TimeUntilNextRefresh fields.
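The four Trusted Advisor actions above compose into a simple flow; a sketch, with the response keys taken from the AWS Support API documentation and therefore an assumption about this client, which returns decoded JSON as plain dicts:

from boto.support import connect_to_region

support = connect_to_region('us-east-1')

# List available checks, then fetch and refresh the first one.
checks = support.describe_trusted_advisor_checks(language='en')
check_id = checks['checks'][0]['id']

result = support.describe_trusted_advisor_check_result(check_id)
print(result['result']['status'])      # e.g. 'ok', 'warning', 'error'

refresh = support.refresh_trusted_advisor_check(check_id)
print(refresh['status'])               # refresh request state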
+ + :type check_id: string + :param check_id: + + """ + params = {'checkId': check_id, } + return self.make_request(action='RefreshTrustedAdvisorCheck', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read() + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/awx/lib/site-packages/boto/swf/__init__.py b/awx/lib/site-packages/boto/swf/__init__.py new file mode 100644 index 0000000000..3594444d30 --- /dev/null +++ b/awx/lib/site-packages/boto/swf/__init__.py @@ -0,0 +1,57 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.ec2.regioninfo import RegionInfo +import boto.swf.layer1 + +REGION_ENDPOINTS = { + 'us-east-1': 'swf.us-east-1.amazonaws.com', + 'us-gov-west-1': 'swf.us-gov-west-1.amazonaws.com', + 'us-west-1': 'swf.us-west-1.amazonaws.com', + 'us-west-2': 'swf.us-west-2.amazonaws.com', + 'sa-east-1': 'swf.sa-east-1.amazonaws.com', + 'eu-west-1': 'swf.eu-west-1.amazonaws.com', + 'ap-northeast-1': 'swf.ap-northeast-1.amazonaws.com', + 'ap-southeast-1': 'swf.ap-southeast-1.amazonaws.com', + 'ap-southeast-2': 'swf.ap-southeast-2.amazonaws.com', +} + + +def regions(**kw_params): + """ + Get all available regions for the Amazon Simple Workflow service. 
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    return [RegionInfo(name=region_name,
+                       endpoint=REGION_ENDPOINTS[region_name],
+                       connection_cls=boto.swf.layer1.Layer1)
+            for region_name in REGION_ENDPOINTS]
+
+
+def connect_to_region(region_name, **kw_params):
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff --git a/awx/lib/site-packages/boto/swf/exceptions.py b/awx/lib/site-packages/boto/swf/exceptions.py
new file mode 100644
index 0000000000..f3ac6aeb73
--- /dev/null
+++ b/awx/lib/site-packages/boto/swf/exceptions.py
@@ -0,0 +1,44 @@
+"""
+Exceptions that are specific to the swf module.
+
+This module subclasses the base SWF response exception,
+boto.exceptions.SWFResponseError, for some of the SWF specific faults.
+"""
+from boto.exception import SWFResponseError
+
+
+class SWFDomainAlreadyExistsError(SWFResponseError):
+    """
+    Raised when the domain already exists.
+    """
+    pass
+
+
+class SWFLimitExceededError(SWFResponseError):
+    """
+    Raised when a system imposed limitation has been reached.
+    """
+    pass
+
+
+class SWFOperationNotPermittedError(SWFResponseError):
+    """
+    Raised when the requested operation is not permitted (reserved for
+    future use).
+    """
+
+
+class SWFTypeAlreadyExistsError(SWFResponseError):
+    """
+    Raised when the workflow type or activity type already exists.
+    """
+    pass
+
+
+class SWFWorkflowExecutionAlreadyStartedError(SWFResponseError):
+    """
+    Raised when an open execution with the same workflow_id is already running
+    in the specified domain.
+    """
diff --git a/awx/lib/site-packages/boto/swf/layer1.py b/awx/lib/site-packages/boto/swf/layer1.py
new file mode 100644
index 0000000000..264016bd8f
--- /dev/null
+++ b/awx/lib/site-packages/boto/swf/layer1.py
@@ -0,0 +1,1512 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import time
+
+import boto
+from boto.connection import AWSAuthConnection
+from boto.provider import Provider
+from boto.exception import SWFResponseError
+from boto.swf import exceptions as swf_exceptions
+from boto.compat import json
+
+#
+# To get full debug output, uncomment the following line and set the
+# value of Debug to be 2
+#
+#boto.set_stream_logger('swf')
+Debug = 0
+
+
+class Layer1(AWSAuthConnection):
+    """
+    Low-level interface to Simple Workflow Service.
+ """ + + DefaultRegionName = 'us-east-1' + """The default region name for Simple Workflow.""" + + ServiceName = 'com.amazonaws.swf.service.model.SimpleWorkflowService' + """The name of the Service""" + + # In some cases, the fault response __type value is mapped to + # an exception class more specific than SWFResponseError. + _fault_excp = { + 'com.amazonaws.swf.base.model#DomainAlreadyExistsFault': + swf_exceptions.SWFDomainAlreadyExistsError, + 'com.amazonaws.swf.base.model#LimitExceededFault': + swf_exceptions.SWFLimitExceededError, + 'com.amazonaws.swf.base.model#OperationNotPermittedFault': + swf_exceptions.SWFOperationNotPermittedError, + 'com.amazonaws.swf.base.model#TypeAlreadyExistsFault': + swf_exceptions.SWFTypeAlreadyExistsError, + 'com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault': + swf_exceptions.SWFWorkflowExecutionAlreadyStartedError, + } + + ResponseError = SWFResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + debug=0, session_token=None, region=None): + if not region: + region_name = boto.config.get('SWF', 'region', + self.DefaultRegionName) + for reg in boto.swf.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + AWSAuthConnection.__init__(self, self.region.endpoint, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + debug, session_token) + + def _required_auth_capability(self): + return ['hmac-v4'] + + @classmethod + def _normalize_request_dict(cls, data): + """ + This class method recurses through request data dictionary and removes + any default values. + + :type data: dict + :param data: Specifies request parameters with default values to be removed. + """ + for item in data.keys(): + if isinstance(data[item], dict): + cls._normalize_request_dict(data[item]) + if data[item] in (None, {}): + del data[item] + + def json_request(self, action, data, object_hook=None): + """ + This method wraps around make_request() to normalize and serialize the + dictionary with request parameters. + + :type action: string + :param action: Specifies an SWF action. + + :type data: dict + :param data: Specifies request parameters associated with the action. + """ + self._normalize_request_dict(data) + json_input = json.dumps(data) + return self.make_request(action, json_input, object_hook) + + def make_request(self, action, body='', object_hook=None): + """ + :raises: ``SWFResponseError`` if response status is not 200. + """ + headers = {'X-Amz-Target': '%s.%s' % (self.ServiceName, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/json; charset=UTF-8', + 'Content-Encoding': 'amz-1.0', + 'Content-Length': str(len(body))} + http_request = self.build_base_http_request('POST', '/', '/', + {}, headers, body, None) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read() + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body, object_hook=object_hook) + else: + return None + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + # Certain faults get mapped to more specific exception classes. 
+ excp_cls = self._fault_excp.get(fault_name, self.ResponseError) + raise excp_cls(response.status, response.reason, body=json_body) + + # Actions related to Activities + + def poll_for_activity_task(self, domain, task_list, identity=None): + """ + Used by workers to get an ActivityTask from the specified + activity taskList. This initiates a long poll, where the + service holds the HTTP connection open and responds as soon as + a task becomes available. The maximum time the service holds + on to the request before responding is 60 seconds. If no task + is available within 60 seconds, the poll will return an empty + result. An empty result, in this context, means that an + ActivityTask is returned, but that the value of taskToken is + an empty string. If a task is returned, the worker should use + its type to identify and process it correctly. + + :type domain: string + :param domain: The name of the domain that contains the task + lists being polled. + + :type task_list: string + :param task_list: Specifies the task list to poll for activity tasks. + + :type identity: string + :param identity: Identity of the worker making the request, which + is recorded in the ActivityTaskStarted event in the workflow + history. This enables diagnostic tracing when problems arise. + The form of this identity is user defined. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('PollForActivityTask', { + 'domain': domain, + 'taskList': {'name': task_list}, + 'identity': identity, + }) + + def respond_activity_task_completed(self, task_token, result=None): + """ + Used by workers to tell the service that the ActivityTask + identified by the taskToken completed successfully with a + result (if provided). + + :type task_token: string + :param task_token: The taskToken of the ActivityTask. + + :type result: string + :param result: The result of the activity task. It is a free + form string that is implementation specific. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RespondActivityTaskCompleted', { + 'taskToken': task_token, + 'result': result, + }) + + def respond_activity_task_failed(self, task_token, + details=None, reason=None): + """ + Used by workers to tell the service that the ActivityTask + identified by the taskToken has failed with reason (if + specified). + + :type task_token: string + :param task_token: The taskToken of the ActivityTask. + + :type details: string + :param details: Optional detailed information about the failure. + + :type reason: string + :param reason: Description of the error that may assist in diagnostics. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RespondActivityTaskFailed', { + 'taskToken': task_token, + 'details': details, + 'reason': reason, + }) + + def respond_activity_task_canceled(self, task_token, details=None): + """ + Used by workers to tell the service that the ActivityTask + identified by the taskToken was successfully + canceled. Additional details can be optionally provided using + the details argument. + + :type task_token: string + :param task_token: The taskToken of the ActivityTask. + + :type details: string + :param details: Optional detailed information about the failure. 
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RespondActivityTaskCanceled', {
+            'taskToken': task_token,
+            'details': details,
+        })
+
+    def record_activity_task_heartbeat(self, task_token, details=None):
+        """
+        Used by activity workers to report to the service that the
+        ActivityTask represented by the specified taskToken is still
+        making progress. The worker can also (optionally) specify
+        details of the progress, for example percent complete, using
+        the details parameter. This action can also be used by the
+        worker as a mechanism to check if cancellation is being
+        requested for the activity task. If a cancellation is being
+        attempted for the specified task, then the boolean
+        cancelRequested flag returned by the service is set to true.
+
+        :type task_token: string
+        :param task_token: The taskToken of the ActivityTask.
+
+        :type details: string
+        :param details: If specified, contains details about the
+            progress of the task.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RecordActivityTaskHeartbeat', {
+            'taskToken': task_token,
+            'details': details,
+        })
+
+    # Actions related to Deciders
+
+    def poll_for_decision_task(self, domain, task_list, identity=None,
+                               maximum_page_size=None,
+                               next_page_token=None,
+                               reverse_order=None):
+        """
+        Used by deciders to get a DecisionTask from the specified
+        decision taskList. A decision task may be returned for any
+        open workflow execution that is using the specified task
+        list. The task includes a paginated view of the history of the
+        workflow execution. The decider should use the workflow type
+        and the history to determine how to properly handle the task.
+
+        :type domain: string
+        :param domain: The name of the domain containing the task
+            lists to poll.
+
+        :type task_list: string
+        :param task_list: Specifies the task list to poll for decision tasks.
+
+        :type identity: string
+        :param identity: Identity of the decider making the request,
+            which is recorded in the DecisionTaskStarted event in the
+            workflow history. This enables diagnostic tracing when
+            problems arise. The form of this identity is user defined.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of history events
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being paginated.
+            To get the next page of results, repeat the call with the
+            returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the events in
+            reverse order. By default the results are returned in
+            ascending order of the eventTimestamp of the events.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('PollForDecisionTask', {
+            'domain': domain,
+            'taskList': {'name': task_list},
+            'identity': identity,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def respond_decision_task_completed(self, task_token,
+                                        decisions=None,
+                                        execution_context=None):
+        """
+        Used by deciders to tell the service that the DecisionTask
+        identified by the taskToken has successfully completed.
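Taken together, the activity-task calls above form the skeleton of a worker; a minimal sketch, with hypothetical domain and task-list names and a hypothetical do_work() helper:

import boto.swf

conn = boto.swf.connect_to_region('us-east-1')

while True:
    # Long poll; an empty taskToken means the 60-second poll expired
    # without a task being assigned.
    task = conn.poll_for_activity_task('demo-domain', 'demo-task-list',
                                       identity='worker-1')
    if not task.get('taskToken'):
        continue
    try:
        conn.record_activity_task_heartbeat(task['taskToken'])
        result = do_work(task.get('input'))       # hypothetical helper
        conn.respond_activity_task_completed(task['taskToken'],
                                             result=str(result))
    except Exception as exc:
        conn.respond_activity_task_failed(task['taskToken'],
                                          reason=str(exc)[:250])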
+ The decisions argument specifies the list of decisions + made while processing the task. + + :type task_token: string + :param task_token: The taskToken of the ActivityTask. + + :type decisions: list + :param decisions: The list of decisions (possibly empty) made by + the decider while processing this decision task. See the docs + for the Decision structure for details. + + :type execution_context: string + :param execution_context: User defined context to add to + workflow execution. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RespondDecisionTaskCompleted', { + 'taskToken': task_token, + 'decisions': decisions, + 'executionContext': execution_context, + }) + + def request_cancel_workflow_execution(self, domain, workflow_id, + run_id=None): + """ + Records a WorkflowExecutionCancelRequested event in the + currently running workflow execution identified by the given + domain, workflowId, and runId. This logically requests the + cancellation of the workflow execution as a whole. It is up to + the decider to take appropriate actions when it receives an + execution history with this event. + + :type domain: string + :param domain: The name of the domain containing the workflow + execution to cancel. + + :type run_id: string + :param run_id: The runId of the workflow execution to cancel. + + :type workflow_id: string + :param workflow_id: The workflowId of the workflow execution + to cancel. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RequestCancelWorkflowExecution', { + 'domain': domain, + 'workflowId': workflow_id, + 'runId': run_id, + }) + + def start_workflow_execution(self, domain, workflow_id, + workflow_name, workflow_version, + task_list=None, child_policy=None, + execution_start_to_close_timeout=None, + input=None, tag_list=None, + task_start_to_close_timeout=None): + """ + Starts an execution of the workflow type in the specified + domain using the provided workflowId and input data. + + :type domain: string + :param domain: The name of the domain in which the workflow + execution is created. + + :type workflow_id: string + :param workflow_id: The user defined identifier associated with + the workflow execution. You can use this to associate a + custom identifier with the workflow execution. You may + specify the same identifier if a workflow execution is + logically a restart of a previous execution. You cannot + have two open workflow executions with the same workflowId + at the same time. + + :type workflow_name: string + :param workflow_name: The name of the workflow type. + + :type workflow_version: string + :param workflow_version: The version of the workflow type. + + :type task_list: string + :param task_list: The task list to use for the decision tasks + generated for this workflow execution. This overrides the + defaultTaskList specified when registering the workflow type. + + :type child_policy: string + :param child_policy: If set, specifies the policy to use for the + child workflow executions of this workflow execution if it + is terminated, by calling the TerminateWorkflowExecution + action explicitly or due to an expired timeout. This policy + overrides the default child policy specified when registering + the workflow type using RegisterWorkflowType. The supported + child policies are: + + * TERMINATE: the child executions will be terminated. 
+            * REQUEST_CANCEL: a request to cancel will be attempted
+              for each child execution by recording a
+              WorkflowExecutionCancelRequested event in its history.
+              It is up to the decider to take appropriate actions
+              when it receives an execution history with this event.
+            * ABANDON: no action will be taken. The child executions
+              will continue to run.
+
+        :type execution_start_to_close_timeout: string
+        :param execution_start_to_close_timeout: The total duration for
+            this workflow execution. This overrides the
+            defaultExecutionStartToCloseTimeout specified when
+            registering the workflow type.
+
+        :type input: string
+        :param input: The input for the workflow
+            execution. This is a free form string which should be
+            meaningful to the workflow you are starting. This input is
+            made available to the new workflow execution in the
+            WorkflowExecutionStarted history event.
+
+        :type tag_list: list
+        :param tag_list: The list of tags to
+            associate with the workflow execution. You can specify a
+            maximum of 5 tags. You can list workflow executions with a
+            specific tag by calling list_open_workflow_executions or
+            list_closed_workflow_executions and specifying a TagFilter.
+
+        :type task_start_to_close_timeout: string
+        :param task_start_to_close_timeout: Specifies the maximum duration of
+            decision tasks for this workflow execution. This parameter
+            overrides the defaultTaskStartToCloseTimeout specified when
+            registering the workflow type using register_workflow_type.
+
+        :raises: UnknownResourceFault, TypeDeprecatedFault,
+            SWFWorkflowExecutionAlreadyStartedError, SWFLimitExceededError,
+            SWFOperationNotPermittedError, DefaultUndefinedFault
+        """
+        return self.json_request('StartWorkflowExecution', {
+            'domain': domain,
+            'workflowId': workflow_id,
+            'workflowType': {'name': workflow_name,
+                             'version': workflow_version},
+            'taskList': {'name': task_list},
+            'childPolicy': child_policy,
+            'executionStartToCloseTimeout': execution_start_to_close_timeout,
+            'input': input,
+            'tagList': tag_list,
+            'taskStartToCloseTimeout': task_start_to_close_timeout,
+        })
+
+    def signal_workflow_execution(self, domain, signal_name, workflow_id,
+                                  input=None, run_id=None):
+        """
+        Records a WorkflowExecutionSignaled event in the workflow
+        execution history and creates a decision task for the workflow
+        execution identified by the given domain, workflowId and
+        runId. The event is recorded with the specified user defined
+        signalName and input (if provided).
+
+        :type domain: string
+        :param domain: The name of the domain containing the workflow
+            execution to signal.
+
+        :type signal_name: string
+        :param signal_name: The name of the signal. This name must be
+            meaningful to the target workflow.
+
+        :type workflow_id: string
+        :param workflow_id: The workflowId of the workflow execution
+            to signal.
+
+        :type input: string
+        :param input: Data to attach to the WorkflowExecutionSignaled
+            event in the target workflow execution's history.
+
+        :type run_id: string
+        :param run_id: The runId of the workflow execution to signal.
+ + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('SignalWorkflowExecution', { + 'domain': domain, + 'signalName': signal_name, + 'workflowId': workflow_id, + 'input': input, + 'runId': run_id, + }) + + def terminate_workflow_execution(self, domain, workflow_id, + child_policy=None, details=None, + reason=None, run_id=None): + """ + Records a WorkflowExecutionTerminated event and forces closure + of the workflow execution identified by the given domain, + runId, and workflowId. The child policy, registered with the + workflow type or specified when starting this execution, is + applied to any open child workflow executions of this workflow + execution. + + :type domain: string + :param domain: The domain of the workflow execution to terminate. + + :type workflow_id: string + :param workflow_id: The workflowId of the workflow execution + to terminate. + + :type child_policy: string + :param child_policy: If set, specifies the policy to use for + the child workflow executions of the workflow execution being + terminated. This policy overrides the child policy specified + for the workflow execution at registration time or when + starting the execution. The supported child policies are: + + * TERMINATE: the child executions will be terminated. + + * REQUEST_CANCEL: a request to cancel will be attempted + for each child execution by recording a + WorkflowExecutionCancelRequested event in its + history. It is up to the decider to take appropriate + actions when it receives an execution history with this + event. + + * ABANDON: no action will be taken. The child executions + will continue to run. + + :type details: string + :param details: Optional details for terminating the + workflow execution. + + :type reason: string + :param reason: An optional descriptive reason for terminating + the workflow execution. + + :type run_id: string + :param run_id: The runId of the workflow execution to terminate. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('TerminateWorkflowExecution', { + 'domain': domain, + 'workflowId': workflow_id, + 'childPolicy': child_policy, + 'details': details, + 'reason': reason, + 'runId': run_id, + }) + +# Actions related to Administration + +## Activity Management + + def register_activity_type(self, domain, name, version, task_list=None, + default_task_heartbeat_timeout=None, + default_task_schedule_to_close_timeout=None, + default_task_schedule_to_start_timeout=None, + default_task_start_to_close_timeout=None, + description=None): + """ + Registers a new activity type along with its configuration + settings in the specified domain. + + :type domain: string + :param domain: The name of the domain in which this activity is + to be registered. + + :type name: string + :param name: The name of the activity type within the domain. + + :type version: string + :param version: The version of the activity type. + + :type task_list: string + :param task_list: If set, specifies the default task list to + use for scheduling tasks of this activity type. This default + task list is used if a task list is not provided when a task + is scheduled through the schedule_activity_task Decision. + + :type default_task_heartbeat_timeout: string + :param default_task_heartbeat_timeout: If set, specifies the + default maximum time before which a worker processing a task + of this type must report progress by calling + RecordActivityTaskHeartbeat. 
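A sketch of starting and then signaling an execution with the calls documented above; the domain, workflow type, and IDs are hypothetical, and the type is assumed to be already registered:

import boto.swf

conn = boto.swf.connect_to_region('us-east-1')
run = conn.start_workflow_execution(
    'demo-domain', 'order-12345', 'ProcessOrder', '1.0',
    task_list='decider-queue',
    execution_start_to_close_timeout='3600',  # SWF durations are strings
    input='{"order_id": 12345}',
)
# runId plus workflowId identify the open execution unambiguously.
conn.signal_workflow_execution('demo-domain', 'payment-received',
                               'order-12345', run_id=run['runId'])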
If the timeout is exceeded, the
+        activity task is automatically timed out. This default can be
+        overridden when scheduling an activity task using the
+        ScheduleActivityTask Decision. If the activity worker
+        subsequently attempts to record a heartbeat or returns a
+        result, the activity worker receives an UnknownResource
+        fault. In this case, Amazon SWF no longer considers the
+        activity task to be valid; the activity worker should clean up
+        the activity task.
+
+        :type default_task_schedule_to_close_timeout: string
+        :param default_task_schedule_to_close_timeout: If set,
+            specifies the default maximum duration for a task of this
+            activity type. This default can be overridden when scheduling
+            an activity task using the ScheduleActivityTask Decision.
+
+        :type default_task_schedule_to_start_timeout: string
+        :param default_task_schedule_to_start_timeout: If set,
+            specifies the default maximum duration that a task of this
+            activity type can wait before being assigned to a worker. This
+            default can be overridden when scheduling an activity task
+            using the ScheduleActivityTask Decision.
+
+        :type default_task_start_to_close_timeout: string
+        :param default_task_start_to_close_timeout: If set, specifies
+            the default maximum duration that a worker can take to process
+            tasks of this activity type. This default can be overridden
+            when scheduling an activity task using the
+            ScheduleActivityTask Decision.
+
+        :type description: string
+        :param description: A textual description of the activity type.
+
+        :raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
+            UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RegisterActivityType', {
+            'domain': domain,
+            'name': name,
+            'version': version,
+            'defaultTaskList': {'name': task_list},
+            'defaultTaskHeartbeatTimeout': default_task_heartbeat_timeout,
+            'defaultTaskScheduleToCloseTimeout': default_task_schedule_to_close_timeout,
+            'defaultTaskScheduleToStartTimeout': default_task_schedule_to_start_timeout,
+            'defaultTaskStartToCloseTimeout': default_task_start_to_close_timeout,
+            'description': description,
+        })
+
+    def deprecate_activity_type(self, domain, activity_name, activity_version):
+        """
+        Deprecates the specified activity type. After an activity type
+        has been deprecated, you cannot create new tasks of that
+        activity type. Tasks of this type that were scheduled before
+        the type was deprecated will continue to run.
+
+        :type domain: string
+        :param domain: The name of the domain in which the activity
+            type is registered.
+
+        :type activity_name: string
+        :param activity_name: The name of this activity.
+
+        :type activity_version: string
+        :param activity_version: The version of this activity.
+
+        :raises: UnknownResourceFault, TypeDeprecatedFault,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('DeprecateActivityType', {
+            'domain': domain,
+            'activityType': {'name': activity_name,
+                             'version': activity_version}
+        })
+
+## Workflow Management
+
+    def register_workflow_type(self, domain, name, version,
+                               task_list=None,
+                               default_child_policy=None,
+                               default_execution_start_to_close_timeout=None,
+                               default_task_start_to_close_timeout=None,
+                               description=None):
+        """
+        Registers a new workflow type and its configuration settings
+        in the specified domain.
+
+        :type domain: string
+        :param domain: The name of the domain in which to register
+            the workflow type.
+
+        :type name: string
+        :param name: The name of the workflow type.
+
+        :type version: string
+        :param version: The version of the workflow type.
+
+        :type task_list: string
+        :param task_list: If set, specifies the default task list to use
+            for scheduling decision tasks for executions of this workflow
+            type. This default is used only if a task list is not provided
+            when starting the execution through the StartWorkflowExecution
+            Action or StartChildWorkflowExecution Decision.
+
+        :type default_child_policy: string
+        :param default_child_policy: If set, specifies the default
+            policy to use for the child workflow executions when a
+            workflow execution of this type is terminated, by calling the
+            TerminateWorkflowExecution action explicitly or due to an
+            expired timeout. This default can be overridden when starting
+            a workflow execution using the StartWorkflowExecution action
+            or the StartChildWorkflowExecution Decision. The supported
+            child policies are:
+
+            * TERMINATE: the child executions will be terminated.
+
+            * REQUEST_CANCEL: a request to cancel will be attempted
+              for each child execution by recording a
+              WorkflowExecutionCancelRequested event in its
+              history. It is up to the decider to take appropriate
+              actions when it receives an execution history with this
+              event.
+
+            * ABANDON: no action will be taken. The child executions
+              will continue to run.
+
+        :type default_execution_start_to_close_timeout: string
+        :param default_execution_start_to_close_timeout: If set,
+            specifies the default maximum duration for executions of this
+            workflow type. You can override this default when starting an
+            execution through the StartWorkflowExecution Action or
+            StartChildWorkflowExecution Decision.
+
+        :type default_task_start_to_close_timeout: string
+        :param default_task_start_to_close_timeout: If set, specifies
+            the default maximum duration of decision tasks for this
+            workflow type. This default can be overridden when starting a
+            workflow execution using the StartWorkflowExecution action or
+            the StartChildWorkflowExecution Decision.
+
+        :type description: string
+        :param description: Textual description of the workflow type.
+
+        :raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
+            UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RegisterWorkflowType', {
+            'domain': domain,
+            'name': name,
+            'version': version,
+            'defaultTaskList': {'name': task_list},
+            'defaultChildPolicy': default_child_policy,
+            'defaultExecutionStartToCloseTimeout': default_execution_start_to_close_timeout,
+            'defaultTaskStartToCloseTimeout': default_task_start_to_close_timeout,
+            'description': description,
+        })
+
+    def deprecate_workflow_type(self, domain, workflow_name, workflow_version):
+        """
+        Deprecates the specified workflow type. After a workflow type
+        has been deprecated, you cannot create new executions of that
+        type. Executions that were started before the type was
+        deprecated will continue to run. A deprecated workflow type
+        may still be used when calling visibility actions.
+
+        :type domain: string
+        :param domain: The name of the domain in which the workflow
+            type is registered.
+
+        :type workflow_name: string
+        :param workflow_name: The name of the workflow type.
+
+        :type workflow_version: string
+        :param workflow_version: The version of the workflow type.
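+
+        A short sketch of the register/deprecate life cycle
+        (illustrative; ``conn`` is assumed to be an existing Layer1
+        connection and the names are placeholders)::
+
+            conn.register_workflow_type('my-domain', 'MyWorkflow', '1.0')
+            # ... later, retire the type so no new executions can start:
+            conn.deprecate_workflow_type('my-domain', 'MyWorkflow', '1.0')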
+
+        :raises: UnknownResourceFault, TypeDeprecatedFault,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('DeprecateWorkflowType', {
+            'domain': domain,
+            'workflowType': {'name': workflow_name,
+                             'version': workflow_version},
+        })
+
+## Domain Management
+
+    def register_domain(self, name,
+                        workflow_execution_retention_period_in_days,
+                        description=None):
+        """
+        Registers a new domain.
+
+        :type name: string
+        :param name: Name of the domain to register. The name must be unique.
+
+        :type workflow_execution_retention_period_in_days: string
+        :param workflow_execution_retention_period_in_days: Specifies
+            the duration *in days* for which the record (including the
+            history) of workflow executions in this domain should be kept
+            by the service. After the retention period, the workflow
+            execution will not be available in the results of visibility
+            calls. If a duration of NONE is specified, the records for
+            workflow executions in this domain are not retained at all.
+
+        :type description: string
+        :param description: Textual description of the domain.
+
+        :raises: SWFDomainAlreadyExistsError, SWFLimitExceededError,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('RegisterDomain', {
+            'name': name,
+            'workflowExecutionRetentionPeriodInDays': workflow_execution_retention_period_in_days,
+            'description': description,
+        })
+
+    def deprecate_domain(self, name):
+        """
+        Deprecates the specified domain. After a domain has been
+        deprecated it cannot be used to create new workflow executions
+        or register new types. However, you can still use visibility
+        actions on this domain. Deprecating a domain also deprecates
+        all activity and workflow types registered in the
+        domain. Executions that were started before the domain was
+        deprecated will continue to run.
+
+        :type name: string
+        :param name: The name of the domain to deprecate.
+
+        :raises: UnknownResourceFault, DomainDeprecatedFault,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('DeprecateDomain', {'name': name})
+
+# Visibility Actions
+
+## Activity Visibility
+
+    def list_activity_types(self, domain, registration_status,
+                            name=None,
+                            maximum_page_size=None,
+                            next_page_token=None, reverse_order=None):
+        """
+        Returns information about all activities registered in the
+        specified domain that match the specified name and
+        registration status. The result includes information like
+        creation date, current status of the activity, etc. The
+        results may be split into multiple pages. To retrieve
+        subsequent pages, make the call again using the nextPageToken
+        returned by the initial call.
+
+        :type domain: string
+        :param domain: The name of the domain in which the activity
+            types have been registered.
+
+        :type registration_status: string
+        :param registration_status: Specifies the registration status
+            of the activity types to list. Valid values are:
+
+            * REGISTERED
+            * DEPRECATED
+
+        :type name: string
+        :param name: If specified, only lists the activity types that
+            have this name.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of results
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results have more than one
+            page.
To get the next page of results, repeat the call with
+            the nextPageToken and keep all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order. By default the results are returned in
+            ascending alphabetical order of the name of the activity
+            types.
+
+        :raises: SWFOperationNotPermittedError, UnknownResourceFault
+        """
+        return self.json_request('ListActivityTypes', {
+            'domain': domain,
+            'name': name,
+            'registrationStatus': registration_status,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def describe_activity_type(self, domain, activity_name, activity_version):
+        """
+        Returns information about the specified activity type. This
+        includes configuration settings provided at registration time
+        as well as other general information about the type.
+
+        :type domain: string
+        :param domain: The name of the domain in which the activity
+            type is registered.
+
+        :type activity_name: string
+        :param activity_name: The name of this activity.
+
+        :type activity_version: string
+        :param activity_version: The version of this activity.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('DescribeActivityType', {
+            'domain': domain,
+            'activityType': {'name': activity_name,
+                             'version': activity_version}
+        })
+
+## Workflow Visibility
+
+    def list_workflow_types(self, domain, registration_status,
+                            maximum_page_size=None, name=None,
+                            next_page_token=None, reverse_order=None):
+        """
+        Returns information about workflow types in the specified
+        domain. The results may be split into multiple pages that can
+        be retrieved by making the call repeatedly.
+
+        :type domain: string
+        :param domain: The name of the domain in which the workflow
+            types have been registered.
+
+        :type registration_status: string
+        :param registration_status: Specifies the registration status
+            of the workflow types to list. Valid values are:
+
+            * REGISTERED
+            * DEPRECATED
+
+        :type name: string
+        :param name: If specified, lists the workflow type with this name.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of results
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being
+            paginated. To get the next page of results, repeat the call
+            with the returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order. By default the results are returned in
+            ascending alphabetical order of the name of the workflow
+            types.
+
+        :raises: SWFOperationNotPermittedError, UnknownResourceFault
+        """
+        return self.json_request('ListWorkflowTypes', {
+            'domain': domain,
+            'name': name,
+            'registrationStatus': registration_status,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def describe_workflow_type(self, domain, workflow_name, workflow_version):
+        """
+        Returns information about the specified workflow type. This
+        includes configuration settings specified when the type was
+        registered and other information such as creation date,
+        current status, etc.
+
+        :type domain: string
+        :param domain: The name of the domain in which this workflow
+            type is registered.
+
+        :type workflow_name: string
+        :param workflow_name: The name of the workflow type.
+
+        :type workflow_version: string
+        :param workflow_version: The version of the workflow type.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('DescribeWorkflowType', {
+            'domain': domain,
+            'workflowType': {'name': workflow_name,
+                             'version': workflow_version}
+        })
+
+## Workflow Execution Visibility
+
+    def describe_workflow_execution(self, domain, run_id, workflow_id):
+        """
+        Returns information about the specified workflow execution
+        including its type and some statistics.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow execution.
+
+        :type run_id: string
+        :param run_id: A system generated unique identifier for the
+            workflow execution.
+
+        :type workflow_id: string
+        :param workflow_id: The user defined identifier associated
+            with the workflow execution.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('DescribeWorkflowExecution', {
+            'domain': domain,
+            'execution': {'runId': run_id,
+                          'workflowId': workflow_id},
+        })
+
+    def get_workflow_execution_history(self, domain, run_id, workflow_id,
+                                       maximum_page_size=None,
+                                       next_page_token=None,
+                                       reverse_order=None):
+        """
+        Returns the history of the specified workflow execution. The
+        results may be split into multiple pages. To retrieve
+        subsequent pages, make the call again using the nextPageToken
+        returned by the initial call.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow execution.
+
+        :type run_id: string
+        :param run_id: A system generated unique identifier for the
+            workflow execution.
+
+        :type workflow_id: string
+        :param workflow_id: The user defined identifier associated
+            with the workflow execution.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: Specifies the maximum number of
+            history events returned in one page. The next page in the
+            result is identified by the NextPageToken returned. By default
+            100 history events are returned in a page but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size larger than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If a NextPageToken is returned, the
+            result has more than one page. To get the next page, repeat
+            the call and specify the nextPageToken with all other
+            arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the events in
+            reverse order. By default the results are returned in
+            ascending order of the eventTimeStamp of the events.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('GetWorkflowExecutionHistory', {
+            'domain': domain,
+            'execution': {'runId': run_id,
+                          'workflowId': workflow_id},
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def count_open_workflow_executions(self, domain, latest_date, oldest_date,
+                                       tag=None,
+                                       workflow_id=None,
+                                       workflow_name=None,
+                                       workflow_version=None):
+        """
+        Returns the number of open workflow executions within the
+        given domain that meet the specified filtering criteria.
+
+        .. note::
+            workflow_id, workflow_name/workflow_version and tag are
+            mutually exclusive.
You can specify at most one of these in a request.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow executions to count.
+
+        :type latest_date: timestamp
+        :param latest_date: Specifies the latest start or close date
+            and time to return.
+
+        :type oldest_date: timestamp
+        :param oldest_date: Specifies the oldest start or close date
+            and time to return.
+
+        :type workflow_name: string
+        :param workflow_name: Name of the workflow type to filter on.
+
+        :type workflow_version: string
+        :param workflow_version: Version of the workflow type to filter on.
+
+        :type tag: string
+        :param tag: If specified, only executions that have a tag
+            that matches the filter are counted.
+
+        :type workflow_id: string
+        :param workflow_id: If specified, only workflow executions
+            matching the workflow_id are counted.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('CountOpenWorkflowExecutions', {
+            'domain': domain,
+            'startTimeFilter': {'oldestDate': oldest_date,
+                                'latestDate': latest_date},
+            'typeFilter': {'name': workflow_name,
+                           'version': workflow_version},
+            'executionFilter': {'workflowId': workflow_id},
+            'tagFilter': {'tag': tag},
+        })
+
+    def list_open_workflow_executions(self, domain,
+                                      oldest_date,
+                                      latest_date=None,
+                                      tag=None,
+                                      workflow_id=None,
+                                      workflow_name=None,
+                                      workflow_version=None,
+                                      maximum_page_size=None,
+                                      next_page_token=None,
+                                      reverse_order=None):
+        """
+        Returns the list of open workflow executions within the
+        given domain that meet the specified filtering criteria.
+
+        .. note::
+            workflow_id, workflow_name/workflow_version
+            and tag are mutually exclusive. You can specify at most
+            one of these in a request.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow executions to list.
+
+        :type latest_date: timestamp
+        :param latest_date: Specifies the latest start or close date
+            and time to return.
+
+        :type oldest_date: timestamp
+        :param oldest_date: Specifies the oldest start or close date
+            and time to return.
+
+        :type tag: string
+        :param tag: If specified, only executions that have a tag
+            that matches the filter are listed.
+
+        :type workflow_id: string
+        :param workflow_id: If specified, only workflow executions
+            matching the workflow_id are listed.
+
+        :type workflow_name: string
+        :param workflow_name: Name of the workflow type to filter on.
+
+        :type workflow_version: string
+        :param workflow_version: Version of the workflow type to filter on.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of results
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being
+            paginated. To get the next page of results, repeat the call
+            with the returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order. By default the results are returned in
+            descending order of the start or the close time of the
+            executions.
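+
+        A minimal pagination sketch (illustrative only; assumes ``conn``
+        is an existing Layer1 connection, ``'my-domain'`` is a
+        placeholder, and the ``time`` module is imported)::
+
+            oldest = time.time() - 3600 * 24  # started in the last day
+            kwargs = {}
+            while True:
+                page = conn.list_open_workflow_executions('my-domain',
+                                                          oldest, **kwargs)
+                for info in page['executionInfos']:
+                    print info['execution']['workflowId']
+                if 'nextPageToken' not in page:
+                    break
+                kwargs['next_page_token'] = page['nextPageToken']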
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('ListOpenWorkflowExecutions', {
+            'domain': domain,
+            'startTimeFilter': {'oldestDate': oldest_date,
+                                'latestDate': latest_date},
+            'tagFilter': {'tag': tag},
+            'typeFilter': {'name': workflow_name,
+                           'version': workflow_version},
+            'executionFilter': {'workflowId': workflow_id},
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def count_closed_workflow_executions(self, domain,
+                                         start_latest_date=None,
+                                         start_oldest_date=None,
+                                         close_latest_date=None,
+                                         close_oldest_date=None,
+                                         close_status=None,
+                                         tag=None,
+                                         workflow_id=None,
+                                         workflow_name=None,
+                                         workflow_version=None):
+        """
+        Returns the number of closed workflow executions within the
+        given domain that meet the specified filtering criteria.
+
+        .. note::
+            close_status, workflow_id, workflow_name/workflow_version
+            and tag are mutually exclusive. You can specify at most
+            one of these in a request.
+
+        .. note::
+            start_latest_date/start_oldest_date and
+            close_latest_date/close_oldest_date are mutually
+            exclusive. You can specify at most one of these in a request.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow executions to count.
+
+        :type start_latest_date: timestamp
+        :param start_latest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are counted.
+
+        :type start_oldest_date: timestamp
+        :param start_oldest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are counted.
+
+        :type close_latest_date: timestamp
+        :param close_latest_date: If specified, only workflow executions
+            that meet the close time criteria of the filter are counted.
+
+        :type close_oldest_date: timestamp
+        :param close_oldest_date: If specified, only workflow executions
+            that meet the close time criteria of the filter are counted.
+
+        :type close_status: string
+        :param close_status: The close status that must match the close status
+            of an execution for it to meet the criteria of this filter.
+            Valid values are:
+
+            * COMPLETED
+            * FAILED
+            * CANCELED
+            * TERMINATED
+            * CONTINUED_AS_NEW
+            * TIMED_OUT
+
+        :type tag: string
+        :param tag: If specified, only executions that have a tag
+            that matches the filter are counted.
+
+        :type workflow_id: string
+        :param workflow_id: If specified, only workflow executions
+            matching the workflow_id are counted.
+
+        :type workflow_name: string
+        :param workflow_name: Name of the workflow type to filter on.
+
+        :type workflow_version: string
+        :param workflow_version: Version of the workflow type to filter on.
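+
+        A brief sketch (illustrative; assumes ``conn`` is an existing
+        Layer1 connection and that the ``time`` module is imported)::
+
+            counts = conn.count_closed_workflow_executions(
+                'my-domain',
+                close_oldest_date=time.time() - 3600 * 24,
+                close_latest_date=time.time(),
+                close_status='FAILED')
+            print counts['count']  # the response dict carries a 'count' key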
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('CountClosedWorkflowExecutions', {
+            'domain': domain,
+            'startTimeFilter': {'oldestDate': start_oldest_date,
+                                'latestDate': start_latest_date},
+            'closeTimeFilter': {'oldestDate': close_oldest_date,
+                                'latestDate': close_latest_date},
+            'closeStatusFilter': {'status': close_status},
+            'tagFilter': {'tag': tag},
+            'typeFilter': {'name': workflow_name,
+                           'version': workflow_version},
+            'executionFilter': {'workflowId': workflow_id}
+        })
+
+    def list_closed_workflow_executions(self, domain,
+                                        start_latest_date=None,
+                                        start_oldest_date=None,
+                                        close_latest_date=None,
+                                        close_oldest_date=None,
+                                        close_status=None,
+                                        tag=None,
+                                        workflow_id=None,
+                                        workflow_name=None,
+                                        workflow_version=None,
+                                        maximum_page_size=None,
+                                        next_page_token=None,
+                                        reverse_order=None):
+        """
+        Returns a list of closed workflow executions within the
+        given domain that meet the specified filtering criteria.
+
+        .. note::
+            close_status, workflow_id, workflow_name/workflow_version
+            and tag are mutually exclusive. You can specify at most
+            one of these in a request.
+
+        .. note::
+            start_latest_date/start_oldest_date and
+            close_latest_date/close_oldest_date are mutually
+            exclusive. You can specify at most one of these in a request.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow executions to list.
+
+        :type start_latest_date: timestamp
+        :param start_latest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are listed.
+
+        :type start_oldest_date: timestamp
+        :param start_oldest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are listed.
+
+        :type close_latest_date: timestamp
+        :param close_latest_date: If specified, only workflow executions
+            that meet the close time criteria of the filter are listed.
+
+        :type close_oldest_date: timestamp
+        :param close_oldest_date: If specified, only workflow executions
+            that meet the close time criteria of the filter are listed.
+
+        :type close_status: string
+        :param close_status: The close status that must match the close status
+            of an execution for it to meet the criteria of this filter.
+            Valid values are:
+
+            * COMPLETED
+            * FAILED
+            * CANCELED
+            * TERMINATED
+            * CONTINUED_AS_NEW
+            * TIMED_OUT
+
+        :type tag: string
+        :param tag: If specified, only executions that have a tag
+            that matches the filter are listed.
+
+        :type workflow_id: string
+        :param workflow_id: If specified, only workflow executions
+            matching the workflow_id are listed.
+
+        :type workflow_name: string
+        :param workflow_name: Name of the workflow type to filter on.
+
+        :type workflow_version: string
+        :param workflow_version: Version of the workflow type to filter on.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of results
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being
+            paginated. To get the next page of results, repeat the call
+            with the returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order.
By default the results are returned in + descending order of the start or the close time of the + executions. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('ListClosedWorkflowExecutions', { + 'domain': domain, + 'startTimeFilter': {'oldestDate': start_oldest_date, + 'latestDate': start_latest_date}, + 'closeTimeFilter': {'oldestDate': close_oldest_date, + 'latestDate': close_latest_date}, + 'executionFilter': {'workflowId': workflow_id}, + 'closeStatusFilter': {'status': close_status}, + 'tagFilter': {'tag': tag}, + 'typeFilter': {'name': workflow_name, + 'version': workflow_version}, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + +## Domain Visibility + + def list_domains(self, registration_status, + maximum_page_size=None, + next_page_token=None, reverse_order=None): + """ + Returns the list of domains registered in the account. The + results may be split into multiple pages. To retrieve + subsequent pages, make the call again using the nextPageToken + returned by the initial call. + + :type registration_status: string + :param registration_status: Specifies the registration status + of the domains to list. Valid Values: + + * REGISTERED + * DEPRECATED + + :type maximum_page_size: integer + :param maximum_page_size: The maximum number of results + returned in each page. The default is 100, but the caller can + override this value to a page size smaller than the + default. You cannot specify a page size greater than 100. + + :type next_page_token: string + :param next_page_token: If on a previous call to this method a + NextPageToken was returned, the result has more than one + page. To get the next page of results, repeat the call with + the returned token and all other arguments unchanged. + + :type reverse_order: boolean + :param reverse_order: When set to true, returns the results in + reverse order. By default the results are returned in + ascending alphabetical order of the name of the domains. + + :raises: SWFOperationNotPermittedError + """ + return self.json_request('ListDomains', { + 'registrationStatus': registration_status, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + + def describe_domain(self, name): + """ + Returns information about the specified domain including + description and status. + + :type name: string + :param name: The name of the domain to describe. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('DescribeDomain', {'name': name}) + +## Task List Visibility + + def count_pending_decision_tasks(self, domain, task_list): + """ + Returns the estimated number of decision tasks in the + specified task list. The count returned is an approximation + and is not guaranteed to be exact. If you specify a task list + that no decision task was ever scheduled in then 0 will be + returned. + + :type domain: string + :param domain: The name of the domain that contains the task list. + + :type task_list: string + :param task_list: The name of the task list. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('CountPendingDecisionTasks', { + 'domain': domain, + 'taskList': {'name': task_list} + }) + + def count_pending_activity_tasks(self, domain, task_list): + """ + Returns the estimated number of activity tasks in the + specified task list. 
The count returned is an approximation
+        and is not guaranteed to be exact. If you specify a task list
+        that no activity task was ever scheduled in then 0 will be
+        returned.
+
+        :type domain: string
+        :param domain: The name of the domain that contains the task list.
+
+        :type task_list: string
+        :param task_list: The name of the task list.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('CountPendingActivityTasks', {
+            'domain': domain,
+            'taskList': {'name': task_list}
+        })
diff --git a/awx/lib/site-packages/boto/swf/layer1_decisions.py b/awx/lib/site-packages/boto/swf/layer1_decisions.py
new file mode 100644
index 0000000000..6c273aa14d
--- /dev/null
+++ b/awx/lib/site-packages/boto/swf/layer1_decisions.py
@@ -0,0 +1,287 @@
+"""
+Helper class for creating decision responses.
+"""
+
+
+class Layer1Decisions(object):
+    """
+    Use this object to build a list of decisions for a decision response.
+    Each method call will append a new decision. Retrieve the list
+    of decisions from the _data attribute.
+
+    """
+    def __init__(self):
+        self._data = []
+
+    def schedule_activity_task(self,
+                               activity_id,
+                               activity_type_name,
+                               activity_type_version,
+                               task_list=None,
+                               control=None,
+                               heartbeat_timeout=None,
+                               schedule_to_close_timeout=None,
+                               schedule_to_start_timeout=None,
+                               start_to_close_timeout=None,
+                               input=None):
+        """
+        Schedules an activity task.
+
+        :type activity_id: string
+        :param activity_id: The activityId of the type of the activity
+            being scheduled.
+
+        :type activity_type_name: string
+        :param activity_type_name: The name of the type of the activity
+            being scheduled.
+
+        :type activity_type_version: string
+        :param activity_type_version: The version of the type of the
+            activity being scheduled.
+
+        :type task_list: string
+        :param task_list: If set, specifies the name of the task list in
+            which to schedule the activity task. If not specified, the
+            defaultTaskList registered with the activity type will be used.
+            Note: a task list for this activity task must be specified either
+            as a default for the activity type or through this field. If
+            neither this field is set nor a default task list was specified
+            at registration time then a fault will be returned.
+        """
+        o = {}
+        o['decisionType'] = 'ScheduleActivityTask'
+        attrs = o['scheduleActivityTaskDecisionAttributes'] = {}
+        attrs['activityId'] = activity_id
+        attrs['activityType'] = {
+            'name': activity_type_name,
+            'version': activity_type_version,
+        }
+        if task_list is not None:
+            attrs['taskList'] = {'name': task_list}
+        if control is not None:
+            attrs['control'] = control
+        if heartbeat_timeout is not None:
+            attrs['heartbeatTimeout'] = heartbeat_timeout
+        if schedule_to_close_timeout is not None:
+            attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout
+        if schedule_to_start_timeout is not None:
+            attrs['scheduleToStartTimeout'] = schedule_to_start_timeout
+        if start_to_close_timeout is not None:
+            attrs['startToCloseTimeout'] = start_to_close_timeout
+        if input is not None:
+            attrs['input'] = input
+        self._data.append(o)
+
+    def request_cancel_activity_task(self, activity_id):
+        """
+        Attempts to cancel a previously scheduled activity task. If
+        the activity task was scheduled but has not been assigned to a
+        worker, then it will be canceled. If the activity task was
+        already assigned to a worker, then the worker will be informed
+        that cancellation has been requested in the response to
+        RecordActivityTaskHeartbeat.
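+
+        A sketch of typical decider usage (illustrative; the activity
+        id is a placeholder, and the decisions are later sent back via
+        respond_decision_task_completed)::
+
+            decisions = Layer1Decisions()
+            decisions.request_cancel_activity_task('my-activity-id')
+            # decisions._data now holds the decision list for the response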
+ """ + o = {} + o['decisionType'] = 'RequestCancelActivityTask' + attrs = o['requestCancelActivityTaskDecisionAttributes'] = {} + attrs['activityId'] = activity_id + self._data.append(o) + + def record_marker(self, marker_name, details=None): + """ + Records a MarkerRecorded event in the history. Markers can be + used for adding custom information in the history for instance + to let deciders know that they do not need to look at the + history beyond the marker event. + """ + o = {} + o['decisionType'] = 'RecordMarker' + attrs = o['recordMarkerDecisionAttributes'] = {} + attrs['markerName'] = marker_name + if details is not None: + attrs['details'] = details + self._data.append(o) + + def complete_workflow_execution(self, result=None): + """ + Closes the workflow execution and records a WorkflowExecutionCompleted + event in the history + """ + o = {} + o['decisionType'] = 'CompleteWorkflowExecution' + attrs = o['completeWorkflowExecutionDecisionAttributes'] = {} + if result is not None: + attrs['result'] = result + self._data.append(o) + + def fail_workflow_execution(self, reason=None, details=None): + """ + Closes the workflow execution and records a + WorkflowExecutionFailed event in the history. + """ + o = {} + o['decisionType'] = 'FailWorkflowExecution' + attrs = o['failWorkflowExecutionDecisionAttributes'] = {} + if reason is not None: + attrs['reason'] = reason + if details is not None: + attrs['details'] = details + self._data.append(o) + + def cancel_workflow_executions(self, details=None): + """ + Closes the workflow execution and records a WorkflowExecutionCanceled + event in the history. + """ + o = {} + o['decisionType'] = 'CancelWorkflowExecution' + attrs = o['cancelWorkflowExecutionsDecisionAttributes'] = {} + if details is not None: + attrs['details'] = details + self._data.append(o) + + def continue_as_new_workflow_execution(self, + child_policy=None, + execution_start_to_close_timeout=None, + input=None, + tag_list=None, + task_list=None, + start_to_close_timeout=None, + workflow_type_version=None): + """ + Closes the workflow execution and starts a new workflow execution of + the same type using the same workflow id and a unique run Id. A + WorkflowExecutionContinuedAsNew event is recorded in the history. + """ + o = {} + o['decisionType'] = 'ContinueAsNewWorkflowExecution' + attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {} + if child_policy is not None: + attrs['childPolicy'] = child_policy + if execution_start_to_close_timeout is not None: + attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout + if input is not None: + attrs['input'] = input + if tag_list is not None: + attrs['tagList'] = tag_list + if task_list is not None: + attrs['taskList'] = {'name': task_list} + if start_to_close_timeout is not None: + attrs['startToCloseTimeout'] = start_to_close_timeout + if workflow_type_version is not None: + attrs['workflowTypeVersion'] = workflow_type_version + self._data.append(o) + + def start_timer(self, + start_to_fire_timeout, + timer_id, + control=None): + """ + Starts a timer for this workflow execution and records a TimerStarted + event in the history. This timer will fire after the specified delay + and record a TimerFired event. 
+ """ + o = {} + o['decisionType'] = 'StartTimer' + attrs = o['startTimerDecisionAttributes'] = {} + attrs['startToFireTimeout'] = start_to_fire_timeout + attrs['timerId'] = timer_id + if control is not None: + attrs['control'] = control + self._data.append(o) + + def cancel_timer(self, timer_id): + """ + Cancels a previously started timer and records a TimerCanceled + event in the history. + """ + o = {} + o['decisionType'] = 'CancelTimer' + attrs = o['cancelTimerDecisionAttributes'] = {} + attrs['timerId'] = timer_id + self._data.append(o) + + def signal_external_workflow_execution(self, + workflow_id, + signal_name, + run_id=None, + control=None, + input=None): + """ + Requests a signal to be delivered to the specified external workflow + execution and records a SignalExternalWorkflowExecutionInitiated + event in the history. + """ + o = {} + o['decisionType'] = 'SignalExternalWorkflowExecution' + attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowId'] = workflow_id + attrs['signalName'] = signal_name + if run_id is not None: + attrs['runId'] = run_id + if control is not None: + attrs['control'] = control + if input is not None: + attrs['input'] = input + self._data.append(o) + + def request_cancel_external_workflow_execution(self, + workflow_id, + control=None, + run_id=None): + """ + Requests that a request be made to cancel the specified + external workflow execution and records a + RequestCancelExternalWorkflowExecutionInitiated event in the + history. + """ + o = {} + o['decisionType'] = 'RequestCancelExternalWorkflowExecution' + attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowId'] = workflow_id + if control is not None: + attrs['control'] = control + if run_id is not None: + attrs['runId'] = run_id + self._data.append(o) + + def start_child_workflow_execution(self, + workflow_type_name, + workflow_type_version, + workflow_id, + child_policy=None, + control=None, + execution_start_to_close_timeout=None, + input=None, + tag_list=None, + task_list=None, + task_start_to_close_timeout=None): + """ + Requests that a child workflow execution be started and + records a StartChildWorkflowExecutionInitiated event in the + history. The child workflow execution is a separate workflow + execution with its own history. 
+ """ + o = {} + o['decisionType'] = 'StartChildWorkflowExecution' + attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowType'] = { + 'name': workflow_type_name, + 'version': workflow_type_version, + } + attrs['workflowId'] = workflow_id + if child_policy is not None: + attrs['childPolicy'] = child_policy + if control is not None: + attrs['control'] = control + if execution_start_to_close_timeout is not None: + attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout + if input is not None: + attrs['input'] = input + if tag_list is not None: + attrs['tagList'] = tag_list + if task_list is not None: + attrs['taskList'] = {'name': task_list} + if task_start_to_close_timeout is not None: + attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout + self._data.append(o) diff --git a/awx/lib/site-packages/boto/swf/layer2.py b/awx/lib/site-packages/boto/swf/layer2.py new file mode 100644 index 0000000000..dd467b42a6 --- /dev/null +++ b/awx/lib/site-packages/boto/swf/layer2.py @@ -0,0 +1,336 @@ +"""Object-oriented interface to SWF wrapping boto.swf.layer1.Layer1""" + +import time +from functools import wraps +from boto.swf.layer1 import Layer1 +from boto.swf.layer1_decisions import Layer1Decisions + +DEFAULT_CREDENTIALS = { + 'aws_access_key_id': None, + 'aws_secret_access_key': None +} + +def set_default_credentials(aws_access_key_id, aws_secret_access_key): + """Set default credentials.""" + DEFAULT_CREDENTIALS.update({ + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key, + }) + +class SWFBase(object): + + name = None + domain = None + aws_access_key_id = None + aws_secret_access_key = None + + def __init__(self, **kwargs): + # Set default credentials. + for credkey in ('aws_access_key_id', 'aws_secret_access_key'): + if DEFAULT_CREDENTIALS.get(credkey): + setattr(self, credkey, DEFAULT_CREDENTIALS[credkey]) + # Override attributes with keyword args. 
+        for kwarg in kwargs:
+            setattr(self, kwarg, kwargs[kwarg])
+
+        self._swf = Layer1(self.aws_access_key_id,
+                           self.aws_secret_access_key)
+
+    def __repr__(self):
+        rep_str = str(self.name)
+        if hasattr(self, 'version'):
+            rep_str += '-' + str(getattr(self, 'version'))
+        return '<%s %r at 0x%x>' % (self.__class__.__name__, rep_str, id(self))
+
+class Domain(SWFBase):
+
+    """Simple Workflow Domain."""
+
+    description = None
+    retention = 30
+
+    @wraps(Layer1.describe_domain)
+    def describe(self):
+        """DescribeDomain."""
+        return self._swf.describe_domain(self.name)
+
+    @wraps(Layer1.deprecate_domain)
+    def deprecate(self):
+        """DeprecateDomain."""
+        self._swf.deprecate_domain(self.name)
+
+    @wraps(Layer1.register_domain)
+    def register(self):
+        """RegisterDomain."""
+        self._swf.register_domain(self.name, str(self.retention),
+                                  self.description)
+
+    @wraps(Layer1.list_activity_types)
+    def activities(self, status='REGISTERED', **kwargs):
+        """ListActivityTypes."""
+        act_types = self._swf.list_activity_types(self.name, status, **kwargs)
+        act_objects = []
+        for act_args in act_types['typeInfos']:
+            act_ident = act_args['activityType']
+            del act_args['activityType']
+            act_args.update(act_ident)
+            act_args.update({
+                'aws_access_key_id': self.aws_access_key_id,
+                'aws_secret_access_key': self.aws_secret_access_key,
+                'domain': self.name,
+            })
+            act_objects.append(ActivityType(**act_args))
+        return act_objects
+
+    @wraps(Layer1.list_workflow_types)
+    def workflows(self, status='REGISTERED', **kwargs):
+        """ListWorkflowTypes."""
+        wf_types = self._swf.list_workflow_types(self.name, status, **kwargs)
+        wf_objects = []
+        for wf_args in wf_types['typeInfos']:
+            wf_ident = wf_args['workflowType']
+            del wf_args['workflowType']
+            wf_args.update(wf_ident)
+            wf_args.update({
+                'aws_access_key_id': self.aws_access_key_id,
+                'aws_secret_access_key': self.aws_secret_access_key,
+                'domain': self.name,
+            })
+
+            wf_objects.append(WorkflowType(**wf_args))
+        return wf_objects
+
+    def executions(self, closed=False, **kwargs):
+        """List open or closed executions.
+
+        For a full list of available parameters refer to
+        :py:func:`boto.swf.layer1.Layer1.list_closed_workflow_executions` and
+        :py:func:`boto.swf.layer1.Layer1.list_open_workflow_executions`
+        """
+        if closed:
+            executions = self._swf.list_closed_workflow_executions(self.name,
+                                                                   **kwargs)
+        else:
+            if 'oldest_date' not in kwargs:
+                # Last 24 hours.
+                kwargs['oldest_date'] = time.time() - (3600 * 24)
+            executions = self._swf.list_open_workflow_executions(self.name,
+                                                                 **kwargs)
+        exe_objects = []
+        for exe_args in executions['executionInfos']:
+            for nested_key in ('execution', 'workflowType'):
+                nested_dict = exe_args[nested_key]
+                del exe_args[nested_key]
+                exe_args.update(nested_dict)
+
+            exe_args.update({
+                'aws_access_key_id': self.aws_access_key_id,
+                'aws_secret_access_key': self.aws_secret_access_key,
+                'domain': self.name,
+            })
+
+            exe_objects.append(WorkflowExecution(**exe_args))
+        return exe_objects
+
+    @wraps(Layer1.count_pending_activity_tasks)
+    def count_pending_activity_tasks(self, task_list):
+        """CountPendingActivityTasks."""
+        return self._swf.count_pending_activity_tasks(self.name, task_list)
+
+    @wraps(Layer1.count_pending_decision_tasks)
+    def count_pending_decision_tasks(self, task_list):
+        """CountPendingDecisionTasks."""
+        return self._swf.count_pending_decision_tasks(self.name, task_list)
+
+
+class Actor(SWFBase):
+
+    task_list = None
+    last_tasktoken = None
+    domain = None
+
+    def run(self):
+        """To be overloaded by subclasses."""
+        raise NotImplementedError()
+
+class ActivityWorker(Actor):
+
+    """Base class for SimpleWorkflow activity workers."""
+
+    @wraps(Layer1.respond_activity_task_canceled)
+    def cancel(self, task_token=None, details=None):
+        """RespondActivityTaskCanceled."""
+        if task_token is None:
+            task_token = self.last_tasktoken
+        return self._swf.respond_activity_task_canceled(task_token, details)
+
+    @wraps(Layer1.respond_activity_task_completed)
+    def complete(self, task_token=None, result=None):
+        """RespondActivityTaskCompleted."""
+        if task_token is None:
+            task_token = self.last_tasktoken
+        return self._swf.respond_activity_task_completed(task_token, result)
+
+    @wraps(Layer1.respond_activity_task_failed)
+    def fail(self, task_token=None, details=None, reason=None):
+        """RespondActivityTaskFailed."""
+        if task_token is None:
+            task_token = self.last_tasktoken
+        return self._swf.respond_activity_task_failed(task_token, details,
+                                                      reason)
+
+    @wraps(Layer1.record_activity_task_heartbeat)
+    def heartbeat(self, task_token=None, details=None):
+        """RecordActivityTaskHeartbeat."""
+        if task_token is None:
+            task_token = self.last_tasktoken
+        return self._swf.record_activity_task_heartbeat(task_token, details)
+
+    @wraps(Layer1.poll_for_activity_task)
+    def poll(self, **kwargs):
+        """PollForActivityTask."""
+        task = self._swf.poll_for_activity_task(self.domain, self.task_list,
+                                                **kwargs)
+        self.last_tasktoken = task.get('taskToken')
+        return task
+
+class Decider(Actor):
+
+    """Base class for SimpleWorkflow deciders."""
+
+    @wraps(Layer1.respond_decision_task_completed)
+    def complete(self, task_token=None, decisions=None, **kwargs):
+        """RespondDecisionTaskCompleted."""
+        if isinstance(decisions, Layer1Decisions):
+            # Extract decision list from a Layer1Decisions instance.
+            decisions = decisions._data
+        if task_token is None:
+            task_token = self.last_tasktoken
+        return self._swf.respond_decision_task_completed(task_token, decisions,
+                                                         **kwargs)
+
+    @wraps(Layer1.poll_for_decision_task)
+    def poll(self, **kwargs):
+        """PollForDecisionTask."""
+        result = self._swf.poll_for_decision_task(self.domain, self.task_list,
+                                                  **kwargs)
+        # Record the task token so complete() can default to it.
+        self.last_tasktoken = result.get('taskToken')
+        return result
+
+class WorkflowType(SWFBase):
+
+    """A versioned workflow type."""
+
+    version = None
+    task_list = None
+    child_policy = 'TERMINATE'
+
+    @wraps(Layer1.describe_workflow_type)
+    def describe(self):
+        """DescribeWorkflowType."""
+        return self._swf.describe_workflow_type(self.domain, self.name,
+                                                self.version)
+
+    @wraps(Layer1.register_workflow_type)
+    def register(self, **kwargs):
+        """RegisterWorkflowType."""
+        args = {
+            'default_execution_start_to_close_timeout': '3600',
+            'default_task_start_to_close_timeout': '300',
+            'default_child_policy': 'TERMINATE',
+        }
+        args.update(kwargs)
+        self._swf.register_workflow_type(self.domain, self.name, self.version,
+                                         **args)
+
+    @wraps(Layer1.deprecate_workflow_type)
+    def deprecate(self):
+        """DeprecateWorkflowType."""
+        self._swf.deprecate_workflow_type(self.domain, self.name, self.version)
+
+    @wraps(Layer1.start_workflow_execution)
+    def start(self, **kwargs):
+        """StartWorkflowExecution."""
+        if 'workflow_id' in kwargs:
+            workflow_id = kwargs['workflow_id']
+            del kwargs['workflow_id']
+        else:
+            workflow_id = '%s-%s-%i' % (self.name, self.version, time.time())
+
+        for def_attr in ('task_list', 'child_policy'):
+            kwargs[def_attr] = kwargs.get(def_attr, getattr(self, def_attr))
+        run_id = self._swf.start_workflow_execution(self.domain, workflow_id,
+                     self.name, self.version, **kwargs)['runId']
+        return WorkflowExecution(name=self.name, version=self.version,
+                   runId=run_id, domain=self.domain, workflowId=workflow_id,
+                   aws_access_key_id=self.aws_access_key_id,
+                   aws_secret_access_key=self.aws_secret_access_key)
+
+class WorkflowExecution(SWFBase):
+
+    """An instance of a workflow."""
+
+    workflowId = None
+    runId = None
+
+    @wraps(Layer1.signal_workflow_execution)
+    def signal(self, signame, **kwargs):
+        """SignalWorkflowExecution."""
+        self._swf.signal_workflow_execution(self.domain, signame,
+                                            self.workflowId, **kwargs)
+
+    @wraps(Layer1.terminate_workflow_execution)
+    def terminate(self, **kwargs):
+        """TerminateWorkflowExecution."""
+        return self._swf.terminate_workflow_execution(self.domain,
+                                                      self.workflowId, **kwargs)
+
+    @wraps(Layer1.get_workflow_execution_history)
+    def history(self, **kwargs):
+        """GetWorkflowExecutionHistory."""
+        return self._swf.get_workflow_execution_history(self.domain, self.runId,
+                   self.workflowId, **kwargs)['events']
+
+    @wraps(Layer1.describe_workflow_execution)
+    def describe(self):
+        """DescribeWorkflowExecution."""
+        return self._swf.describe_workflow_execution(self.domain, self.runId,
+                                                     self.workflowId)
+
+    @wraps(Layer1.request_cancel_workflow_execution)
+    def request_cancel(self):
+        """RequestCancelWorkflowExecution."""
+        return self._swf.request_cancel_workflow_execution(self.domain,
+                   self.workflowId, self.runId)
+
+
+class ActivityType(SWFBase):
+
+    """A versioned activity type."""
+
+    version = None
+
+    @wraps(Layer1.deprecate_activity_type)
+    def deprecate(self):
+        """DeprecateActivityType."""
+        return self._swf.deprecate_activity_type(self.domain, self.name,
+                                                 self.version)
+
+    @wraps(Layer1.describe_activity_type)
+    def describe(self):
+        """DescribeActivityType."""
+        return self._swf.describe_activity_type(self.domain, self.name,
+                                                self.version)
+
+    @wraps(Layer1.register_activity_type)
+    def register(self, **kwargs):
+        """RegisterActivityType."""
+        args = {
+            'default_task_heartbeat_timeout': '600',
+            'default_task_schedule_to_close_timeout': '3900',
+            'default_task_schedule_to_start_timeout': '300',
+            'default_task_start_to_close_timeout': '3600',
+        }
+        args.update(kwargs)
+        self._swf.register_activity_type(self.domain, self.name, self.version,
+                                         **args)
diff --git a/awx/lib/site-packages/boto/utils.py b/awx/lib/site-packages/boto/utils.py
new file mode 100644
index 0000000000..6d89b21f5f
--- /dev/null
+++ b/awx/lib/site-packages/boto/utils.py
@@ -0,0 +1,967 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#
+# Parts of this code were copied or derived from sample code supplied by AWS.
+# The following notice applies to that code.
+#
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices.
Your use of this software +# code is at your own risk and you waive any claim against Amazon +# Digital Services, Inc. or its affiliates with respect to your use of +# this software code. (c) 2006 Amazon Digital Services, Inc. or its +# affiliates. + +""" +Some handy utility functions used by several classes. +""" + +import socket +import urllib +import urllib2 +import imp +import subprocess +import StringIO +import time +import logging.handlers +import boto +import boto.provider +import tempfile +import smtplib +import datetime +import re +import email.mime.multipart +import email.mime.base +import email.mime.text +import email.utils +import email.encoders +import gzip +import base64 +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + + +try: + import hashlib + _hashfn = hashlib.sha512 +except ImportError: + import md5 + _hashfn = md5.md5 + +from boto.compat import json + +# List of Query String Arguments of Interest +qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging', + 'partNumber', 'policy', 'requestPayment', 'torrent', + 'versioning', 'versionId', 'versions', 'website', + 'uploads', 'uploadId', 'response-content-type', + 'response-content-language', 'response-expires', + 'response-cache-control', 'response-content-disposition', + 'response-content-encoding', 'delete', 'lifecycle', + 'tagging', 'restore', + # storageClass is a QSA for buckets in Google Cloud Storage. + # (StorageClass is associated to individual keys in S3, but + # having it listed here should cause no problems because + # GET bucket?storageClass is not part of the S3 API.) + 'storageClass', + # websiteConfig is a QSA for buckets in Google Cloud Storage. + 'websiteConfig', + # compose is a QSA for objects in Google Cloud Storage. + 'compose'] + + +_first_cap_regex = re.compile('(.)([A-Z][a-z]+)') +_number_cap_regex = re.compile('([a-z])([0-9]+)') +_end_cap_regex = re.compile('([a-z0-9])([A-Z])') + + +def unquote_v(nv): + if len(nv) == 1: + return nv + else: + return (nv[0], urllib.unquote(nv[1])) + + +def canonical_string(method, path, headers, expires=None, + provider=None): + """ + Generates the aws canonical string for the given parameters + """ + if not provider: + provider = boto.provider.get_default() + interesting_headers = {} + for key in headers: + lk = key.lower() + if headers[key] != None and (lk in ['content-md5', 'content-type', 'date'] or + lk.startswith(provider.header_prefix)): + interesting_headers[lk] = str(headers[key]).strip() + + # these keys get empty strings if they don't exist + if 'content-type' not in interesting_headers: + interesting_headers['content-type'] = '' + if 'content-md5' not in interesting_headers: + interesting_headers['content-md5'] = '' + + # just in case someone used this. it's not necessary in this lib. + if provider.date_header in interesting_headers: + interesting_headers['date'] = '' + + # if you're using expires for query string auth, then it trumps date + # (and provider.date_header) + if expires: + interesting_headers['date'] = str(expires) + + sorted_header_keys = sorted(interesting_headers.keys()) + + buf = "%s\n" % method + for key in sorted_header_keys: + val = interesting_headers[key] + if key.startswith(provider.header_prefix): + buf += "%s:%s\n" % (key, val) + else: + buf += "%s\n" % val + + # don't include anything after the first ? in the resource... 
+ # unless it is one of the QSA of interest, defined above + t = path.split('?') + buf += t[0] + + if len(t) > 1: + qsa = t[1].split('&') + qsa = [a.split('=', 1) for a in qsa] + qsa = [unquote_v(a) for a in qsa if a[0] in qsa_of_interest] + if len(qsa) > 0: + qsa.sort(cmp=lambda x, y:cmp(x[0], y[0])) + qsa = ['='.join(a) for a in qsa] + buf += '?' + buf += '&'.join(qsa) + + return buf + + +def merge_meta(headers, metadata, provider=None): + if not provider: + provider = boto.provider.get_default() + metadata_prefix = provider.metadata_prefix + final_headers = headers.copy() + for k in metadata.keys(): + if k.lower() in ['cache-control', 'content-md5', 'content-type', + 'content-encoding', 'content-disposition', + 'expires']: + final_headers[k] = metadata[k] + else: + final_headers[metadata_prefix + k] = metadata[k] + + return final_headers + + +def get_aws_metadata(headers, provider=None): + if not provider: + provider = boto.provider.get_default() + metadata_prefix = provider.metadata_prefix + metadata = {} + for hkey in headers.keys(): + if hkey.lower().startswith(metadata_prefix): + val = urllib.unquote_plus(headers[hkey]) + try: + metadata[hkey[len(metadata_prefix):]] = unicode(val, 'utf-8') + except UnicodeDecodeError: + metadata[hkey[len(metadata_prefix):]] = val + del headers[hkey] + return metadata + + +def retry_url(url, retry_on_404=True, num_retries=10): + """ + Retry a url. This is specifically used for accessing the metadata + service on an instance. Since this address should never be proxied + (for security reasons), we create a ProxyHandler with a NULL + dictionary to override any proxy settings in the environment. + """ + for i in range(0, num_retries): + try: + proxy_handler = urllib2.ProxyHandler({}) + opener = urllib2.build_opener(proxy_handler) + req = urllib2.Request(url) + r = opener.open(req) + result = r.read() + return result + except urllib2.HTTPError, e: + # in 2.6 you use getcode(), in 2.5 and earlier you use code + if hasattr(e, 'getcode'): + code = e.getcode() + else: + code = e.code + if code == 404 and not retry_on_404: + return '' + except Exception, e: + pass + boto.log.exception('Caught exception reading instance data') + # If not on the last iteration of the loop then sleep. 
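+        # (The sleep doubles on each retry: 1, 2, 4, ... seconds.)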
+            if i + 1 != num_retries:
+                time.sleep(2 ** i)
+    boto.log.error('Unable to read instance data, giving up')
+    return ''
+
+
+def _get_instance_metadata(url, num_retries):
+    return LazyLoadMetadata(url, num_retries)
+
+
+class LazyLoadMetadata(dict):
+    def __init__(self, url, num_retries):
+        self._url = url
+        self._num_retries = num_retries
+        self._leaves = {}
+        self._dicts = []
+        data = boto.utils.retry_url(self._url, num_retries=self._num_retries)
+        if data:
+            fields = data.split('\n')
+            for field in fields:
+                if field.endswith('/'):
+                    key = field[0:-1]
+                    self._dicts.append(key)
+                else:
+                    p = field.find('=')
+                    if p > 0:
+                        key = field[p + 1:]
+                        resource = field[0:p] + '/openssh-key'
+                    else:
+                        key = resource = field
+                    self._leaves[key] = resource
+                self[key] = None
+
+    def _materialize(self):
+        for key in self:
+            self[key]
+
+    def __getitem__(self, key):
+        if key not in self:
+            # allow dict to throw the KeyError
+            return super(LazyLoadMetadata, self).__getitem__(key)
+
+        # already loaded
+        val = super(LazyLoadMetadata, self).__getitem__(key)
+        if val is not None:
+            return val
+
+        if key in self._leaves:
+            resource = self._leaves[key]
+            val = boto.utils.retry_url(self._url + urllib.quote(resource,
+                                                                safe="/:"),
+                                       num_retries=self._num_retries)
+            if val and val[0] == '{':
+                val = json.loads(val)
+            else:
+                p = val.find('\n')
+                if p > 0:
+                    val = val.split('\n')
+            self[key] = val
+        elif key in self._dicts:
+            self[key] = LazyLoadMetadata(self._url + key + '/',
+                                         self._num_retries)
+
+        return super(LazyLoadMetadata, self).__getitem__(key)
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def values(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).values()
+
+    def items(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).items()
+
+    def __str__(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).__str__()
+
+    def __repr__(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).__repr__()
+
+
+def _build_instance_metadata_url(url, version, path):
+    """
+    Builds an EC2 metadata URL for fetching information about an instance.
+
+    Requires the following arguments: a URL, a version and a path.
+
+    Example:
+
+    >>> _build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data')
+    'http://169.254.169.254/latest/meta-data/'
+
+    """
+    return '%s/%s/%s/' % (url, version, path)
+
+
+def get_instance_metadata(version='latest', url='http://169.254.169.254',
+                          data='meta-data', timeout=None, num_retries=5):
+    """
+    Returns the instance metadata as a nested Python dictionary.
+    Simple values (e.g. local_hostname, hostname, etc.) will be
+    stored as string values. Values such as ancestor-ami-ids will
+    be stored in the dict as a list of string values. More complex
+    fields such as public-keys will be stored as nested dicts.
+
+    If the timeout is specified, the connection to the specified url
+    will time out after the specified number of seconds.
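+
+    Example (a hedged sketch; this only works from inside an EC2
+    instance, where the metadata service at ``url`` is reachable)::
+
+        from boto.utils import get_instance_metadata
+        md = get_instance_metadata(timeout=1, num_retries=2)
+        if md is not None:
+            print md['instance-id']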
+
+    """
+    if timeout is not None:
+        original = socket.getdefaulttimeout()
+        socket.setdefaulttimeout(timeout)
+    try:
+        metadata_url = _build_instance_metadata_url(url, version, data)
+        return _get_instance_metadata(metadata_url, num_retries=num_retries)
+    except urllib2.URLError, e:
+        return None
+    finally:
+        if timeout is not None:
+            socket.setdefaulttimeout(original)
+
+
+def get_instance_identity(version='latest', url='http://169.254.169.254',
+                          timeout=None, num_retries=5):
+    """
+    Returns the instance identity as a nested Python dictionary.
+    """
+    iid = {}
+    base_url = _build_instance_metadata_url(url, version, 'dynamic/instance-identity')
+    if timeout is not None:
+        original = socket.getdefaulttimeout()
+        socket.setdefaulttimeout(timeout)
+    try:
+        data = retry_url(base_url, num_retries=num_retries)
+        fields = data.split('\n')
+        for field in fields:
+            val = retry_url(base_url + '/' + field + '/')
+            if val[0] == '{':
+                val = json.loads(val)
+            if field:
+                iid[field] = val
+        return iid
+    except urllib2.URLError, e:
+        return None
+    finally:
+        if timeout is not None:
+            socket.setdefaulttimeout(original)
+
+
+def get_instance_userdata(version='latest', sep=None,
+                          url='http://169.254.169.254'):
+    ud_url = _build_instance_metadata_url(url, version, 'user-data')
+    user_data = retry_url(ud_url, retry_on_404=False)
+    if user_data:
+        if sep:
+            l = user_data.split(sep)
+            user_data = {}
+            for nvpair in l:
+                t = nvpair.split('=')
+                user_data[t[0].strip()] = t[1].strip()
+    return user_data
+
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
+RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
+
+
+def get_ts(ts=None):
+    if not ts:
+        ts = time.gmtime()
+    return time.strftime(ISO8601, ts)
+
+
+def parse_ts(ts):
+    ts = ts.strip()
+    try:
+        dt = datetime.datetime.strptime(ts, ISO8601)
+        return dt
+    except ValueError:
+        try:
+            dt = datetime.datetime.strptime(ts, ISO8601_MS)
+            return dt
+        except ValueError:
+            dt = datetime.datetime.strptime(ts, RFC1123)
+            return dt
+
+
+def find_class(module_name, class_name=None):
+    if class_name:
+        module_name = "%s.%s" % (module_name, class_name)
+    modules = module_name.split('.')
+    c = None
+
+    try:
+        for m in modules[1:]:
+            if c:
+                c = getattr(c, m)
+            else:
+                c = getattr(__import__(".".join(modules[0:-1])), m)
+        return c
+    except:
+        return None
+
+
+def update_dme(username, password, dme_id, ip_address):
+    """
+    Update your Dynamic DNS record with DNSMadeEasy.com
+    """
+    dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip'
+    dme_url += '?username=%s&password=%s&id=%s&ip=%s'
+    s = urllib2.urlopen(dme_url % (username, password, dme_id, ip_address))
+    return s.read()
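+
+
+# A hedged usage sketch for fetch_file below; the s3:// URI is a
+# placeholder, and username/password are optional (for s3:// URIs they are
+# used as AWS credentials, for HTTP URLs as basic-auth credentials):
+#
+#   f = fetch_file('s3://example-bucket/path/to/key')
+#   if f is not None:
+#       data = f.read()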
+def fetch_file(uri, file=None, username=None, password=None):
+    """
+    Fetch a file based on the URI provided.
+    If you do not pass in a file pointer, a tempfile.NamedTemporaryFile
+    is returned; None is returned if the file could not be retrieved.
+    The URI can be either an HTTP url, or "s3://bucket_name/key_name"
+    """
+    boto.log.info('Fetching %s' % uri)
+    if file is None:
+        file = tempfile.NamedTemporaryFile()
+    try:
+        if uri.startswith('s3://'):
+            bucket_name, key_name = uri[len('s3://'):].split('/', 1)
+            c = boto.connect_s3(aws_access_key_id=username,
+                                aws_secret_access_key=password)
+            bucket = c.get_bucket(bucket_name)
+            key = bucket.get_key(key_name)
+            key.get_contents_to_file(file)
+        else:
+            if username and password:
+                passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
+                passman.add_password(None, uri, username, password)
+                authhandler = urllib2.HTTPBasicAuthHandler(passman)
+                opener = urllib2.build_opener(authhandler)
+                urllib2.install_opener(opener)
+            s = urllib2.urlopen(uri)
+            file.write(s.read())
+        file.seek(0)
+    except:
+        # log the failure and return None rather than propagating
+        boto.log.exception('Problem Retrieving file: %s' % uri)
+        file = None
+    return file
+
+
+class ShellCommand(object):
+
+    def __init__(self, command, wait=True, fail_fast=False, cwd=None):
+        self.exit_code = 0
+        self.command = command
+        self.log_fp = StringIO.StringIO()
+        self.wait = wait
+        self.fail_fast = fail_fast
+        self.run(cwd=cwd)
+
+    def run(self, cwd=None):
+        boto.log.info('running:%s' % self.command)
+        self.process = subprocess.Popen(self.command, shell=True,
+                                        stdin=subprocess.PIPE,
+                                        stdout=subprocess.PIPE,
+                                        stderr=subprocess.PIPE,
+                                        cwd=cwd)
+        if self.wait:
+            while self.process.poll() is None:
+                time.sleep(1)
+                t = self.process.communicate()
+                self.log_fp.write(t[0])
+                self.log_fp.write(t[1])
+            boto.log.info(self.log_fp.getvalue())
+            self.exit_code = self.process.returncode
+
+            if self.fail_fast and self.exit_code != 0:
+                raise Exception("Command " + self.command +
+                                " failed with status " + str(self.exit_code))
+
+        return self.exit_code
+
+    def setReadOnly(self, value):
+        raise AttributeError
+
+    def getStatus(self):
+        return self.exit_code
+
+    status = property(getStatus, setReadOnly, None, 'The exit code for the command')
+
+    def getOutput(self):
+        return self.log_fp.getvalue()
+
+    output = property(getOutput, setReadOnly, None, 'The STDOUT and STDERR output of the command')
+
+
+class AuthSMTPHandler(logging.handlers.SMTPHandler):
+    """
+    This class extends the SMTPHandler in the standard Python logging module
+    to accept a username and password on the constructor and to then use those
+    credentials to authenticate with the SMTP server. To use this, you could
+    add something like this in your boto config file:
+
+    [handler_hand07]
+    class=boto.utils.AuthSMTPHandler
+    level=WARN
+    formatter=form07
+    args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject')
+    """
+
+    def __init__(self, mailhost, username, password,
+                 fromaddr, toaddrs, subject):
+        """
+        Initialize the handler.
+
+        We have extended the constructor to accept a username/password
+        for SMTP authentication.
+        """
+        logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr,
+                                              toaddrs, subject)
+        self.username = username
+        self.password = password
+
+    def emit(self, record):
+        """
+        Emit a record.
+
+        Format the record and send it to the specified addressees.
+        It would be really nice if I could add authorization to this class
+        without having to resort to cut and paste inheritance but, no.
+ """ + try: + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port) + smtp.login(self.username, self.password) + msg = self.format(record) + msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( + self.fromaddr, + ','.join(self.toaddrs), + self.getSubject(record), + email.utils.formatdate(), msg) + smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.quit() + except (KeyboardInterrupt, SystemExit): + raise + except: + self.handleError(record) + + +class LRUCache(dict): + """A dictionary-like object that stores only a certain number of items, and + discards its least recently used item when full. + + >>> cache = LRUCache(3) + >>> cache['A'] = 0 + >>> cache['B'] = 1 + >>> cache['C'] = 2 + >>> len(cache) + 3 + + >>> cache['A'] + 0 + + Adding new items to the cache does not increase its size. Instead, the least + recently used item is dropped: + + >>> cache['D'] = 3 + >>> len(cache) + 3 + >>> 'B' in cache + False + + Iterating over the cache returns the keys, starting with the most recently + used: + + >>> for key in cache: + ... print key + D + A + C + + This code is based on the LRUCache class from Genshi which is based on + `Myghty `_'s LRUCache from ``myghtyutils.util``, + written by Mike Bayer and released under the MIT license (Genshi uses the + BSD License). + """ + + class _Item(object): + def __init__(self, key, value): + self.previous = self.next = None + self.key = key + self.value = value + + def __repr__(self): + return repr(self.value) + + def __init__(self, capacity): + self._dict = dict() + self.capacity = capacity + self.head = None + self.tail = None + + def __contains__(self, key): + return key in self._dict + + def __iter__(self): + cur = self.head + while cur: + yield cur.key + cur = cur.next + + def __len__(self): + return len(self._dict) + + def __getitem__(self, key): + item = self._dict[key] + self._update_item(item) + return item.value + + def __setitem__(self, key, value): + item = self._dict.get(key) + if item is None: + item = self._Item(key, value) + self._dict[key] = item + self._insert_item(item) + else: + item.value = value + self._update_item(item) + self._manage_size() + + def __repr__(self): + return repr(self._dict) + + def _insert_item(self, item): + item.previous = None + item.next = self.head + if self.head is not None: + self.head.previous = item + else: + self.tail = item + self.head = item + self._manage_size() + + def _manage_size(self): + while len(self._dict) > self.capacity: + del self._dict[self.tail.key] + if self.tail != self.head: + self.tail = self.tail.previous + self.tail.next = None + else: + self.head = self.tail = None + + def _update_item(self, item): + if self.head == item: + return + + previous = item.previous + previous.next = item.next + if item.next is not None: + item.next.previous = previous + else: + self.tail = previous + + item.previous = None + item.next = self.head + self.head.previous = self.head = item + + +class Password(object): + """ + Password object that stores itself as hashed. + Hash defaults to SHA512 if available, MD5 otherwise. + """ + hashfunc = _hashfn + + def __init__(self, str=None, hashfunc=None): + """ + Load the string from an initial value, this should be the + raw hashed password. 
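+
+        Example (illustrative)::
+
+            pw = Password()
+            pw.set('secret')   # stores only the hash's hexdigest
+            pw == 'secret'     # True; comparison re-hashes the operand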
+ """ + self.str = str + if hashfunc: + self.hashfunc = hashfunc + + def set(self, value): + self.str = self.hashfunc(value).hexdigest() + + def __str__(self): + return str(self.str) + + def __eq__(self, other): + if other == None: + return False + return str(self.hashfunc(other).hexdigest()) == str(self.str) + + def __len__(self): + if self.str: + return len(self.str) + else: + return 0 + + +def notify(subject, body=None, html_body=None, to_string=None, + attachments=None, append_instance_id=True): + attachments = attachments or [] + if append_instance_id: + subject = "[%s] %s" % (boto.config.get_value("Instance", "instance-id"), subject) + if not to_string: + to_string = boto.config.get_value('Notification', 'smtp_to', None) + if to_string: + try: + from_string = boto.config.get_value('Notification', 'smtp_from', 'boto') + msg = email.mime.multipart.MIMEMultipart() + msg['From'] = from_string + msg['Reply-To'] = from_string + msg['To'] = to_string + msg['Date'] = email.utils.formatdate(localtime=True) + msg['Subject'] = subject + + if body: + msg.attach(email.mime.text.MIMEText(body)) + + if html_body: + part = email.mime.base.MIMEBase('text', 'html') + part.set_payload(html_body) + email.encoders.encode_base64(part) + msg.attach(part) + + for part in attachments: + msg.attach(part) + + smtp_host = boto.config.get_value('Notification', 'smtp_host', 'localhost') + + # Alternate port support + if boto.config.get_value("Notification", "smtp_port"): + server = smtplib.SMTP(smtp_host, int(boto.config.get_value("Notification", "smtp_port"))) + else: + server = smtplib.SMTP(smtp_host) + + # TLS support + if boto.config.getbool("Notification", "smtp_tls"): + server.ehlo() + server.starttls() + server.ehlo() + smtp_user = boto.config.get_value('Notification', 'smtp_user', '') + smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '') + if smtp_user: + server.login(smtp_user, smtp_pass) + server.sendmail(from_string, to_string, msg.as_string()) + server.quit() + except: + boto.log.exception('notify failed') + + +def get_utf8_value(value): + if not isinstance(value, str) and not isinstance(value, unicode): + value = str(value) + if isinstance(value, unicode): + return value.encode('utf-8') + else: + return value + + +def mklist(value): + if not isinstance(value, list): + if isinstance(value, tuple): + value = list(value) + else: + value = [value] + return value + + +def pythonize_name(name): + """Convert camel case to a "pythonic" name. + + Examples:: + + pythonize_name('CamelCase') -> 'camel_case' + pythonize_name('already_pythonized') -> 'already_pythonized' + pythonize_name('HTTPRequest') -> 'http_request' + pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok' + pythonize_name('UPPER') -> 'upper' + pythonize_name('') -> '' + + """ + s1 = _first_cap_regex.sub(r'\1_\2', name) + s2 = _number_cap_regex.sub(r'\1_\2', s1) + return _end_cap_regex.sub(r'\1_\2', s2).lower() + + +def write_mime_multipart(content, compress=False, deftype='text/plain', delimiter=':'): + """Description: + :param content: A list of tuples of name-content pairs. 
This is used instead of a dict to ensure that scripts run in order.
+    :type content: list of tuples
+
+    :param compress: Use gzip to compress the scripts, defaults to no
+        compression.
+    :type compress: bool
+
+    :param deftype: The type that should be assumed if nothing else can be
+        figured out.
+    :type deftype: str
+
+    :param delimiter: mime delimiter
+    :type delimiter: str
+
+    :return: Final mime multipart
+    :rtype: str
+    """
+    wrapper = email.mime.multipart.MIMEMultipart()
+    for name, con in content:
+        definite_type = guess_mime_type(con, deftype)
+        maintype, subtype = definite_type.split('/', 1)
+        if maintype == 'text':
+            mime_con = email.mime.text.MIMEText(con, _subtype=subtype)
+        else:
+            mime_con = email.mime.base.MIMEBase(maintype, subtype)
+            mime_con.set_payload(con)
+            # Encode the payload using Base64
+            email.encoders.encode_base64(mime_con)
+        mime_con.add_header('Content-Disposition', 'attachment', filename=name)
+        wrapper.attach(mime_con)
+    rcontent = wrapper.as_string()
+
+    if compress:
+        buf = StringIO.StringIO()
+        gz = gzip.GzipFile(mode='wb', fileobj=buf)
+        try:
+            gz.write(rcontent)
+        finally:
+            gz.close()
+        rcontent = buf.getvalue()
+
+    return rcontent
+
+
+def guess_mime_type(content, deftype):
+    """
+    Guess the mime type of a block of text.
+
+    :param content: content we're finding the type of
+    :type content: str
+
+    :param deftype: Default mime type
+    :type deftype: str
+
+    :return: The guessed mime type
+    :rtype: str
+    """
+    # Mappings recognized by cloudinit
+    starts_with_mappings = {
+        '#include': 'text/x-include-url',
+        '#!': 'text/x-shellscript',
+        '#cloud-config': 'text/cloud-config',
+        '#upstart-job': 'text/upstart-job',
+        '#part-handler': 'text/part-handler',
+        '#cloud-boothook': 'text/cloud-boothook'
+    }
+    rtype = deftype
+    for possible_type, mimetype in starts_with_mappings.items():
+        if content.startswith(possible_type):
+            rtype = mimetype
+            break
+    return rtype
+
+
+def compute_md5(fp, buf_size=8192, size=None):
+    """
+    Compute MD5 hash on passed file and return results in a tuple of values.
+
+    :type fp: file
+    :param fp: File pointer to the file to MD5 hash. The file pointer
+               will be reset to its current location before the
+               method returns.
+
+    :type buf_size: integer
+    :param buf_size: Number of bytes per read request.
+
+    :type size: int
+    :param size: (optional) The maximum number of bytes to read from
+                 the file pointer (fp). This is useful when uploading
+                 a file in multiple parts where the file is being
+                 split in place into different parts. Fewer bytes may
+                 be available.
+
+    :rtype: tuple
+    :return: A tuple containing the hex digest version of the MD5 hash
+             as the first element, the base64 encoded version of the
+             plain digest as the second element and the data size as
+             the third element.
+    """
+    return compute_hash(fp, buf_size, size, hash_algorithm=md5)
+
+
+def compute_hash(fp, buf_size=8192, size=None, hash_algorithm=md5):
+    hash_obj = hash_algorithm()
+    spos = fp.tell()
+    if size and size < buf_size:
+        s = fp.read(size)
+    else:
+        s = fp.read(buf_size)
+    while s:
+        hash_obj.update(s)
+        if size:
+            size -= len(s)
+            if size <= 0:
+                break
+        if size and size < buf_size:
+            s = fp.read(size)
+        else:
+            s = fp.read(buf_size)
+    hex_digest = hash_obj.hexdigest()
+    base64_digest = base64.encodestring(hash_obj.digest())
+    if base64_digest[-1] == '\n':
+        base64_digest = base64_digest[0:-1]
+    # data_size based on bytes read.
+    data_size = fp.tell() - spos
+    fp.seek(spos)
+    return (hex_digest, base64_digest, data_size)
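+
+
+# A small usage sketch for compute_md5 above (kept as a comment so nothing
+# runs on import; the file name is a placeholder):
+#
+#   fp = open('payload.bin', 'rb')
+#   hex_digest, b64_digest, size = compute_md5(fp)
+#   fp.close()   # the file position is restored before compute_md5 returns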
+
+
+def find_matching_headers(name, headers):
+    """
+    Takes a specific header name and a dict of headers {"name": "value"}.
+    Returns a list of matching header names, case-insensitive.
+
+    """
+    return [h for h in headers if h.lower() == name.lower()]
+
+
+def merge_headers_by_name(name, headers):
+    """
+    Takes a specific header name and a dict of headers {"name": "value"}.
+    Returns a string of all header values, comma-separated, that match the
+    input header name, case-insensitive.
+
+    """
+    matching_headers = find_matching_headers(name, headers)
+    return ','.join(str(headers[h]) for h in matching_headers
+                    if headers[h] is not None)
diff --git a/awx/lib/site-packages/boto/vpc/__init__.py b/awx/lib/site-packages/boto/vpc/__init__.py
new file mode 100644
index 0000000000..24a93a7409
--- /dev/null
+++ b/awx/lib/site-packages/boto/vpc/__init__.py
@@ -0,0 +1,1148 @@
+# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a connection to the EC2 VPC service.
+"""
+
+from boto.ec2.connection import EC2Connection
+from boto.resultset import ResultSet
+from boto.vpc.vpc import VPC
+from boto.vpc.customergateway import CustomerGateway
+from boto.vpc.routetable import RouteTable
+from boto.vpc.internetgateway import InternetGateway
+from boto.vpc.vpngateway import VpnGateway, Attachment
+from boto.vpc.dhcpoptions import DhcpOptions
+from boto.vpc.subnet import Subnet
+from boto.vpc.vpnconnection import VpnConnection
+from boto.ec2 import RegionData
+from boto.regioninfo import RegionInfo
+
+
+def regions(**kw_params):
+    """
+    Get all available regions for the EC2 service.
+    You may pass any of the arguments accepted by the VPCConnection
+    object's constructor as keyword arguments and they will be
+    passed along to the VPCConnection object.
+
+    :rtype: list
+    :return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
+    """
+    regions = []
+    for region_name in RegionData:
+        region = RegionInfo(name=region_name,
+                            endpoint=RegionData[region_name],
+                            connection_cls=VPCConnection)
+        regions.append(region)
+    # us-gov-west-1 is not in RegionData, so give it its own endpoint
+    # instead of reusing whichever endpoint the loop variable last held.
+    regions.append(RegionInfo(name='us-gov-west-1',
+                              endpoint='ec2.us-gov-west-1.amazonaws.com',
+                              connection_cls=VPCConnection))
+    return regions
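+
+
+# A hedged example of obtaining a VPC connection (the region name is
+# illustrative; connect_to_region below returns None for unknown names):
+#
+#   from boto.vpc import connect_to_region
+#   conn = connect_to_region('us-east-1')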
+def connect_to_region(region_name, **kw_params):
+    """
+    Given a valid region name, return a
+    :class:`boto.vpc.VPCConnection`.
+    Any additional parameters after the region_name are passed on to
+    the connect method of the region object.
+
+    :type region_name: str
+    :param region_name: The name of the region to connect to.
+
+    :rtype: :class:`boto.vpc.VPCConnection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+             name is given
+    """
+    for region in regions(**kw_params):
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
+
+
+class VPCConnection(EC2Connection):
+
+    # VPC methods
+
+    def get_all_vpcs(self, vpc_ids=None, filters=None, dry_run=False):
+        """
+        Retrieve information about your VPCs. You can filter results to
+        return information only about those VPCs that match your search
+        parameters. Otherwise, all VPCs associated with your account
+        are returned.
+
+        :type vpc_ids: list
+        :param vpc_ids: A list of strings with the desired VPC ID's
+
+        :type filters: list of tuples
+        :param filters: A list of tuples containing filters. Each tuple
+                        consists of a filter key and a filter value.
+                        Possible filter keys are:
+
+                        * *state* - a list of states of the VPC (pending or available)
+                        * *cidrBlock* - a list of CIDR blocks of the VPC
+                        * *dhcpOptionsId* - a list of IDs of a set of DHCP options
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.vpc.VPC`
+        """
+        params = {}
+        if vpc_ids:
+            self.build_list_params(params, vpc_ids, 'VpcId')
+        if filters:
+            self.build_filter_params(params, dict(filters))
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeVpcs', params, [('item', VPC)])
+
+    def create_vpc(self, cidr_block, dry_run=False):
+        """
+        Create a new Virtual Private Cloud.
+
+        :type cidr_block: str
+        :param cidr_block: A valid CIDR block
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: The newly created VPC
+        :return: A :class:`boto.vpc.vpc.VPC` object
+        """
+        params = {'CidrBlock': cidr_block}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateVpc', params, VPC)
+
+    def delete_vpc(self, vpc_id, dry_run=False):
+        """
+        Delete a Virtual Private Cloud.
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the vpc to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VpcId': vpc_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteVpc', params)
+
+    def modify_vpc_attribute(self, vpc_id,
+                             enable_dns_support=None,
+                             enable_dns_hostnames=None, dry_run=False):
+        """
+        Modifies the specified attribute of the specified VPC.
+        You can only modify one attribute at a time.
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the vpc to be modified.
+
+        :type enable_dns_support: bool
+        :param enable_dns_support: Specifies whether the DNS server
+            provided by Amazon is enabled for the VPC.
+
+        :type enable_dns_hostnames: bool
+        :param enable_dns_hostnames: Specifies whether DNS hostnames are
+            provided for the instances launched in this VPC. You can only
+            set this attribute to ``true`` if EnableDnsSupport
+            is also ``true``.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
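+
+        Example (illustrative; ``conn`` is an existing
+        :class:`VPCConnection` and the VPC ID is a placeholder)::
+
+            conn.modify_vpc_attribute('vpc-12345678',
+                                      enable_dns_hostnames=True)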
+ + """ + params = {'VpcId': vpc_id} + if enable_dns_support is not None: + if enable_dns_support: + params['EnableDnsSupport.Value'] = 'true' + else: + params['EnableDnsSupport.Value'] = 'false' + if enable_dns_hostnames is not None: + if enable_dns_hostnames: + params['EnableDnsHostnames.Value'] = 'true' + else: + params['EnableDnsHostnames.Value'] = 'false' + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ModifyVpcAttribute', params) + + # Route Tables + + def get_all_route_tables(self, route_table_ids=None, filters=None, + dry_run=False): + """ + Retrieve information about your routing tables. You can filter results + to return information only about those route tables that match your + search parameters. Otherwise, all route tables associated with your + account are returned. + + :type route_table_ids: list + :param route_table_ids: A list of strings with the desired route table + IDs. + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.routetable.RouteTable` + """ + params = {} + if route_table_ids: + self.build_list_params(params, route_table_ids, "RouteTableId") + if filters: + self.build_filter_params(params, dict(filters)) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeRouteTables', params, + [('item', RouteTable)]) + + def associate_route_table(self, route_table_id, subnet_id, dry_run=False): + """ + Associates a route table with a specific subnet. + + :type route_table_id: str + :param route_table_id: The ID of the route table to associate. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to associate with. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: str + :return: The ID of the association created + """ + params = { + 'RouteTableId': route_table_id, + 'SubnetId': subnet_id + } + if dry_run: + params['DryRun'] = 'true' + result = self.get_object('AssociateRouteTable', params, ResultSet) + return result.associationId + + def disassociate_route_table(self, association_id, dry_run=False): + """ + Removes an association from a route table. This will cause all subnets + that would've used this association to now use the main routing + association instead. + + :type association_id: str + :param association_id: The ID of the association to disassociate. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { 'AssociationId': association_id } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DisassociateRouteTable', params) + + def create_route_table(self, vpc_id, dry_run=False): + """ + Creates a new route table. + + :type vpc_id: str + :param vpc_id: The VPC ID to associate this route table with. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created route table + :return: A :class:`boto.vpc.routetable.RouteTable` object + """ + params = { 'VpcId': vpc_id } + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateRouteTable', params, RouteTable) + + def delete_route_table(self, route_table_id, dry_run=False): + """ + Delete a route table. 
+ + :type route_table_id: str + :param route_table_id: The ID of the route table to delete. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { 'RouteTableId': route_table_id } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteRouteTable', params) + + def create_route(self, route_table_id, destination_cidr_block, + gateway_id=None, instance_id=None, dry_run=False): + """ + Creates a new route in the route table within a VPC. The route's target + can be either a gateway attached to the VPC or a NAT instance in the + VPC. + + :type route_table_id: str + :param route_table_id: The ID of the route table for the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for the + destination match. + + :type gateway_id: str + :param gateway_id: The ID of the gateway attached to your VPC. + + :type instance_id: str + :param instance_id: The ID of a NAT instance in your VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'DestinationCidrBlock': destination_cidr_block + } + + if gateway_id is not None: + params['GatewayId'] = gateway_id + elif instance_id is not None: + params['InstanceId'] = instance_id + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('CreateRoute', params) + + def replace_route(self, route_table_id, destination_cidr_block, + gateway_id=None, instance_id=None, interface_id=None, + dry_run=False): + """ + Replaces an existing route within a route table in a VPC. + + :type route_table_id: str + :param route_table_id: The ID of the route table for the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for the + destination match. + + :type gateway_id: str + :param gateway_id: The ID of the gateway attached to your VPC. + + :type instance_id: str + :param instance_id: The ID of a NAT instance in your VPC. + + :type interface_id: str + :param interface_id: Allows routing to network interface attachments. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'DestinationCidrBlock': destination_cidr_block + } + + if gateway_id is not None: + params['GatewayId'] = gateway_id + elif instance_id is not None: + params['InstanceId'] = instance_id + elif interface_id is not None: + params['NetworkInterfaceId'] = interface_id + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('ReplaceRoute', params) + + def delete_route(self, route_table_id, destination_cidr_block, + dry_run=False): + """ + Deletes a route from a route table within a VPC. + + :type route_table_id: str + :param route_table_id: The ID of the route table with the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for + destination match. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {
+            'RouteTableId': route_table_id,
+            'DestinationCidrBlock': destination_cidr_block
+        }
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteRoute', params)
+
+    # Internet Gateways
+
+    def get_all_internet_gateways(self, internet_gateway_ids=None,
+                                  filters=None, dry_run=False):
+        """
+        Get a list of internet gateways. You can filter results to return
+        information about only those gateways that you're interested in.
+
+        :type internet_gateway_ids: list
+        :param internet_gateway_ids: A list of strings with the desired
+            gateway IDs.
+
+        :type filters: list of tuples
+        :param filters: A list of tuples containing filters. Each tuple
+                        consists of a filter key and a filter value.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {}
+
+        if internet_gateway_ids:
+            self.build_list_params(params, internet_gateway_ids,
+                                   'InternetGatewayId')
+        if filters:
+            self.build_filter_params(params, dict(filters))
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeInternetGateways', params,
+                             [('item', InternetGateway)])
+
+    def create_internet_gateway(self, dry_run=False):
+        """
+        Creates an internet gateway for the VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: Newly created internet gateway.
+        :return: A :class:`boto.vpc.internetgateway.InternetGateway` object
+        """
+        params = {}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateInternetGateway', params,
+                               InternetGateway)
+
+    def delete_internet_gateway(self, internet_gateway_id, dry_run=False):
+        """
+        Deletes an internet gateway from the VPC.
+
+        :type internet_gateway_id: str
+        :param internet_gateway_id: The ID of the internet gateway to delete.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'InternetGatewayId': internet_gateway_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteInternetGateway', params)
+
+    def attach_internet_gateway(self, internet_gateway_id, vpc_id,
+                                dry_run=False):
+        """
+        Attach an internet gateway to a specific VPC.
+
+        :type internet_gateway_id: str
+        :param internet_gateway_id: The ID of the internet gateway to attach.
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the VPC to attach to.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {
+            'InternetGatewayId': internet_gateway_id,
+            'VpcId': vpc_id
+        }
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('AttachInternetGateway', params)
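+
+    # Typical create-and-attach flow (a hedged sketch; ``conn`` is a
+    # VPCConnection and the VPC ID is a placeholder):
+    #
+    #   igw = conn.create_internet_gateway()
+    #   conn.attach_internet_gateway(igw.id, 'vpc-12345678')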
+
+    def detach_internet_gateway(self, internet_gateway_id, vpc_id,
+                                dry_run=False):
+        """
+        Detach an internet gateway from a specific VPC.
+
+        :type internet_gateway_id: str
+        :param internet_gateway_id: The ID of the internet gateway to detach.
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the VPC to detach from.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {
+            'InternetGatewayId': internet_gateway_id,
+            'VpcId': vpc_id
+        }
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DetachInternetGateway', params)
+
+    # Customer Gateways
+
+    def get_all_customer_gateways(self, customer_gateway_ids=None,
+                                  filters=None, dry_run=False):
+        """
+        Retrieve information about your CustomerGateways. You can filter
+        results to return information only about those CustomerGateways that
+        match your search parameters. Otherwise, all CustomerGateways
+        associated with your account are returned.
+
+        :type customer_gateway_ids: list
+        :param customer_gateway_ids: A list of strings with the desired
+            CustomerGateway ID's.
+
+        :type filters: list of tuples
+        :param filters: A list of tuples containing filters. Each tuple
+                        consists of a filter key and a filter value.
+                        Possible filter keys are:
+
+                        - *state*, the state of the CustomerGateway
+                          (pending, available, deleting, deleted)
+                        - *type*, the type of customer gateway (ipsec.1)
+                        - *ipAddress*, the IP address of the customer
+                          gateway's internet-routable external interface
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.customergateway.CustomerGateway`
+        """
+        params = {}
+        if customer_gateway_ids:
+            self.build_list_params(params, customer_gateway_ids,
+                                   'CustomerGatewayId')
+        if filters:
+            self.build_filter_params(params, dict(filters))
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_list('DescribeCustomerGateways', params,
+                             [('item', CustomerGateway)])
+
+    def create_customer_gateway(self, type, ip_address, bgp_asn,
+                                dry_run=False):
+        """
+        Create a new Customer Gateway
+
+        :type type: str
+        :param type: Type of VPN Connection. Only valid value currently is
+            'ipsec.1'
+
+        :type ip_address: str
+        :param ip_address: Internet-routable IP address for customer's
+            gateway. Must be a static address.
+
+        :type bgp_asn: str
+        :param bgp_asn: Customer gateway's Border Gateway Protocol (BGP)
+            Autonomous System Number (ASN)
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: The newly created CustomerGateway
+        :return: A :class:`boto.vpc.customergateway.CustomerGateway` object
+        """
+        params = {'Type': type,
+                  'IpAddress': ip_address,
+                  'BgpAsn': bgp_asn}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateCustomerGateway', params,
+                               CustomerGateway)
+
+    def delete_customer_gateway(self, customer_gateway_id, dry_run=False):
+        """
+        Delete a Customer Gateway.
+
+        :type customer_gateway_id: str
+        :param customer_gateway_id: The ID of the customer_gateway to be
+            deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'CustomerGatewayId': customer_gateway_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteCustomerGateway', params)
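+
+    # Illustrative call (the address and ASN are placeholders):
+    #
+    #   cgw = conn.create_customer_gateway('ipsec.1', '203.0.113.10', '65000')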
+
+    # VPN Gateways
+
+    def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None,
+                             dry_run=False):
+        """
+        Retrieve information about your VpnGateways. You can filter results
+        to return information only about those VpnGateways that match your
+        search parameters. Otherwise, all VpnGateways associated with your
+        account are returned.
+
+        :type vpn_gateway_ids: list
+        :param vpn_gateway_ids: A list of strings with the desired VpnGateway
+            ID's
+
+        :type filters: list of tuples
+        :param filters: A list of tuples containing filters. Each tuple
+                        consists of a filter key and a filter value.
+                        Possible filter keys are:
+
+                        - *state*, a list of states of the VpnGateway
+                          (pending, available, deleting, deleted)
+                        - *type*, a list of types of customer gateway (ipsec.1)
+                        - *availabilityZone*, a list of Availability Zones the
+                          VPN gateway is in.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.vpngateway.VpnGateway`
+        """
+        params = {}
+        if vpn_gateway_ids:
+            self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
+        if filters:
+            self.build_filter_params(params, dict(filters))
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeVpnGateways', params,
+                             [('item', VpnGateway)])
+
+    def create_vpn_gateway(self, type, availability_zone=None, dry_run=False):
+        """
+        Create a new Vpn Gateway
+
+        :type type: str
+        :param type: Type of VPN Connection. Only valid value currently is
+            'ipsec.1'
+
+        :type availability_zone: str
+        :param availability_zone: The Availability Zone where you want the
+            VPN gateway.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: The newly created VpnGateway
+        :return: A :class:`boto.vpc.vpngateway.VpnGateway` object
+        """
+        params = {'Type': type}
+        if availability_zone:
+            params['AvailabilityZone'] = availability_zone
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateVpnGateway', params, VpnGateway)
+
+    def delete_vpn_gateway(self, vpn_gateway_id, dry_run=False):
+        """
+        Delete a Vpn Gateway.
+
+        :type vpn_gateway_id: str
+        :param vpn_gateway_id: The ID of the vpn_gateway to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VpnGatewayId': vpn_gateway_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteVpnGateway', params)
+
+    def attach_vpn_gateway(self, vpn_gateway_id, vpc_id, dry_run=False):
+        """
+        Attaches a VPN gateway to a VPC.
+
+        :type vpn_gateway_id: str
+        :param vpn_gateway_id: The ID of the vpn_gateway to attach
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the VPC you want to attach the gateway to.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: An attachment
+        :return: a :class:`boto.vpc.vpngateway.Attachment`
+        """
+        params = {'VpnGatewayId': vpn_gateway_id,
+                  'VpcId': vpc_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('AttachVpnGateway', params, Attachment)
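+
+    # Illustrative attach flow (placeholder IDs):
+    #
+    #   vgw = conn.create_vpn_gateway('ipsec.1')
+    #   attachment = conn.attach_vpn_gateway(vgw.id, 'vpc-12345678')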
+
+    # Subnets
+
+    def get_all_subnets(self, subnet_ids=None, filters=None, dry_run=False):
+        """
+        Retrieve information about your Subnets. You can filter results to
+        return information only about those Subnets that match your search
+        parameters. Otherwise, all Subnets associated with your account
+        are returned.
+
+        :type subnet_ids: list
+        :param subnet_ids: A list of strings with the desired Subnet ID's
+
+        :type filters: list of tuples
+        :param filters: A list of tuples containing filters. Each tuple
+                        consists of a filter key and a filter value.
+                        Possible filter keys are:
+
+                        - *state*, a list of states of the Subnet
+                          (pending, available)
+                        - *vpcId*, a list of IDs of the VPC the subnet is in.
+                        - *cidrBlock*, a list of CIDR blocks of the subnet
+                        - *availabilityZone*, list of the Availability Zones
+                          the subnet is in.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.subnet.Subnet`
+        """
+        params = {}
+        if subnet_ids:
+            self.build_list_params(params, subnet_ids, 'SubnetId')
+        if filters:
+            self.build_filter_params(params, dict(filters))
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeSubnets', params, [('item', Subnet)])
+
+    def create_subnet(self, vpc_id, cidr_block, availability_zone=None,
+                      dry_run=False):
+        """
+        Create a new Subnet
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the VPC where you want to create the subnet.
+
+        :type cidr_block: str
+        :param cidr_block: The CIDR block you want the subnet to cover.
+
+        :type availability_zone: str
+        :param availability_zone: The AZ you want the subnet in
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: The newly created Subnet
+        :return: A :class:`boto.vpc.subnet.Subnet` object
+        """
+        params = {'VpcId': vpc_id,
+                  'CidrBlock': cidr_block}
+        if availability_zone:
+            params['AvailabilityZone'] = availability_zone
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateSubnet', params, Subnet)
+
+    def delete_subnet(self, subnet_id, dry_run=False):
+        """
+        Delete a subnet.
+
+        :type subnet_id: str
+        :param subnet_id: The ID of the subnet to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'SubnetId': subnet_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteSubnet', params)
+
+    # DHCP Options
+
+    def get_all_dhcp_options(self, dhcp_options_ids=None, dry_run=False):
+        """
+        Retrieve information about your DhcpOptions.
+
+        :type dhcp_options_ids: list
+        :param dhcp_options_ids: A list of strings with the desired DhcpOption
+            ID's
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions`
+        """
+        params = {}
+        if dhcp_options_ids:
+            self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeDhcpOptions', params,
+                             [('item', DhcpOptions)])
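+
+    # Illustrative call (placeholder IDs):
+    #
+    #   subnet = conn.create_subnet('vpc-12345678', '10.0.0.0/24')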
+
+    def create_dhcp_options(self, domain_name=None, domain_name_servers=None,
+                            ntp_servers=None, netbios_name_servers=None,
+                            netbios_node_type=None, dry_run=False):
+        """
+        Create a new DhcpOption
+
+        This corresponds to
+        http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateDhcpOptions.html
+
+        :type domain_name: str
+        :param domain_name: A domain name of your choice (for example,
+            example.com)
+
+        :type domain_name_servers: list of strings
+        :param domain_name_servers: The IP address of a domain name server.
+            You can specify up to four addresses.
+
+        :type ntp_servers: list of strings
+        :param ntp_servers: The IP address of a Network Time Protocol (NTP)
+            server. You can specify up to four addresses.
+
+        :type netbios_name_servers: list of strings
+        :param netbios_name_servers: The IP address of a NetBIOS name server.
+            You can specify up to four addresses.
+
+        :type netbios_node_type: str
+        :param netbios_node_type: The NetBIOS node type (1, 2, 4, or 8). For
+            more information about the values, see RFC 2132. We recommend you
+            only use 2 at this time (broadcast and multicast are currently not
+            supported).
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: The newly created DhcpOption
+        :return: A :class:`boto.vpc.dhcpoptions.DhcpOptions` object
+        """
+
+        key_counter = 1
+        params = {}
+
+        def insert_option(params, name, value):
+            params['DhcpConfiguration.%d.Key' % (key_counter,)] = name
+            if isinstance(value, (list, tuple)):
+                for idx, value in enumerate(value, 1):
+                    key_name = 'DhcpConfiguration.%d.Value.%d' % (
+                        key_counter, idx)
+                    params[key_name] = value
+            else:
+                key_name = 'DhcpConfiguration.%d.Value.1' % (key_counter,)
+                params[key_name] = value
+
+            return key_counter + 1
+
+        if domain_name:
+            key_counter = insert_option(params,
+                                        'domain-name', domain_name)
+        if domain_name_servers:
+            key_counter = insert_option(params,
+                                        'domain-name-servers',
+                                        domain_name_servers)
+        if ntp_servers:
+            key_counter = insert_option(params,
+                                        'ntp-servers', ntp_servers)
+        if netbios_name_servers:
+            key_counter = insert_option(params,
+                                        'netbios-name-servers',
+                                        netbios_name_servers)
+        if netbios_node_type:
+            key_counter = insert_option(params,
+                                        'netbios-node-type',
+                                        netbios_node_type)
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_object('CreateDhcpOptions', params, DhcpOptions)
+
+    def delete_dhcp_options(self, dhcp_options_id, dry_run=False):
+        """
+        Delete a set of DHCP Options.
+
+        :type dhcp_options_id: str
+        :param dhcp_options_id: The ID of the DHCP Options set to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'DhcpOptionsId': dhcp_options_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteDhcpOptions', params)
+
+    def associate_dhcp_options(self, dhcp_options_id, vpc_id, dry_run=False):
+        """
+        Associate a set of Dhcp Options with a VPC.
+
+        :type dhcp_options_id: str
+        :param dhcp_options_id: The ID of the Dhcp Options
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'DhcpOptionsId': dhcp_options_id,
+                  'VpcId': vpc_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('AssociateDhcpOptions', params)
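+
+    # Illustrative DHCP options flow (placeholder values):
+    #
+    #   opts = conn.create_dhcp_options(domain_name='example.com',
+    #                                   domain_name_servers=['10.0.0.2'])
+    #   conn.associate_dhcp_options(opts.id, 'vpc-12345678')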
+
+    # VPN Connection
+
+    def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None,
+                                dry_run=False):
+        """
+        Retrieve information about your VPN connections. You can filter
+        results to return information only about those VPN connections that
+        match your search parameters. Otherwise, all VPN connections
+        associated with your account are returned.
+
+        :type vpn_connection_ids: list
+        :param vpn_connection_ids: A list of strings with the desired VPN
+            connection ID's
+
+        :type filters: list of tuples
+        :param filters: A list of tuples containing filters. Each tuple
+                        consists of a filter key and a filter value.
+                        Possible filter keys are:
+
+                        - *state*, a list of states of the VPN connection
+                          (pending, available, deleting, deleted)
+                        - *type*, a list of types of connection, currently
+                          'ipsec.1'
+                        - *customerGatewayId*, a list of IDs of the customer
+                          gateway associated with the VPN
+                        - *vpnGatewayId*, a list of IDs of the VPN gateway
+                          associated with the VPN connection
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.vpnconnection.VpnConnection`
+        """
+        params = {}
+        if vpn_connection_ids:
+            self.build_list_params(params, vpn_connection_ids,
+                                   'VpnConnectionId')
+        if filters:
+            self.build_filter_params(params, dict(filters))
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeVpnConnections', params,
+                             [('item', VpnConnection)])
+
+    def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id,
+                              dry_run=False):
+        """
+        Create a new VPN Connection.
+
+        :type type: str
+        :param type: The type of VPN Connection. Currently only 'ipsec.1'
+            is supported
+
+        :type customer_gateway_id: str
+        :param customer_gateway_id: The ID of the customer gateway.
+
+        :type vpn_gateway_id: str
+        :param vpn_gateway_id: The ID of the VPN gateway.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: The newly created VpnConnection
+        :return: A :class:`boto.vpc.vpnconnection.VpnConnection` object
+        """
+        params = {'Type': type,
+                  'CustomerGatewayId': customer_gateway_id,
+                  'VpnGatewayId': vpn_gateway_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateVpnConnection', params, VpnConnection)
+
+    def delete_vpn_connection(self, vpn_connection_id, dry_run=False):
+        """
+        Delete a VPN Connection.
+
+        :type vpn_connection_id: str
+        :param vpn_connection_id: The ID of the vpn_connection to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VpnConnectionId': vpn_connection_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteVpnConnection', params)
+
+    def disable_vgw_route_propagation(self, route_table_id, gateway_id,
+                                      dry_run=False):
+        """
+        Disables a virtual private gateway (VGW) from propagating routes to
+        the routing tables of an Amazon VPC.
+
+        :type route_table_id: str
+        :param route_table_id: The ID of the routing table.
+
+        :type gateway_id: str
+        :param gateway_id: The ID of the virtual private gateway.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {
+            'RouteTableId': route_table_id,
+            'GatewayId': gateway_id,
+        }
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DisableVgwRoutePropagation', params)
+
+    def enable_vgw_route_propagation(self, route_table_id, gateway_id,
+                                     dry_run=False):
+        """
+        Enables a virtual private gateway (VGW) to propagate routes to the
+        routing tables of an Amazon VPC.
+
+        :type route_table_id: str
+        :param route_table_id: The ID of the routing table.
+
+        :type gateway_id: str
+        :param gateway_id: The ID of the virtual private gateway.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+ + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'GatewayId': gateway_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('EnableVgwRoutePropagation', params) + + def create_vpn_connection_route(self, destination_cidr_block, + vpn_connection_id, dry_run=False): + """ + Creates a new static route associated with a VPN connection between an + existing virtual private gateway and a VPN customer gateway. The static + route allows traffic to be routed from the virtual private gateway to + the VPN customer gateway. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR block associated with the local + subnet of the customer data center. + + :type vpn_connection_id: str + :param vpn_connection_id: The ID of the VPN connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'DestinationCidrBlock': destination_cidr_block, + 'VpnConnectionId': vpn_connection_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('CreateVpnConnectionRoute', params) + + def delete_vpn_connection_route(self, destination_cidr_block, + vpn_connection_id, dry_run=False): + """ + Deletes a static route associated with a VPN connection between an + existing virtual private gateway and a VPN customer gateway. The static + route allows traffic to be routed from the virtual private gateway to + the VPN customer gateway. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR block associated with the local + subnet of the customer data center. + + :type vpn_connection_id: str + :param vpn_connection_id: The ID of the VPN connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'DestinationCidrBlock': destination_cidr_block, + 'VpnConnectionId': vpn_connection_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpnConnectionRoute', params) diff --git a/awx/lib/site-packages/boto/vpc/customergateway.py b/awx/lib/site-packages/boto/vpc/customergateway.py new file mode 100644 index 0000000000..959d01fbca --- /dev/null +++ b/awx/lib/site-packages/boto/vpc/customergateway.py @@ -0,0 +1,54 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a Customer Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class CustomerGateway(TaggedEC2Object): + + def __init__(self, connection=None): + TaggedEC2Object.__init__(self, connection) + self.id = None + self.type = None + self.state = None + self.ip_address = None + self.bgp_asn = None + + def __repr__(self): + return 'CustomerGateway:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'customerGatewayId': + self.id = value + elif name == 'ipAddress': + self.ip_address = value + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'bgpAsn': + self.bgp_asn = value + else: + setattr(self, name, value) + diff --git a/awx/lib/site-packages/boto/vpc/dhcpoptions.py b/awx/lib/site-packages/boto/vpc/dhcpoptions.py new file mode 100644 index 0000000000..7484683822 --- /dev/null +++ b/awx/lib/site-packages/boto/vpc/dhcpoptions.py @@ -0,0 +1,72 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a DHCP Options set +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class DhcpValueSet(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'value': + self.append(value) + +class DhcpConfigSet(dict): + + def startElement(self, name, attrs, connection): + if name == 'valueSet': + if self._name not in self: + self[self._name] = DhcpValueSet() + return self[self._name] + + def endElement(self, name, value, connection): + if name == 'key': + self._name = value + +class DhcpOptions(TaggedEC2Object): + + def __init__(self, connection=None): + TaggedEC2Object.__init__(self, connection) + self.id = None + self.options = None + + def __repr__(self): + return 'DhcpOptions:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = TaggedEC2Object.startElement(self, name, attrs, connection) + if retval is not None: + return retval + if name == 'dhcpConfigurationSet': + self.options = DhcpConfigSet() + return self.options + + def endElement(self, name, value, connection): + if name == 'dhcpOptionsId': + self.id = value + else: + setattr(self, name, value) + diff --git a/awx/lib/site-packages/boto/vpc/internetgateway.py b/awx/lib/site-packages/boto/vpc/internetgateway.py new file mode 100644 index 0000000000..011fdee1af --- /dev/null +++ b/awx/lib/site-packages/boto/vpc/internetgateway.py @@ -0,0 +1,72 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
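+
+# Illustrative usage sketch, not part of upstream boto; the VPC ID is an
+# assumption. An InternetGateway is created and attached via a
+# VPCConnection; after a fresh fetch its attachments list holds
+# InternetGatewayAttachment objects parsed from attachmentSet:
+#
+#   igw = conn.create_internet_gateway()
+#   conn.attach_internet_gateway(igw.id, 'vpc-12345678')
+#   igw = conn.get_all_internet_gateways([igw.id])[0]
+#   for att in igw.attachments:
+#       print(att.vpc_id, att.state)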
+ +""" +Represents an Internet Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet + +class InternetGateway(TaggedEC2Object): + def __init__(self, connection=None): + TaggedEC2Object.__init__(self, connection) + self.id = None + self.attachments = [] + + def __repr__(self): + return 'InternetGateway:%s' % self.id + + def startElement(self, name, attrs, connection): + result = super(InternetGateway, self).startElement(name, attrs, connection) + + if result is not None: + # Parent found an interested element, just return it + return result + + if name == 'attachmentSet': + self.attachments = ResultSet([('item', InternetGatewayAttachment)]) + return self.attachments + else: + return None + + def endElement(self, name, value, connection): + if name == 'internetGatewayId': + self.id = value + else: + setattr(self, name, value) + +class InternetGatewayAttachment(object): + def __init__(self, connection=None): + self.vpc_id = None + self.state = None + + def __repr__(self): + return 'InternetGatewayAttachment:%s' % self.vpc_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value diff --git a/awx/lib/site-packages/boto/vpc/routetable.py b/awx/lib/site-packages/boto/vpc/routetable.py new file mode 100644 index 0000000000..b3f0055349 --- /dev/null +++ b/awx/lib/site-packages/boto/vpc/routetable.py @@ -0,0 +1,109 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
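+
+# Illustrative usage sketch, not part of upstream boto; the IDs are
+# assumptions. Route tables are managed through a VPCConnection, and each
+# RouteTable carries Route and RouteAssociation children parsed from
+# routeSet and associationSet:
+#
+#   rt = conn.create_route_table('vpc-12345678')
+#   conn.create_route(rt.id, '0.0.0.0/0', gateway_id='igw-12345678')
+#   conn.associate_route_table(rt.id, 'subnet-12345678')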
+ +""" +Represents a Route Table +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet + +class RouteTable(TaggedEC2Object): + + def __init__(self, connection=None): + TaggedEC2Object.__init__(self, connection) + self.id = None + self.vpc_id = None + self.routes = [] + self.associations = [] + + def __repr__(self): + return 'RouteTable:%s' % self.id + + def startElement(self, name, attrs, connection): + result = super(RouteTable, self).startElement(name, attrs, connection) + + if result is not None: + # Parent found an interested element, just return it + return result + + if name == 'routeSet': + self.routes = ResultSet([('item', Route)]) + return self.routes + elif name == 'associationSet': + self.associations = ResultSet([('item', RouteAssociation)]) + return self.associations + else: + return None + + def endElement(self, name, value, connection): + if name == 'routeTableId': + self.id = value + elif name == 'vpcId': + self.vpc_id = value + else: + setattr(self, name, value) + +class Route(object): + def __init__(self, connection=None): + self.destination_cidr_block = None + self.gateway_id = None + self.instance_id = None + self.state = None + + def __repr__(self): + return 'Route:%s' % self.destination_cidr_block + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'destinationCidrBlock': + self.destination_cidr_block = value + elif name == 'gatewayId': + self.gateway_id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'state': + self.state = value + +class RouteAssociation(object): + def __init__(self, connection=None): + self.id = None + self.route_table_id = None + self.subnet_id = None + self.main = False + + def __repr__(self): + return 'RouteAssociation:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'routeTableAssociationId': + self.id = value + elif name == 'routeTableId': + self.route_table_id = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'main': + self.main = value == 'true' diff --git a/awx/lib/site-packages/boto/vpc/subnet.py b/awx/lib/site-packages/boto/vpc/subnet.py new file mode 100644 index 0000000000..f87d72c289 --- /dev/null +++ b/awx/lib/site-packages/boto/vpc/subnet.py @@ -0,0 +1,57 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
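+
+# Illustrative usage sketch, not part of upstream boto; the VPC ID and CIDR
+# are assumptions. Subnets are created via a VPCConnection, and
+# availableIpAddressCount is parsed into an int by endElement below:
+#
+#   subnet = conn.create_subnet('vpc-12345678', '10.0.1.0/24')
+#   print(subnet.id, subnet.state, subnet.available_ip_address_count)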
+ +""" +Represents a Subnet +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class Subnet(TaggedEC2Object): + + def __init__(self, connection=None): + TaggedEC2Object.__init__(self, connection) + self.id = None + self.vpc_id = None + self.state = None + self.cidr_block = None + self.available_ip_address_count = 0 + self.availability_zone = None + + def __repr__(self): + return 'Subnet:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'subnetId': + self.id = value + elif name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value + elif name == 'cidrBlock': + self.cidr_block = value + elif name == 'availableIpAddressCount': + self.available_ip_address_count = int(value) + elif name == 'availabilityZone': + self.availability_zone = value + else: + setattr(self, name, value) + diff --git a/awx/lib/site-packages/boto/vpc/vpc.py b/awx/lib/site-packages/boto/vpc/vpc.py new file mode 100644 index 0000000000..2eb480d192 --- /dev/null +++ b/awx/lib/site-packages/boto/vpc/vpc.py @@ -0,0 +1,85 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Virtual Private Cloud. +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class VPC(TaggedEC2Object): + + def __init__(self, connection=None): + """ + Represents a VPC. + + :ivar id: The unique ID of the VPC. + :ivar dhcp_options_id: The ID of the set of DHCP options you've associated with the VPC + (or default if the default options are associated with the VPC). + :ivar state: The current state of the VPC. + :ivar cidr_block: The CIDR block for the VPC. + :ivar is_default: Indicates whether the VPC is the default VPC. + :ivar instance_tenancy: The allowed tenancy of instances launched into the VPC. 
+        """
+        TaggedEC2Object.__init__(self, connection)
+        self.id = None
+        self.dhcp_options_id = None
+        self.state = None
+        self.cidr_block = None
+        self.is_default = None
+        self.instance_tenancy = None
+
+    def __repr__(self):
+        return 'VPC:%s' % self.id
+
+    def endElement(self, name, value, connection):
+        if name == 'vpcId':
+            self.id = value
+        elif name == 'dhcpOptionsId':
+            self.dhcp_options_id = value
+        elif name == 'state':
+            self.state = value
+        elif name == 'cidrBlock':
+            self.cidr_block = value
+        elif name == 'isDefault':
+            self.is_default = True if value == 'true' else False
+        elif name == 'instanceTenancy':
+            self.instance_tenancy = value
+        else:
+            setattr(self, name, value)
+
+    def delete(self):
+        return self.connection.delete_vpc(self.id)
+
+    def _update(self, updated):
+        self.__dict__.update(updated.__dict__)
+
+    def update(self, validate=False, dry_run=False):
+        vpc_list = self.connection.get_all_vpcs(
+            [self.id],
+            dry_run=dry_run
+        )
+        if len(vpc_list):
+            updated_vpc = vpc_list[0]
+            self._update(updated_vpc)
+        elif validate:
+            raise ValueError('%s is not a valid VPC ID' % (self.id,))
+        return self.state
diff --git a/awx/lib/site-packages/boto/vpc/vpnconnection.py b/awx/lib/site-packages/boto/vpc/vpnconnection.py
new file mode 100644
index 0000000000..c36492f53c
--- /dev/null
+++ b/awx/lib/site-packages/boto/vpc/vpnconnection.py
@@ -0,0 +1,204 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import boto
+from datetime import datetime
+from boto.resultset import ResultSet
+
+"""
+Represents a VPN Connection
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
+class VpnConnectionOptions(object):
+    """
+    Represents VPN connection options
+
+    :ivar static_routes_only: Indicates whether the VPN connection uses static
+        routes only. Static routes must be used for devices that don't support
+        BGP.
+
+    """
+    def __init__(self, static_routes_only=None):
+        self.static_routes_only = static_routes_only
+
+    def __repr__(self):
+        return 'VpnConnectionOptions'
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'staticRoutesOnly':
+            self.static_routes_only = True if value == 'true' else False
+        else:
+            setattr(self, name, value)
+
+class VpnStaticRoute(object):
+    """
+    Represents a static route for a VPN connection.
+
+    :ivar destination_cidr_block: The CIDR block associated with the local
+        subnet of the customer data center.
+    :ivar source: Indicates how the routes were provided.
+    :ivar state: The current state of the static route.
+    """
+    def __init__(self, destination_cidr_block=None, source=None, state=None):
+        self.destination_cidr_block = destination_cidr_block
+        self.source = source
+        self.state = state
+
+    def __repr__(self):
+        return 'VpnStaticRoute: %s' % self.destination_cidr_block
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'destinationCidrBlock':
+            self.destination_cidr_block = value
+        elif name == 'source':
+            self.source = value
+        elif name == 'state':
+            self.state = value
+        else:
+            setattr(self, name, value)
+
+class VpnTunnel(object):
+    """
+    Represents telemetry for a VPN tunnel
+
+    :ivar outside_ip_address: The Internet-routable IP address of the
+        virtual private gateway's outside interface.
+    :ivar status: The status of the VPN tunnel. Valid values: UP | DOWN
+    :ivar last_status_change: The date and time of the last change in status.
+    :ivar status_message: If an error occurs, a description of the error.
+    :ivar accepted_route_count: The number of accepted routes.
+    """
+    def __init__(self, outside_ip_address=None, status=None, last_status_change=None,
+                 status_message=None, accepted_route_count=None):
+        self.outside_ip_address = outside_ip_address
+        self.status = status
+        self.last_status_change = last_status_change
+        self.status_message = status_message
+        self.accepted_route_count = accepted_route_count
+
+    def __repr__(self):
+        return 'VpnTunnel: %s' % self.outside_ip_address
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'outsideIpAddress':
+            self.outside_ip_address = value
+        elif name == 'status':
+            self.status = value
+        elif name == 'lastStatusChange':
+            self.last_status_change = datetime.strptime(value,
+                                                        '%Y-%m-%dT%H:%M:%S.%fZ')
+        elif name == 'statusMessage':
+            self.status_message = value
+        elif name == 'acceptedRouteCount':
+            try:
+                value = int(value)
+            except ValueError:
+                boto.log.warning('Error converting code (%s) to int' % value)
+            self.accepted_route_count = value
+        else:
+            setattr(self, name, value)
+
+class VpnConnection(TaggedEC2Object):
+    """
+    Represents a VPN Connection
+
+    :ivar id: The ID of the VPN connection.
+    :ivar state: The current state of the VPN connection.
+        Valid values: pending | available | deleting | deleted
+    :ivar customer_gateway_configuration: The configuration information for the
+        VPN connection's customer gateway (in the native XML format). This
+        element is always present in the
+        :class:`boto.vpc.VPCConnection.create_vpn_connection` response;
+        however, it's present in the
+        :class:`boto.vpc.VPCConnection.get_all_vpn_connections` response only
+        if the VPN connection is in the pending or available state.
+    :ivar type: The type of VPN connection (ipsec.1).
+    :ivar customer_gateway_id: The ID of the customer gateway at your end of
+        the VPN connection.
+    :ivar vpn_gateway_id: The ID of the virtual private gateway
+        at the AWS side of the VPN connection.
+    :ivar tunnels: A list of the VPN tunnels (always 2)
+    :ivar options: The option set describing the VPN connection.
+    :ivar static_routes: A list of static routes associated with a VPN
+        connection.
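+
+    Example (illustrative; ``conn``, ``cgw`` and ``vgw`` are assumed to be a
+    VPCConnection, a customer gateway and a virtual private gateway)::
+
+        vpn = conn.create_vpn_connection('ipsec.1', cgw.id, vgw.id)
+        for tunnel in vpn.tunnels:
+            print(tunnel.outside_ip_address, tunnel.status)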
+ + """ + def __init__(self, connection=None): + TaggedEC2Object.__init__(self, connection) + self.id = None + self.state = None + self.customer_gateway_configuration = None + self.type = None + self.customer_gateway_id = None + self.vpn_gateway_id = None + self.tunnels = [] + self.options = None + self.static_routes = [] + + def __repr__(self): + return 'VpnConnection:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(VpnConnection, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'vgwTelemetry': + self.tunnels = ResultSet([('item', VpnTunnel)]) + return self.tunnels + elif name == 'routes': + self.static_routes = ResultSet([('item', VpnStaticRoute)]) + return self.static_routes + elif name == 'options': + self.options = VpnConnectionOptions() + return self.options + return None + + def endElement(self, name, value, connection): + if name == 'vpnConnectionId': + self.id = value + elif name == 'state': + self.state = value + elif name == 'customerGatewayConfiguration': + self.customer_gateway_configuration = value + elif name == 'type': + self.type = value + elif name == 'customerGatewayId': + self.customer_gateway_id = value + elif name == 'vpnGatewayId': + self.vpn_gateway_id = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + return self.connection.delete_vpn_connection( + self.id, + dry_run=dry_run + ) diff --git a/awx/lib/site-packages/boto/vpc/vpngateway.py b/awx/lib/site-packages/boto/vpc/vpngateway.py new file mode 100644 index 0000000000..fe476d935b --- /dev/null +++ b/awx/lib/site-packages/boto/vpc/vpngateway.py @@ -0,0 +1,87 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
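+
+# Illustrative usage sketch, not part of upstream boto; the VPC ID is an
+# assumption. A VpnGateway created through a VPCConnection can attach
+# itself to a VPC with the attach() helper defined below:
+#
+#   vgw = conn.create_vpn_gateway('ipsec.1')
+#   vgw.attach('vpc-12345678')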
+ +""" +Represents a Vpn Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class Attachment(object): + + def __init__(self, connection=None): + self.vpc_id = None + self.state = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + +class VpnGateway(TaggedEC2Object): + + def __init__(self, connection=None): + TaggedEC2Object.__init__(self, connection) + self.id = None + self.type = None + self.state = None + self.availability_zone = None + self.attachments = [] + + def __repr__(self): + return 'VpnGateway:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = TaggedEC2Object.startElement(self, name, attrs, connection) + if retval is not None: + return retval + if name == 'item': + att = Attachment() + self.attachments.append(att) + return att + + def endElement(self, name, value, connection): + if name == 'vpnGatewayId': + self.id = value + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'attachments': + pass + else: + setattr(self, name, value) + + def attach(self, vpc_id, dry_run=False): + return self.connection.attach_vpn_gateway( + self.id, + vpc_id, + dry_run=dry_run + ) + diff --git a/awx/lib/site-packages/celery/__init__.py b/awx/lib/site-packages/celery/__init__.py index 49a5ed9baf..335047b024 100644 --- a/awx/lib/site-packages/celery/__init__.py +++ b/awx/lib/site-packages/celery/__init__.py @@ -8,7 +8,7 @@ from __future__ import absolute_import SERIES = 'Chiastic Slide' -VERSION = (3, 0, 22) +VERSION = (3, 0, 23) __version__ = '.'.join(str(p) for p in VERSION[0:3]) + ''.join(VERSION[3:]) __author__ = 'Ask Solem' __contact__ = 'ask@celeryproject.org' diff --git a/awx/lib/site-packages/celery/app/base.py b/awx/lib/site-packages/celery/app/base.py index c4804aae0c..d69dbc495b 100644 --- a/awx/lib/site-packages/celery/app/base.py +++ b/awx/lib/site-packages/celery/app/base.py @@ -27,7 +27,7 @@ from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured from celery.loaders import get_loader_cls from celery.local import PromiseProxy, maybe_evaluate from celery._state import _task_stack, _tls, get_current_app, _register_app -from celery.utils.functional import first +from celery.utils.functional import first, maybe_list from celery.utils.imports import instantiate, symbol_by_name from .annotations import prepare as prepare_annotations @@ -230,6 +230,7 @@ class Celery(object): def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, result_cls=None, expires=None, queues=None, publisher=None, + link=None, link_error=None, **options): producer = producer or publisher # XXX compat if self.conf.CELERY_ALWAYS_EAGER: # pragma: no cover @@ -246,6 +247,8 @@ class Celery(object): name, args, kwargs, task_id=task_id, countdown=countdown, eta=eta, + callbacks=maybe_list(link), + errbacks=maybe_list(link_error), expires=expires, **options )) diff --git a/awx/lib/site-packages/celery/canvas.py b/awx/lib/site-packages/celery/canvas.py index 0eb360f6de..c7011e172c 100644 --- a/awx/lib/site-packages/celery/canvas.py +++ b/awx/lib/site-packages/celery/canvas.py @@ -432,6 +432,11 @@ class group(Signature): def __repr__(self): return repr(self.tasks) + + @property + def type(self): + 
return self._type or self.tasks[0].type.app.tasks[self['task']] + Signature.register_type(group) diff --git a/awx/lib/site-packages/celery/platforms.py b/awx/lib/site-packages/celery/platforms.py index 3e6b0f5338..bc2b40a9a2 100644 --- a/awx/lib/site-packages/celery/platforms.py +++ b/awx/lib/site-packages/celery/platforms.py @@ -292,7 +292,7 @@ class DaemonContext(object): self.stdfds = (sys.stdin, sys.stdout, sys.stderr) def redirect_to_null(self, fd): - if fd: + if fd is not None: dest = os.open(os.devnull, os.O_RDWR) os.dup2(dest, fd) diff --git a/awx/lib/site-packages/celery/states.py b/awx/lib/site-packages/celery/states.py index 4563fb72f7..925953ac10 100644 --- a/awx/lib/site-packages/celery/states.py +++ b/awx/lib/site-packages/celery/states.py @@ -71,6 +71,10 @@ PRECEDENCE = ['SUCCESS', 'RETRY', 'PENDING'] +#: Hash lookup of PRECEDENCE to index +PRECEDENCE_LOOKUP = dict(zip(PRECEDENCE, range(0, len(PRECEDENCE)))) +NONE_PRECEDENCE = PRECEDENCE_LOOKUP[None] + def precedence(state): """Get the precedence index for state. @@ -79,9 +83,9 @@ def precedence(state): """ try: - return PRECEDENCE.index(state) - except ValueError: - return PRECEDENCE.index(None) + return PRECEDENCE_LOOKUP[state] + except KeyError: + return NONE_PRECEDENCE class state(str): diff --git a/awx/lib/site-packages/celery/tests/utilities/test_term.py b/awx/lib/site-packages/celery/tests/utilities/test_term.py index 5e6ca50631..4f3e7ff22a 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_term.py +++ b/awx/lib/site-packages/celery/tests/utilities/test_term.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import +from kombu.utils import encoding from kombu.utils.encoding import str_t from celery.utils import term @@ -11,6 +12,16 @@ from celery.tests.utils import Case class test_colored(Case): + def setUp(self): + self._prev_encoding = encoding.default_encoding + + def getdefaultencoding(): + return 'utf-8' + encoding.default_encoding = getdefaultencoding + + def tearDown(self): + encoding.default_encoding = self._prev_encoding + def test_colors(self): colors = ( ('black', term.BLACK), diff --git a/awx/lib/site-packages/celery/utils/functional.py b/awx/lib/site-packages/celery/utils/functional.py index 242dda2280..011f32343a 100644 --- a/awx/lib/site-packages/celery/utils/functional.py +++ b/awx/lib/site-packages/celery/utils/functional.py @@ -280,6 +280,9 @@ class _regen(UserList, list): def __init__(self, it): self.__it = it + def __reduce__(self): + return list, (self.data, ) + @cached_property def data(self): return list(self.__it) diff --git a/awx/lib/site-packages/d2to1/__init__.py b/awx/lib/site-packages/d2to1/__init__.py new file mode 100644 index 0000000000..4089e8fa52 --- /dev/null +++ b/awx/lib/site-packages/d2to1/__init__.py @@ -0,0 +1,4 @@ +try: + __version__ = __import__('pkg_resources').get_distribution('d2to1').version +except: + __version__ = '' diff --git a/awx/lib/site-packages/d2to1/core.py b/awx/lib/site-packages/d2to1/core.py new file mode 100644 index 0000000000..929f575764 --- /dev/null +++ b/awx/lib/site-packages/d2to1/core.py @@ -0,0 +1,83 @@ +import os +import sys +import warnings + +from distutils.core import Distribution as _Distribution +from distutils.errors import DistutilsFileError, DistutilsSetupError +from setuptools.dist import _get_unpatched + +from .extern import six +from .util import DefaultGetDict, IgnoreDict, cfg_to_args + + +_Distribution = _get_unpatched(_Distribution) + + +def d2to1(dist, attr, value): + """Implements the actual d2to1 
setup() keyword. When used, this should be + the only keyword in your setup() aside from `setup_requires`. + + If given as a string, the value of d2to1 is assumed to be the relative path + to the setup.cfg file to use. Otherwise, if it evaluates to true, it + simply assumes that d2to1 should be used, and the default 'setup.cfg' is + used. + + This works by reading the setup.cfg file, parsing out the supported + metadata and command options, and using them to rebuild the + `DistributionMetadata` object and set the newly added command options. + + The reason for doing things this way is that a custom `Distribution` class + will not play nicely with setup_requires; however, this implementation may + not work well with distributions that do use a `Distribution` subclass. + """ + + if not value: + return + if isinstance(value, six.string_types): + path = os.path.abspath(value) + else: + path = os.path.abspath('setup.cfg') + if not os.path.exists(path): + raise DistutilsFileError( + 'The setup.cfg file %s does not exist.' % path) + + # Converts the setup.cfg file to setup() arguments + try: + attrs = cfg_to_args(path) + except: + e = sys.exc_info()[1] + raise DistutilsSetupError( + 'Error parsing %s: %s: %s' % (path, e.__class__.__name__, + e.args[0])) + + # Repeat some of the Distribution initialization code with the newly + # provided attrs + if attrs: + # Skips 'options' and 'licence' support which are rarely used; may add + # back in later if demanded + for key, val in six.iteritems(attrs): + if hasattr(dist.metadata, 'set_' + key): + getattr(dist.metadata, 'set_' + key)(val) + elif hasattr(dist.metadata, key): + setattr(dist.metadata, key, val) + elif hasattr(dist, key): + setattr(dist, key, val) + else: + msg = 'Unknown distribution option: %s' % repr(key) + warnings.warn(msg) + + # Re-finalize the underlying Distribution + _Distribution.finalize_options(dist) + + # This bit comes out of distribute/setuptools + if isinstance(dist.metadata.version, six.integer_types + (float,)): + # Some people apparently take "version number" too literally :) + dist.metadata.version = str(dist.metadata.version) + + # This bit of hackery is necessary so that the Distribution will ignore + # normally unsupport command options (namely pre-hooks and post-hooks). + # dist.command_options is normally a dict mapping command names to dicts of + # their options. 
Now it will be a defaultdict that returns IgnoreDicts for + # the each command's options so we can pass through the unsupported options + ignore = ['pre_hook.*', 'post_hook.*'] + dist.command_options = DefaultGetDict(lambda: IgnoreDict(ignore)) diff --git a/awx/lib/site-packages/d2to1/extern/__init__.py b/awx/lib/site-packages/d2to1/extern/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/d2to1/extern/six.py b/awx/lib/site-packages/d2to1/extern/six.py new file mode 100644 index 0000000000..0cdd1c7e3d --- /dev/null +++ b/awx/lib/site-packages/d2to1/extern/six.py @@ -0,0 +1,386 @@ +# Copyright (c) 2010-2011 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Utilities for writing code that runs on Python 2 and 3""" + +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.2.0" + + +# True if we are running on Python 3. +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform == "java": + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) + # This is a bit ugly, but it avoids running this again. 
+ delattr(tp, self.name) + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + + +class _MovedItems(types.ModuleType): + """Lazy loading of moved objects""" + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("reload_module", "__builtin__", "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("winreg", "_winreg"), +] +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) +del attr + +moves = sys.modules["six.moves"] = _MovedItems("moves") + 
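+
+# Illustrative example, not part of upstream six: with the registry above in
+# place, renamed stdlib modules import uniformly on Python 2 and 3. d2to1's
+# own util module uses exactly this pattern:
+#
+#   from .extern.six import moves as m
+#   RawConfigParser = m.configparser.RawConfigParser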
+ +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_code = "__code__" + _func_defaults = "__defaults__" + + _iterkeys = "keys" + _itervalues = "values" + _iteritems = "items" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_code = "func_code" + _func_defaults = "func_defaults" + + _iterkeys = "iterkeys" + _itervalues = "itervalues" + _iteritems = "iteritems" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +if PY3: + def get_unbound_function(unbound): + return unbound + + Iterator = object + + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) +else: + def get_unbound_function(unbound): + return unbound.im_func + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) + + +def iterkeys(d): + """Return an iterator over the keys of a dictionary.""" + return iter(getattr(d, _iterkeys)()) + +def itervalues(d): + """Return an iterator over the values of a dictionary.""" + return iter(getattr(d, _itervalues)()) + +def iteritems(d): + """Return an iterator over the (key, value) pairs of a dictionary.""" + return iter(getattr(d, _iteritems)()) + + +if PY3: + def b(s): + return s.encode("latin-1") + def u(s): + return s + if sys.version_info[1] <= 1: + def int2byte(i): + return bytes((i,)) + else: + # This is about 2x faster than the implementation above on 3.2+ + int2byte = operator.methodcaller("to_bytes", 1, "big") + import io + StringIO = io.StringIO + BytesIO = io.BytesIO +else: + def b(s): + return s + def u(s): + return unicode(s, "unicode_escape") + int2byte = chr + import StringIO + StringIO = BytesIO = StringIO.StringIO +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +if PY3: + import builtins + exec_ = getattr(builtins, "exec") + + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + + print_ = getattr(builtins, "print") + del builtins + +else: + def exec_(code, globs=None, locs=None): + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + + def print_(*args, **kwargs): + """The new-style print function.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + def write(data): + if not isinstance(data, basestring): + data = str(data) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", 
None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) + +_add_doc(reraise, """Reraise an exception.""") + + +def with_metaclass(meta, base=object): + """Create a base class with a metaclass.""" + return meta("NewBase", (base,), {}) diff --git a/awx/lib/site-packages/d2to1/util.py b/awx/lib/site-packages/d2to1/util.py new file mode 100644 index 0000000000..88c262314f --- /dev/null +++ b/awx/lib/site-packages/d2to1/util.py @@ -0,0 +1,580 @@ +"""The code in this module is mostly copy/pasted out of the distutils2 source +code, as recommended by Tarek Ziade. As such, it may be subject to some change +as distutils2 development continues, and will have to be kept up to date. + +I didn't want to use it directly from distutils2 itself, since I do not want it +to be an installation dependency for our packages yet--it is still too unstable +(the latest version on PyPI doesn't even install). +""" + +# These first two imports are not used, but are needed to get around an +# irritating Python bug that can crop up when using ./setup.py test. +# See: http://www.eby-sarna.com/pipermail/peak/2010-May/003355.html +try: + import multiprocessing +except ImportError: + pass +import logging + +import os +import re +import sys +import traceback + +from collections import defaultdict + +import distutils.ccompiler + +from distutils import log +from distutils.errors import (DistutilsOptionError, DistutilsModuleError, + DistutilsFileError) +from setuptools.command.egg_info import manifest_maker +from setuptools.dist import Distribution +from setuptools.extension import Extension + +from .extern.six import moves as m +RawConfigParser = m.configparser.RawConfigParser + + +# A simplified RE for this; just checks that the line ends with version +# predicates in () +_VERSION_SPEC_RE = re.compile(r'\s*(.*?)\s*\((.*)\)\s*$') + + +# Mappings from setup() keyword arguments to setup.cfg options; +# The values are (section, option) tuples, or simply (section,) tuples if +# the option has the same name as the setup() argument +D1_D2_SETUP_ARGS = { + "name": ("metadata",), + "version": ("metadata",), + "author": ("metadata",), + "author_email": ("metadata",), + "maintainer": ("metadata",), + "maintainer_email": ("metadata",), + "url": ("metadata", "home_page"), + "description": ("metadata", "summary"), + "keywords": ("metadata",), + "long_description": ("metadata", "description"), + "download-url": ("metadata",), + "classifiers": ("metadata", "classifier"), + "platforms": ("metadata", "platform"), # ** + "license": ("metadata",), + # Use setuptools install_requires, not + # broken distutils requires + "install_requires": ("metadata", "requires_dist"), + "setup_requires": ("metadata", "setup_requires_dist"), + "provides": ("metadata", "provides_dist"), # ** + "obsoletes": ("metadata", "obsoletes_dist"), # ** + "package_dir": ("files", 'packages_root'), + "packages": ("files",), + "package_data": ("files",), + "data_files": ("files",), + "scripts": ("files",), + "py_modules": ("files", "modules"), # ** + 
"cmdclass": ("global", "commands"), + # Not supported in distutils2, but provided for + # backwards compatibility with setuptools + "use_2to3": ("backwards_compat", "use_2to3"), + "zip_safe": ("backwards_compat", "zip_safe"), + "tests_require": ("backwards_compat", "tests_require"), + "dependency_links": ("backwards_compat",), + "include_package_data": ("backwards_compat",), +} + +# setup() arguments that can have multiple values in setup.cfg +MULTI_FIELDS = ("classifiers", + "platforms", + "install_requires", + "provides", + "obsoletes", + "packages", + "package_data", + "data_files", + "scripts", + "py_modules", + "dependency_links", + "setup_requires", + "tests_require", + "cmdclass") + +# setup() arguments that contain boolean values +BOOL_FIELDS = ("use_2to3", "zip_safe", "include_package_data") + + +CSV_FIELDS = ("keywords",) + + +log.set_verbosity(log.INFO) + + +def resolve_name(name): + """Resolve a name like ``module.object`` to an object and return it. + + Raise ImportError if the module or name is not found. + """ + + parts = name.split('.') + cursor = len(parts) - 1 + module_name = parts[:cursor] + attr_name = parts[-1] + + while cursor > 0: + try: + ret = __import__('.'.join(module_name), fromlist=[attr_name]) + break + except ImportError: + if cursor == 0: + raise + cursor -= 1 + module_name = parts[:cursor] + attr_name = parts[cursor] + ret = '' + + for part in parts[cursor:]: + try: + ret = getattr(ret, part) + except AttributeError: + raise ImportError(name) + + return ret + + +def cfg_to_args(path='setup.cfg'): + """ Distutils2 to distutils1 compatibility util. + + This method uses an existing setup.cfg to generate a dictionary of + keywords that can be used by distutils.core.setup(kwargs**). + + :param file: + The setup.cfg path. + :raises DistutilsFileError: + When the setup.cfg file is not found. + + """ + + # The method source code really starts here. 
+    parser = RawConfigParser()
+    if not os.path.exists(path):
+        raise DistutilsFileError("file '%s' does not exist" %
+                                 os.path.abspath(path))
+    parser.read(path)
+    config = {}
+    for section in parser.sections():
+        config[section] = dict(parser.items(section))
+
+    # Run setup_hooks, if configured
+    setup_hooks = has_get_option(config, 'global', 'setup_hooks')
+    package_dir = has_get_option(config, 'files', 'packages_root')
+
+    # Add the source package directory to sys.path in case it contains
+    # additional hooks, and to make sure it's on the path before any existing
+    # installations of the package
+    if package_dir:
+        package_dir = os.path.abspath(package_dir)
+        sys.path.insert(0, package_dir)
+
+    try:
+        if setup_hooks:
+            setup_hooks = split_multiline(setup_hooks)
+            for hook in setup_hooks:
+                hook_fn = resolve_name(hook)
+                try:
+                    hook_fn(config)
+                except SystemExit:
+                    log.error('setup hook %s terminated the installation' %
+                              hook)
+                except:
+                    e = sys.exc_info()[1]
+                    log.error('setup hook %s raised exception: %s\n' %
+                              (hook, e))
+                    log.error(traceback.format_exc())
+                    sys.exit(1)
+
+        kwargs = setup_cfg_to_setup_kwargs(config)
+
+        register_custom_compilers(config)
+
+        ext_modules = get_extension_modules(config)
+        if ext_modules:
+            kwargs['ext_modules'] = ext_modules
+
+        entry_points = get_entry_points(config)
+        if entry_points:
+            kwargs['entry_points'] = entry_points
+
+        wrap_commands(kwargs)
+
+        # Handle the [files]/extra_files option
+        extra_files = has_get_option(config, 'files', 'extra_files')
+        if extra_files:
+            extra_files = split_multiline(extra_files)
+            # Let's do a sanity check
+            for filename in extra_files:
+                if not os.path.exists(filename):
+                    raise DistutilsFileError(
+                        '%s from the extra_files option in setup.cfg does not '
+                        'exist' % filename)
+            # Unfortunately the only really sensible way to do this is to
+            # monkey-patch the manifest_maker class
+            @monkeypatch_method(manifest_maker)
+            def add_defaults(self, extra_files=extra_files, log=log):
+                log.info('[d2to1] running patched manifest_maker command '
+                         'with extra_files support')
+                add_defaults._orig(self)
+                self.filelist.extend(extra_files)
+
+    finally:
+        # Perform cleanup if any paths were added to sys.path
+        if package_dir:
+            sys.path.pop(0)
+
+    return kwargs
+
+
+def setup_cfg_to_setup_kwargs(config):
+    """Processes the setup.cfg options and converts them to arguments accepted
+    by setuptools' setup() function.
+    """
+
+    kwargs = {}
+
+    for arg in D1_D2_SETUP_ARGS:
+        if len(D1_D2_SETUP_ARGS[arg]) == 2:
+            # The distutils field name is different from distutils2's.
+            section, option = D1_D2_SETUP_ARGS[arg]
+
+        elif len(D1_D2_SETUP_ARGS[arg]) == 1:
+            # The distutils field name is the same as distutils2's.
+            section = D1_D2_SETUP_ARGS[arg][0]
+            option = arg
+
+        in_cfg_value = has_get_option(config, section, option)
+        if not in_cfg_value:
+            # There is no such option in the setup.cfg
+            if arg == "long_description":
+                in_cfg_value = has_get_option(config, section,
+                                              "description_file")
+                if in_cfg_value:
+                    in_cfg_value = split_multiline(in_cfg_value)
+                    value = ''
+                    for filename in in_cfg_value:
+                        description_file = open(filename)
+                        try:
+                            value += description_file.read().strip() + '\n\n'
+                        finally:
+                            description_file.close()
+                    in_cfg_value = value
+            else:
+                continue
+
+        if arg in CSV_FIELDS:
+            in_cfg_value = split_csv(in_cfg_value)
+        if arg in MULTI_FIELDS:
+            in_cfg_value = split_multiline(in_cfg_value)
+        elif arg in BOOL_FIELDS:
+            # Provide some flexibility here...
+ if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'): + in_cfg_value = True + else: + in_cfg_value = False + + if in_cfg_value: + if arg in ('install_requires', 'tests_require'): + # Replaces PEP345-style version specs with the sort expected by + # setuptools + in_cfg_value = [_VERSION_SPEC_RE.sub(r'\1\2', pred) + for pred in in_cfg_value] + elif arg == 'package_dir': + in_cfg_value = {'': in_cfg_value} + elif arg in ('package_data', 'data_files'): + data_files = {} + firstline = True + prev = None + for line in in_cfg_value: + if '=' in line: + key, value = line.split('=', 1) + key, value = (key.strip(), value.strip()) + if key in data_files: + # Multiple duplicates of the same package name; + # this is for backwards compatibility of the old + # format prior to d2to1 0.2.6. + prev = data_files[key] + prev.extend(value.split()) + else: + prev = data_files[key.strip()] = value.split() + elif firstline: + raise DistutilsOptionError( + 'malformed package_data first line %r (misses ' + '"=")' % line) + else: + prev.extend(line.strip().split()) + firstline = False + if arg == 'data_files': + # the data_files value is a pointlessly different structure + # from the package_data value + data_files = list(data_files.items()) + in_cfg_value = data_files + elif arg == 'cmdclass': + cmdclass = {} + dist = Distribution() + for cls in in_cfg_value: + cls = resolve_name(cls) + cmd = cls(dist) + cmdclass[cmd.get_command_name()] = cls + in_cfg_value = cmdclass + + kwargs[arg] = in_cfg_value + + return kwargs + + +def register_custom_compilers(config): + """Handle custom compilers; this has no real equivalent in distutils, where + additional compilers could only be added programmatically, so we have to + hack it in somehow. + """ + + compilers = has_get_option(config, 'global', 'compilers') + if compilers: + compilers = split_multiline(compilers) + for compiler in compilers: + compiler = resolve_name(compiler) + + # In distutils2 compilers these class attributes exist; for + # distutils1 we just have to make something up + if hasattr(compiler, 'name'): + name = compiler.name + else: + name = compiler.__name__ + if hasattr(compiler, 'description'): + desc = compiler.description + else: + desc = 'custom compiler %s' % name + + module_name = compiler.__module__ + # Note; this *will* override built in compilers with the same name + # TODO: Maybe display a warning about this? + cc = distutils.ccompiler.compiler_class + cc[name] = (module_name, compiler.__name__, desc) + + # HACK!!!! Distutils assumes all compiler modules are in the + # distutils package + sys.modules['distutils.' 
+ module_name] = sys.modules[module_name] + + +def get_extension_modules(config): + """Handle extension modules""" + + EXTENSION_FIELDS = ("sources", + "include_dirs", + "define_macros", + "undef_macros", + "library_dirs", + "libraries", + "runtime_library_dirs", + "extra_objects", + "extra_compile_args", + "extra_link_args", + "export_symbols", + "swig_opts", + "depends") + + ext_modules = [] + for section in config: + if ':' in section: + labels = section.split(':', 1) + else: + # Backwards compatibility for old syntax; don't use this though + labels = section.split('=', 1) + labels = [l.strip() for l in labels] + if (len(labels) == 2) and (labels[0] == 'extension'): + ext_args = {} + for field in EXTENSION_FIELDS: + value = has_get_option(config, section, field) + # All extension module options besides name can have multiple + # values + if not value: + continue + value = split_multiline(value) + if field == 'define_macros': + macros = [] + for macro in value: + macro = macro.split('=', 1) + if len(macro) == 1: + macro = (macro[0].strip(), None) + else: + macro = (macro[0].strip(), macro[1].strip()) + macros.append(macro) + value = macros + ext_args[field] = value + if ext_args: + if 'name' not in ext_args: + ext_args['name'] = labels[1] + ext_modules.append(Extension(ext_args.pop('name'), + **ext_args)) + return ext_modules + + +def get_entry_points(config): + """Process the [entry_points] section of setup.cfg to handle setuptools + entry points. This is, of course, not a standard feature of + distutils2/packaging, but as there is not currently a standard alternative + in packaging, we provide support for them. + """ + + if not 'entry_points' in config: + return {} + + return dict((option, split_multiline(value)) + for option, value in config['entry_points'].items()) + + +def wrap_commands(kwargs): + dist = Distribution() + + # This should suffice to get the same config values and command classes + # that the actual Distribution will see (not counting cmdclass, which is + # handled below) + dist.parse_config_files() + + for cmd, _ in dist.get_command_list(): + hooks = {} + for opt, val in dist.get_option_dict(cmd).items(): + val = val[1] + if opt.startswith('pre_hook.') or opt.startswith('post_hook.'): + hook_type, alias = opt.split('.', 1) + hook_dict = hooks.setdefault(hook_type, {}) + hook_dict[alias] = val + if not hooks: + continue + + if 'cmdclass' in kwargs and cmd in kwargs['cmdclass']: + cmdclass = kwargs['cmdclass'][cmd] + else: + cmdclass = dist.get_command_class(cmd) + + new_cmdclass = wrap_command(cmd, cmdclass, hooks) + kwargs.setdefault('cmdclass', {})[cmd] = new_cmdclass + + +def wrap_command(cmd, cmdclass, hooks): + def run(self, cmdclass=cmdclass): + self.run_command_hooks('pre_hook') + cmdclass.run(self) + self.run_command_hooks('post_hook') + + return type(cmd, (cmdclass, object), + {'run': run, 'run_command_hooks': run_command_hooks, + 'pre_hook': hooks.get('pre_hook'), + 'post_hook': hooks.get('post_hook')}) + + +def run_command_hooks(cmd_obj, hook_kind): + """Run hooks registered for that command and phase. + + *cmd_obj* is a finalized command object; *hook_kind* is either + 'pre_hook' or 'post_hook'. 
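+
+    For example (an illustrative sketch; the section and hook path are
+    assumptions, not from the d2to1 docs), a setup.cfg entry such as::
+
+        [build_ext]
+        pre_hook.my_hook = mypackage.hooks.prepare
+
+    would cause ``mypackage.hooks.prepare`` to be resolved via resolve_name()
+    and called with the finalized ``build_ext`` command object before it runs.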
+ """ + + if hook_kind not in ('pre_hook', 'post_hook'): + raise ValueError('invalid hook kind: %r' % hook_kind) + + hooks = getattr(cmd_obj, hook_kind, None) + + if hooks is None: + return + + for hook in hooks.values(): + if isinstance(hook, str): + try: + hook_obj = resolve_name(hook) + except ImportError: + err = sys.exc_info()[1] # For py3k + raise DistutilsModuleError('cannot find hook %s: %s' % + (hook,err)) + else: + hook_obj = hook + + if not hasattr(hook_obj, '__call__'): + raise DistutilsOptionError('hook %r is not callable' % hook) + + log.info('running %s %s for command %s', + hook_kind, hook, cmd_obj.get_command_name()) + + try : + hook_obj(cmd_obj) + except: + e = sys.exc_info()[1] + log.error('hook %s raised exception: %s\n' % (hook, e)) + log.error(traceback.format_exc()) + sys.exit(1) + + +def has_get_option(config, section, option): + if section in config and option in config[section]: + return config[section][option] + elif section in config and option.replace('_', '-') in config[section]: + return config[section][option.replace('_', '-')] + else: + return False + + +def split_multiline(value): + """Special behaviour when we have a multi line options""" + + value = [element for element in + (line.strip() for line in value.split('\n')) + if element] + return value + + +def split_csv(value): + """Special behaviour when we have a comma separated options""" + + value = [element for element in + (chunk.strip() for chunk in value.split(',')) + if element] + return value + + +def monkeypatch_method(cls): + """A function decorator to monkey-patch a method of the same name on the + given class. + """ + + def wrapper(func): + orig = getattr(cls, func.__name__, None) + if orig and not hasattr(orig, '_orig'): # Already patched + setattr(func, '_orig', orig) + setattr(cls, func.__name__, func) + return func + + return wrapper + + +# The following classes are used to hack Distribution.command_options a bit +class DefaultGetDict(defaultdict): + """Like defaultdict, but the get() method also sets and returns the default + value. + """ + + def get(self, key, default=None): + if default is None: + default = self.default_factory() + return super(DefaultGetDict, self).setdefault(key, default) + + +class IgnoreDict(dict): + """A dictionary that ignores any insertions in which the key is a string + matching any string in `ignore`. The ignore list can also contain wildcard + patterns using '*'. + """ + + def __init__(self, ignore): + self.__ignore = re.compile(r'(%s)' % ('|'.join( + [pat.replace('*', '.*') + for pat in ignore]))) + + def __setitem__(self, key, val): + if self.__ignore.match(key): + return + super(IgnoreDict, self).__setitem__(key, val) diff --git a/awx/lib/site-packages/d2to1/zestreleaser.py b/awx/lib/site-packages/d2to1/zestreleaser.py new file mode 100644 index 0000000000..a2b663208a --- /dev/null +++ b/awx/lib/site-packages/d2to1/zestreleaser.py @@ -0,0 +1,161 @@ +"""zest.releaser entry points to support projects using distutils2-like +setup.cfg files. The only actual functionality this adds is to update the +version option in a setup.cfg file, if it exists. If setup.cfg does not exist, +or does not contain a version option, then this does nothing. + +TODO: d2to1 theoretically supports using a different filename for setup.cfg; +this does not support that. We could hack in support, though I'm not sure how +useful the original functionality is to begin with (and it might be removed) so +we ignore that for now. 
+ +TODO: There exists a proposal +(http://mail.python.org/pipermail/distutils-sig/2011-March/017628.html) to add +a 'version-from-file' option (or something of the like) to distutils2; if this +is added then support for it should be included here as well. +""" + + +import logging +import os + +from .extern.six import print_ +from .extern.six import moves as m +ConfigParser = m.configparser.ConfigParser + + +logger = logging.getLogger(__name__) + + + +def update_setupcfg_version(filename, version): + """Opens the given setup.cfg file, locates the version option in the + [metadata] section, updates it to the new version. + """ + + setup_cfg = open(filename).readlines() + current_section = None + updated = False + + for idx, line in enumerate(setup_cfg): + m = ConfigParser.SECTCRE.match(line) + if m: + if current_section == 'metadata': + # We already parsed the entire metadata section without finding + # a version line, and are now moving into a new section + break + current_section = m.group('header') + continue + + if '=' not in line: + continue + + opt, val = line.split('=', 1) + opt, val = opt.strip(), val.strip() + if current_section == 'metadata' and opt == 'version': + setup_cfg[idx] = 'version = %s\n' % version + updated = True + break + + if updated: + open(filename, 'w').writelines(setup_cfg) + logger.info("Set %s's version to %r" % (os.path.basename(filename), + version)) + + +def prereleaser_middle(data): + filename = os.path.join(data['workingdir'], 'setup.cfg') + if os.path.exists(filename): + update_setupcfg_version(filename, data['new_version']) + + +def releaser_middle(data): + """ + releaser.middle hook to monkey-patch zest.releaser to support signed + tagging--currently this is the only way to do this. Also monkey-patches to + disable an annoyance where zest.releaser only creates .zip source + distributions. This is supposedly a workaround for a bug in Python 2.4, + but we don't care about Python 2.4. + """ + + import os + import sys + + from zest.releaser.git import Git + from zest.releaser.release import Releaser + + # Copied verbatim from zest.releaser, but with the cmd string modified to + # use the -s option to create a signed tag + def _my_create_tag(self, version): + msg = "Tagging %s" % (version,) + cmd = 'git tag -s %s -m "%s"' % (version, msg) + if os.path.isdir('.git/svn'): + print_("\nEXPERIMENTAL support for git-svn tagging!\n") + cur_branch = open('.git/HEAD').read().strip().split('/')[-1] + print_("You are on branch %s." % (cur_branch,)) + if cur_branch != 'master': + print_("Only the master branch is supported for git-svn " + "tagging.") + print_("Please tag yourself.") + print_("'git tag' needs to list tag named %s." 
% (version,)) + sys.exit() + cmd = [cmd] + local_head = open('.git/refs/heads/master').read() + trunk = open('.git/refs/remotes/trunk').read() + if local_head != trunk: + print_("Your local master diverges from trunk.\n") + # dcommit before local tagging + cmd.insert(0, 'git svn dcommit') + # create tag in svn + cmd.append('git svn tag -m "%s" %s' % (msg, version)) + return cmd + + # Similarly copied from zer.releaser to support use of 'v' in front + # of the version number + def _my_make_tag(self): + from zest.releaser import utils + from os import system + + if self.data['tag_already_exists']: + return + cmds = self.vcs.cmd_create_tag(self.data['version']) + if not isinstance(cmds, list): + cmds = [cmds] + if len(cmds) == 1: + print_("Tag needed to proceed, you can use the following command:") + for cmd in cmds: + print_(cmd) + if utils.ask("Run this command"): + print_(system(cmd)) + else: + # all commands are needed in order to proceed normally + print_("Please create a tag for %s yourself and rerun." % \ + (self.data['version'],)) + sys.exit() + if not self.vcs.tag_exists('v' + self.data['version']): + print_("\nFailed to create tag %s!" % (self.data['version'],)) + sys.exit() + + # Normally all this does is to return '--formats=zip', which is currently + # hard-coded as an option to always add to the sdist command; they ought to + # make this actually optional + def _my_sdist_options(self): + return '' + + Git.cmd_create_tag = _my_create_tag + Releaser._make_tag = _my_make_tag + Releaser._sdist_options = _my_sdist_options + + +def postreleaser_before(data): + """ + Fix the irritating .dev0 default appended to new development versions by + zest.releaser to just append ".dev" without the "0". + """ + + data['dev_version_template'] = '%(new_version)s.dev' + + +def postreleaser_middle(data): + filename = os.path.join(data['workingdir'], 'setup.cfg') + if os.path.exists(filename): + update_setupcfg_version(filename, data['dev_version']) diff --git a/awx/lib/site-packages/django_extensions/__init__.py b/awx/lib/site-packages/django_extensions/__init__.py index 3426dc3739..9c5891c6bf 100644 --- a/awx/lib/site-packages/django_extensions/__init__.py +++ b/awx/lib/site-packages/django_extensions/__init__.py @@ -1,5 +1,5 @@ -VERSION = (1, 2, 0) +VERSION = (1, 2, 2) # Dynamically calculate the version based on VERSION tuple if len(VERSION) > 2 and VERSION[2] is not None: diff --git a/awx/lib/site-packages/django_extensions/admin/widgets.py b/awx/lib/site-packages/django_extensions/admin/widgets.py index 6a2a4af959..1f29d0fe78 100644 --- a/awx/lib/site-packages/django_extensions/admin/widgets.py +++ b/awx/lib/site-packages/django_extensions/admin/widgets.py @@ -93,4 +93,4 @@ class ForeignKeySearchInput(ForeignKeyRawIdWidget): 'django_extensions/widgets/foreignkey_searchinput.html', ), context)) output.reverse() - return mark_safe(six.u(''.join(output))) + return mark_safe(six.u('').join(output)) diff --git a/awx/lib/site-packages/django_extensions/db/fields/encrypted.py b/awx/lib/site-packages/django_extensions/db/fields/encrypted.py index f897598a96..fd0158ca0c 100644 --- a/awx/lib/site-packages/django_extensions/db/fields/encrypted.py +++ b/awx/lib/site-packages/django_extensions/db/fields/encrypted.py @@ -21,9 +21,8 @@ class BaseEncryptedField(models.Field): def __init__(self, *args, **kwargs): if not hasattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR'): - raise ImproperlyConfigured('You must set the ENCRYPTED_FIELD_KEYS_DIR ' + raise ImproperlyConfigured('You must set the 
settings.ENCRYPTED_FIELD_KEYS_DIR ' 'setting to your Keyczar keys directory.') - crypt_class = self.get_crypt_class() self.crypt = crypt_class.Read(settings.ENCRYPTED_FIELD_KEYS_DIR) @@ -67,9 +66,12 @@ class BaseEncryptedField(models.Field): if isinstance(self.crypt.primary_key, keyczar.keys.RsaPublicKey): retval = value elif value and (value.startswith(self.prefix)): - retval = self.crypt.Decrypt(value[len(self.prefix):]) - if retval: - retval = retval.decode('utf-8') + if hasattr(self.crypt, 'Decrypt'): + retval = self.crypt.Decrypt(value[len(self.prefix):]) + if retval: + retval = retval.decode('utf-8') + else: + retval = value else: retval = value return retval diff --git a/awx/lib/site-packages/django_extensions/management/commands/runprofileserver.py b/awx/lib/site-packages/django_extensions/management/commands/runprofileserver.py index 7798a284e6..84206a3c98 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/runprofileserver.py +++ b/awx/lib/site-packages/django_extensions/management/commands/runprofileserver.py @@ -141,12 +141,16 @@ class Command(BaseCommand): def handle(self, addrport='', *args, **options): import django from django.core.servers.basehttp import run, WSGIServerException + try: + from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler + except ImportError: + from django.core.handlers.wsgi import WSGIHandler # noqa + try: from django.core.servers.basehttp import AdminMediaHandler HAS_ADMINMEDIAHANDLER = True except ImportError: HAS_ADMINMEDIAHANDLER = False - from django.core.handlers.wsgi import WSGIHandler if args: raise CommandError('Usage is runserver %s' % self.args) diff --git a/awx/lib/site-packages/django_extensions/tests/__init__.py b/awx/lib/site-packages/django_extensions/tests/__init__.py index c251bab6b7..2da4f14d00 100644 --- a/awx/lib/site-packages/django_extensions/tests/__init__.py +++ b/awx/lib/site-packages/django_extensions/tests/__init__.py @@ -8,12 +8,12 @@ from django_extensions.tests.management_command import CommandTest, ShowTemplate __test_classes__ = [ - DumpScriptTests, JsonFieldTest, UUIDFieldTest, AutoSlugFieldTest, CommandTest, ShowTemplateTagsTests, TruncateLetterTests + DumpScriptTests, JsonFieldTest, UUIDFieldTest, AutoSlugFieldTest, CommandTest, + ShowTemplateTagsTests, TruncateLetterTests ] try: from django_extensions.tests.encrypted_fields import EncryptedFieldsTestCase - from django_extensions.tests.models import Secret # NOQA __test_classes__.append(EncryptedFieldsTestCase) except ImportError: pass diff --git a/awx/lib/site-packages/django_extensions/tests/encrypted_fields.py b/awx/lib/site-packages/django_extensions/tests/encrypted_fields.py index 1451c34e3f..ad391e83ac 100644 --- a/awx/lib/site-packages/django_extensions/tests/encrypted_fields.py +++ b/awx/lib/site-packages/django_extensions/tests/encrypted_fields.py @@ -1,26 +1,134 @@ -from django.db import connection +from contextlib import contextmanager +import functools + from django.conf import settings from django.core.management import call_command +from django.db import connection, models from django.db.models import loading from django.utils import unittest -# Only perform encrypted fields tests if keyczar is present -# Resolves http://github.com/django-extensions/django-extensions/issues/#issue/17 +from django_extensions.tests.models import Secret + +# Only perform encrypted fields tests if keyczar is present. 
Resolves +# http://github.com/django-extensions/django-extensions/issues/#issue/17 try: - from keyczar import keyczar, keyczart, keyinfo # NOQA - from django_extensions.tests.models import Secret from django_extensions.db.fields.encrypted import EncryptedTextField, EncryptedCharField # NOQA - keyczar_active = hasattr(settings, "ENCRYPTED_FIELD_KEYS_DIR") + from keyczar import keyczar, keyczart, keyinfo # NOQA + keyczar_active = True except ImportError: keyczar_active = False -class EncryptedFieldsTestCase(unittest.TestCase): +def run_if_active(func): + "Method decorator that only runs a test if KeyCzar is available." - def __init__(self, *args, **kwargs): - if keyczar_active: - self.crypt = keyczar.Crypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR) - super(EncryptedFieldsTestCase, self).__init__(*args, **kwargs) + @functools.wraps(func) + def inner(self): + if not keyczar_active: + return + return func(self) + return inner + + +# Locations of both private and public keys. +KEY_LOCS = getattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR', {}) + + +@contextmanager +def keys(purpose, mode=None): + """ + A context manager that sets up the correct KeyCzar environment for a test. + + Arguments: + purpose: Either keyczar.keyinfo.DECRYPT_AND_ENCRYPT or + keyczar.keyinfo.ENCRYPT. + mode: If truthy, settings.ENCRYPTED_FIELD_MODE will be set to (and then + reverted from) this value. If falsy, settings.ENCRYPTED_FIELD_MODE + will not be changed. Optional. Default: None. + + Yields: + A Keyczar subclass for the stated purpose. This will be keyczar.Crypter + for DECRYPT_AND_ENCRYPT or keyczar.Encrypter for ENCRYPT. In addition, + settings.ENCRYPTED_FIELD_KEYS_DIR will be set correctly, and then + reverted when the manager exits. + """ + # Store the original settings so we can restore when the manager exits. + orig_setting_dir = getattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR', None) + orig_setting_mode = getattr(settings, 'ENCRYPTED_FIELD_MODE', None) + try: + if mode: + settings.ENCRYPTED_FIELD_MODE = mode + + if purpose == keyinfo.DECRYPT_AND_ENCRYPT: + settings.ENCRYPTED_FIELD_KEYS_DIR = KEY_LOCS['DECRYPT_AND_ENCRYPT'] + yield keyczar.Crypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR) + else: + settings.ENCRYPTED_FIELD_KEYS_DIR = KEY_LOCS['ENCRYPT'] + yield keyczar.Encrypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR) + + except: + raise # Reraise any exceptions. + + finally: + # Restore settings. + settings.ENCRYPTED_FIELD_KEYS_DIR = orig_setting_dir + if mode: + if orig_setting_mode: + settings.ENCRYPTED_FIELD_MODE = orig_setting_mode + else: + del settings.ENCRYPTED_FIELD_MODE + + +@contextmanager +def secret_model(): + """ + A context manager that yields a Secret model defined at runtime. + + All EncryptedField init logic occurs at model class definition time, not at + object instantiation time. This means that in order to test different keys + and modes, we must generate a new class definition at runtime, after + establishing the correct KeyCzar settings. This context manager handles + that process. + + See http://dynamic-models.readthedocs.org/en/latest/ and + https://docs.djangoproject.com/en/dev/topics/db/models/ + #differences-between-proxy-inheritance-and-unmanaged-models + """ + + # Store Django's cached model, if present, so we can restore when the + # manager exits. + orig_model = None + try: + orig_model = loading.cache.app_models['tests']['secret'] + del loading.cache.app_models['tests']['secret'] + except KeyError: + pass + + try: + # Create a new class that shadows tests.models.Secret. 
+ attrs = { + 'name': EncryptedCharField("Name", max_length=Secret._meta.get_field('name').max_length), + 'text': EncryptedTextField("Text"), + '__module__': 'django_extensions.tests.models', + 'Meta': type('Meta', (object, ), { + 'managed': False, + 'db_table': Secret._meta.db_table + }) + } + yield type('Secret', (models.Model, ), attrs) + + except: + raise # Reraise any exceptions. + + finally: + # Restore Django's model cache. + try: + loading.cache.app_models['tests']['secret'] = orig_model + except KeyError: + pass + + +class EncryptedFieldsTestCase(unittest.TestCase): def setUp(self): self.old_installed_apps = settings.INSTALLED_APPS @@ -32,43 +140,131 @@ class EncryptedFieldsTestCase(unittest.TestCase): def tearDown(self): settings.INSTALLED_APPS = self.old_installed_apps + @run_if_active def testCharFieldCreate(self): - if not keyczar_active: - return - test_val = "Test Secret" - secret = Secret.objects.create(name=test_val) - cursor = connection.cursor() - query = "SELECT name FROM %s WHERE id = %d" % (Secret._meta.db_table, secret.id) - cursor.execute(query) - db_val, = cursor.fetchone() - decrypted_val = self.crypt.Decrypt(db_val[len(EncryptedCharField.prefix):]) - self.assertEqual(test_val, decrypted_val) + """ + Uses a private key to encrypt data on model creation. + Verifies the data is encrypted in the database and can be decrypted. + """ + with keys(keyinfo.DECRYPT_AND_ENCRYPT) as crypt: + with secret_model() as model: + test_val = "Test Secret" + secret = model.objects.create(name=test_val) + cursor = connection.cursor() + query = "SELECT name FROM %s WHERE id = %d" % (model._meta.db_table, secret.id) + cursor.execute(query) + db_val, = cursor.fetchone() + decrypted_val = crypt.Decrypt(db_val[len(EncryptedCharField.prefix):]) + self.assertEqual(test_val, decrypted_val) + + @run_if_active def testCharFieldRead(self): - if not keyczar_active: - return - test_val = "Test Secret" - secret = Secret.objects.create(name=test_val) - retrieved_secret = Secret.objects.get(id=secret.id) - self.assertEqual(test_val, retrieved_secret.name) + """ + Uses a private key to encrypt data on model creation. + Verifies the data is decrypted when reading the value back from the + model. + """ + with keys(keyinfo.DECRYPT_AND_ENCRYPT): + with secret_model() as model: + test_val = "Test Secret" + secret = model.objects.create(name=test_val) + retrieved_secret = model.objects.get(id=secret.id) + self.assertEqual(test_val, retrieved_secret.name) + @run_if_active def testTextFieldCreate(self): - if not keyczar_active: - return - test_val = "Test Secret" - secret = Secret.objects.create(text=test_val) - cursor = connection.cursor() - query = "SELECT text FROM %s WHERE id = %d" % (Secret._meta.db_table, secret.id) - cursor.execute(query) - db_val, = cursor.fetchone() - decrypted_val = self.crypt.Decrypt(db_val[len(EncryptedCharField.prefix):]) - self.assertEqual(test_val, decrypted_val) + """ + Uses a private key to encrypt data on model creation. + Verifies the data is encrypted in the database and can be decrypted. 
+ """ + with keys(keyinfo.DECRYPT_AND_ENCRYPT) as crypt: + with secret_model() as model: + test_val = "Test Secret" + secret = model.objects.create(text=test_val) + cursor = connection.cursor() + query = "SELECT text FROM %s WHERE id = %d" % (model._meta.db_table, secret.id) + cursor.execute(query) + db_val, = cursor.fetchone() + decrypted_val = crypt.Decrypt(db_val[len(EncryptedCharField.prefix):]) + self.assertEqual(test_val, decrypted_val) + @run_if_active def testTextFieldRead(self): - if not keyczar_active: - return - test_val = "Test Secret" - secret = Secret.objects.create(text=test_val) - retrieved_secret = Secret.objects.get(id=secret.id) - self.assertEqual(test_val, retrieved_secret.text) + """ + Uses a private key to encrypt data on model creation. + Verifies the data is decrypted when reading the value back from the + model. + """ + with keys(keyinfo.DECRYPT_AND_ENCRYPT): + with secret_model() as model: + test_val = "Test Secret" + secret = model.objects.create(text=test_val) + retrieved_secret = model.objects.get(id=secret.id) + self.assertEqual(test_val, retrieved_secret.text) + + @run_if_active + def testCannotDecrypt(self): + """ + Uses a public key to encrypt data on model creation. + Verifies that the data cannot be decrypted using the same key. + """ + with keys(keyinfo.ENCRYPT, mode=keyinfo.ENCRYPT.name): + with secret_model() as model: + test_val = "Test Secret" + secret = model.objects.create(name=test_val) + retrieved_secret = model.objects.get(id=secret.id) + self.assertNotEqual(test_val, retrieved_secret.name) + self.assertTrue(retrieved_secret.name.startswith(EncryptedCharField.prefix)) + + @run_if_active + def testUnacceptablePurpose(self): + """ + Tries to create an encrypted field with a mode mismatch. + A purpose of "DECRYPT_AND_ENCRYPT" cannot be used with a public key, + since public keys cannot be used for decryption. This should raise an + exception. + """ + with self.assertRaises(keyczar.errors.KeyczarError): + with keys(keyinfo.ENCRYPT): + with secret_model(): + # A KeyCzar exception should get raised during class + # definition time, so any code in here would never get run. + pass + + @run_if_active + def testDecryptionForbidden(self): + """ + Uses a private key to encrypt data, but decryption is not allowed. + ENCRYPTED_FIELD_MODE is explicitly set to ENCRYPT, meaning data should + not be decrypted, even though the key would allow for it. + """ + with keys(keyinfo.DECRYPT_AND_ENCRYPT, mode=keyinfo.ENCRYPT.name): + with secret_model() as model: + test_val = "Test Secret" + secret = model.objects.create(name=test_val) + retrieved_secret = model.objects.get(id=secret.id) + self.assertNotEqual(test_val, retrieved_secret.name) + self.assertTrue(retrieved_secret.name.startswith(EncryptedCharField.prefix)) + + @run_if_active + def testEncryptPublicDecryptPrivate(self): + """ + Uses a public key to encrypt, and a private key to decrypt data. + """ + test_val = "Test Secret" + + # First, encrypt data with public key and save to db. + with keys(keyinfo.ENCRYPT, mode=keyinfo.ENCRYPT.name): + with secret_model() as model: + secret = model.objects.create(name=test_val) + enc_retrieved_secret = model.objects.get(id=secret.id) + self.assertNotEqual(test_val, enc_retrieved_secret.name) + self.assertTrue(enc_retrieved_secret.name.startswith(EncryptedCharField.prefix)) + + # Next, retrieve data from db, and decrypt with private key. 
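One caveat about the `run_if_active` decorator used throughout these tests: a guarded test that simply returns is reported as a pass, not a skip. On Python 2.7+ the standard library expresses the same guard while keeping the test report honest. A sketch of the alternative, not how the vendored module is written; `run_if_keyczar` is an invented name:

```python
import unittest

try:
    import keyczar  # noqa
    keyczar_active = True
except ImportError:
    keyczar_active = False

# Used exactly like @run_if_active, but guarded tests show up as skipped.
run_if_keyczar = unittest.skipUnless(keyczar_active, 'keyczar is not installed')


class ExampleTest(unittest.TestCase):
    @run_if_keyczar
    def test_needs_keyczar(self):
        pass
```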
+ with keys(keyinfo.DECRYPT_AND_ENCRYPT): + with secret_model() as model: + retrieved_secret = model.objects.get(id=secret.id) + self.assertEqual(test_val, retrieved_secret.name) diff --git a/awx/lib/site-packages/django_extensions/tests/models.py b/awx/lib/site-packages/django_extensions/tests/models.py index f8deab6a10..b3a0aadbd1 100644 --- a/awx/lib/site-packages/django_extensions/tests/models.py +++ b/awx/lib/site-packages/django_extensions/tests/models.py @@ -1,23 +1,9 @@ from django.db import models -from django.conf import settings - -try: - from django_extensions.db.fields.encrypted import EncryptedTextField, EncryptedCharField - if not hasattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR'): - raise ImportError -except ImportError: - class EncryptedCharField(object): - def __init__(self, *args, **kwargs): - pass - - class EncryptedTextField(object): - def __init__(self, *args, **kwargs): - pass class Secret(models.Model): - name = EncryptedCharField("Name", blank=True, max_length=255) - text = EncryptedTextField("Text", blank=True) + name = models.CharField(blank=True, max_length=255, null=True) + text = models.TextField(blank=True, null=True) class Name(models.Model): @@ -33,4 +19,3 @@ class Person(models.Model): age = models.PositiveIntegerField() children = models.ManyToManyField('self') notes = models.ManyToManyField(Note) - diff --git a/awx/lib/site-packages/djcelery/__init__.py b/awx/lib/site-packages/djcelery/__init__.py index fea512138a..5a54cba725 100644 --- a/awx/lib/site-packages/djcelery/__init__.py +++ b/awx/lib/site-packages/djcelery/__init__.py @@ -5,7 +5,7 @@ from __future__ import absolute_import import os -VERSION = (3, 0, 21) +VERSION = (3, 0, 23) __version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:]) __author__ = 'Ask Solem' __contact__ = 'ask@celeryproject.org' diff --git a/awx/lib/site-packages/djcelery/admin.py b/awx/lib/site-packages/djcelery/admin.py index 774b92cef2..336d72d02e 100644 --- a/awx/lib/site-packages/djcelery/admin.py +++ b/awx/lib/site-packages/djcelery/admin.py @@ -1,6 +1,8 @@ from __future__ import absolute_import from __future__ import with_statement +from anyjson import loads + from django import forms from django.conf import settings from django.contrib import admin @@ -88,8 +90,9 @@ class ModelMonitor(admin.ModelAdmin): def change_view(self, request, object_id, extra_context=None): extra_context = extra_context or {} extra_context.setdefault('title', self.detail_title) - return super(ModelMonitor, self).change_view(request, object_id, - extra_context) + return super(ModelMonitor, self).change_view( + request, object_id, extra_context=extra_context, + ) def has_delete_permission(self, request, obj=None): if not self.can_delete: @@ -143,7 +146,7 @@ class TaskMonitor(ModelMonitor): ] class Media: - css = {'all': ('djcelery/style.css',)} + css = {'all': ('djcelery/style.css', )} @action(_('Revoke selected tasks')) def revoke_tasks(self, request, queryset): @@ -268,6 +271,22 @@ def periodic_task_form(): raise exc return data + def _clean_json(self, field): + value = self.cleaned_data[field] + try: + loads(value) + except ValueError, exc: + raise forms.ValidationError( + _('Unable to parse JSON: %s') % exc, + ) + return value + + def clean_args(self): + return self._clean_json('args') + + def clean_kwargs(self): + return self._clean_json('kwargs') + return PeriodicTaskForm diff --git a/awx/lib/site-packages/djcelery/backends/cache.py b/awx/lib/site-packages/djcelery/backends/cache.py index 2bcb9dcbf7..747e80b63c 100644 --- 
a/awx/lib/site-packages/djcelery/backends/cache.py +++ b/awx/lib/site-packages/djcelery/backends/cache.py @@ -40,7 +40,7 @@ class DjangoMemcacheWrapper(object): from django.core.cache.backends.base import InvalidCacheBackendError try: from django.core.cache.backends.memcached import CacheClass -except InvalidCacheBackendError: +except (ImportError, InvalidCacheBackendError): pass else: if django.VERSION[0:2] < (1, 2) and isinstance(cache, CacheClass): diff --git a/awx/lib/site-packages/djcelery/contrib/test_runner.py b/awx/lib/site-packages/djcelery/contrib/test_runner.py index 2d11a8f53d..ca109dec30 100644 --- a/awx/lib/site-packages/djcelery/contrib/test_runner.py +++ b/awx/lib/site-packages/djcelery/contrib/test_runner.py @@ -1,13 +1,13 @@ from __future__ import absolute_import from uuid import uuid4 -from datetime import datetime from django.conf import settings from django.test.simple import DjangoTestSuiteRunner from celery.task import Task from djcelery.models import TaskState +from djcelery.utils import now USAGE = """\ @@ -62,7 +62,7 @@ class CeleryTestSuiteRunnerStoringResult(DjangoTestSuiteRunner): result=retval, args=args, kwargs=kwargs, - tstamp=datetime.now()) + tstamp=now()) Task.on_success = classmethod(on_success_patched) # Monkey-patch Task.on_failure() method @@ -74,7 +74,7 @@ class CeleryTestSuiteRunnerStoringResult(DjangoTestSuiteRunner): result=einfo, args=args, kwargs=kwargs, - tstamp=datetime.now()) + tstamp=now()) Task.on_failure = classmethod(on_failure_patched) # Call parent's version diff --git a/awx/lib/site-packages/djcelery/models.py b/awx/lib/site-packages/djcelery/models.py index 31403c6980..4ea1ec85ae 100644 --- a/awx/lib/site-packages/djcelery/models.py +++ b/awx/lib/site-packages/djcelery/models.py @@ -94,6 +94,7 @@ class IntervalSchedule(models.Model): class Meta: verbose_name = _(u'interval') verbose_name_plural = _(u'intervals') + ordering = ['period', 'every'] @property def schedule(self): @@ -137,6 +138,8 @@ class CrontabSchedule(models.Model): class Meta: verbose_name = _(u'crontab') verbose_name_plural = _(u'crontabs') + ordering = ['month_of_year', 'day_of_month', + 'day_of_week', 'hour', 'minute'] def __unicode__(self): rfield = lambda f: f and str(f).replace(' ', '') or '*' diff --git a/awx/lib/site-packages/easy_install.py b/awx/lib/site-packages/easy_install.py new file mode 100644 index 0000000000..d87e984034 --- /dev/null +++ b/awx/lib/site-packages/easy_install.py @@ -0,0 +1,5 @@ +"""Run the EasyInstall command""" + +if __name__ == '__main__': + from setuptools.command.easy_install import main + main() diff --git a/awx/lib/site-packages/httplib2/__init__.py b/awx/lib/site-packages/httplib2/__init__.py new file mode 100644 index 0000000000..9780d4e54c --- /dev/null +++ b/awx/lib/site-packages/httplib2/__init__.py @@ -0,0 +1,1657 @@ +from __future__ import generators +""" +httplib2 + +A caching http interface that supports ETags and gzip +to conserve bandwidth. + +Requires Python 2.3 or later + +Changelog: +2007-08-18, Rick: Modified so it's able to use a socks proxy if needed. 
+ +""" + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", + "James Antill", + "Xavier Verges Farrero", + "Jonathan Feinberg", + "Blair Zajac", + "Sam Ruby", + "Louis Nyffenegger"] +__license__ = "MIT" +__version__ = "0.8" + +import re +import sys +import email +import email.Utils +import email.Message +import email.FeedParser +import StringIO +import gzip +import zlib +import httplib +import urlparse +import urllib +import base64 +import os +import copy +import calendar +import time +import random +import errno +try: + from hashlib import sha1 as _sha, md5 as _md5 +except ImportError: + # prior to Python 2.5, these were separate modules + import sha + import md5 + _sha = sha.new + _md5 = md5.new +import hmac +from gettext import gettext as _ +import socket + +try: + from httplib2 import socks +except ImportError: + try: + import socks + except (ImportError, AttributeError): + socks = None + +# Build the appropriate socket wrapper for ssl +try: + import ssl # python 2.6 + ssl_SSLError = ssl.SSLError + def _ssl_wrap_socket(sock, key_file, cert_file, + disable_validation, ca_certs): + if disable_validation: + cert_reqs = ssl.CERT_NONE + else: + cert_reqs = ssl.CERT_REQUIRED + # We should be specifying SSL version 3 or TLS v1, but the ssl module + # doesn't expose the necessary knobs. So we need to go with the default + # of SSLv23. + return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file, + cert_reqs=cert_reqs, ca_certs=ca_certs) +except (AttributeError, ImportError): + ssl_SSLError = None + def _ssl_wrap_socket(sock, key_file, cert_file, + disable_validation, ca_certs): + if not disable_validation: + raise CertificateValidationUnsupported( + "SSL certificate validation is not supported without " + "the ssl module installed. To avoid this error, install " + "the ssl module, or explicity disable validation.") + ssl_sock = socket.ssl(sock, key_file, cert_file) + return httplib.FakeSocket(sock, ssl_sock) + + +if sys.version_info >= (2,3): + from iri2uri import iri2uri +else: + def iri2uri(uri): + return uri + +def has_timeout(timeout): # python 2.6 + if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'): + return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT) + return (timeout is not None) + +__all__ = [ + 'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation', + 'RedirectLimit', 'FailedToDecompressContent', + 'UnimplementedDigestAuthOptionError', + 'UnimplementedHmacDigestAuthOptionError', + 'debuglevel', 'ProxiesUnavailableError'] + + +# The httplib debug level, set to a non-zero value to get debug output +debuglevel = 0 + +# A request will be tried 'RETRIES' times if it fails at the socket/connection level. +RETRIES = 2 + +# Python 2.3 support +if sys.version_info < (2,4): + def sorted(seq): + seq.sort() + return seq + +# Python 2.3 support +def HTTPResponse__getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise httplib.ResponseNotReady() + return self.msg.items() + +if not hasattr(httplib.HTTPResponse, 'getheaders'): + httplib.HTTPResponse.getheaders = HTTPResponse__getheaders + +# All exceptions raised here derive from HttpLib2Error +class HttpLib2Error(Exception): pass + +# Some exceptions can be caught and optionally +# be turned back into responses. 
+class HttpLib2ErrorWithResponse(HttpLib2Error): + def __init__(self, desc, response, content): + self.response = response + self.content = content + HttpLib2Error.__init__(self, desc) + +class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass +class RedirectLimit(HttpLib2ErrorWithResponse): pass +class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass +class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass +class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass + +class MalformedHeader(HttpLib2Error): pass +class RelativeURIError(HttpLib2Error): pass +class ServerNotFoundError(HttpLib2Error): pass +class ProxiesUnavailableError(HttpLib2Error): pass +class CertificateValidationUnsupported(HttpLib2Error): pass +class SSLHandshakeError(HttpLib2Error): pass +class NotSupportedOnThisPlatform(HttpLib2Error): pass +class CertificateHostnameMismatch(SSLHandshakeError): + def __init__(self, desc, host, cert): + HttpLib2Error.__init__(self, desc) + self.host = host + self.cert = cert + +# Open Items: +# ----------- +# Proxy support + +# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) + +# Pluggable cache storage (supports storing the cache in +# flat files by default. We need a plug-in architecture +# that can support Berkeley DB and Squid) + +# == Known Issues == +# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. +# Does not handle Cache-Control: max-stale +# Does not use Age: headers when calculating cache freshness. + + +# The number of redirections to follow before giving up. +# Note that only GET redirects are automatically followed. +# Will also honor 301 requests by saving that info and never +# requesting that URI again. +DEFAULT_MAX_REDIRECTS = 5 + +try: + # Users can optionally provide a module that tells us where the CA_CERTS + # are located. + import ca_certs_locater + CA_CERTS = ca_certs_locater.get() +except ImportError: + # Default CA certificates file bundled with httplib2. + CA_CERTS = os.path.join( + os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt") + +# Which headers are hop-by-hop headers by default +HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] + +def _get_end2end_headers(response): + hopbyhop = list(HOP_BY_HOP) + hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) + return [header for header in response.keys() if header not in hopbyhop] + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. + + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + +def urlnorm(uri): + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) + authority = authority.lower() + scheme = scheme.lower() + if not path: + path = "/" + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. 
+    request_uri = query and "?".join([path, query]) or path
+    scheme = scheme.lower()
+    defrag_uri = scheme + "://" + authority + request_uri
+    return scheme, authority, request_uri, defrag_uri
+
+
+# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
+re_url_scheme = re.compile(r'^\w+://')
+re_slash = re.compile(r'[?/:|]+')
+
+def safename(filename):
+    """Return a filename suitable for the cache.
+
+    Strips dangerous and common characters to create a filename we
+    can use to store the cache in.
+    """
+
+    try:
+        if re_url_scheme.match(filename):
+            if isinstance(filename, str):
+                filename = filename.decode('utf-8')
+                filename = filename.encode('idna')
+            else:
+                filename = filename.encode('idna')
+    except UnicodeError:
+        pass
+    if isinstance(filename, unicode):
+        filename = filename.encode('utf-8')
+    filemd5 = _md5(filename).hexdigest()
+    filename = re_url_scheme.sub("", filename)
+    filename = re_slash.sub(",", filename)
+
+    # limit length of filename
+    if len(filename) > 200:
+        filename = filename[:200]
+    return ",".join((filename, filemd5))
+
+NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
+def _normalize_headers(headers):
+    return dict([(key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
+
+def _parse_cache_control(headers):
+    retval = {}
+    if headers.has_key('cache-control'):
+        parts = headers['cache-control'].split(',')
+        parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
+        parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
+        retval = dict(parts_with_args + parts_wo_args)
+    return retval
+
+# Whether to use a strict mode to parse WWW-Authenticate headers
+# Might lead to bad results in case of ill-formed header value,
+# so disabled by default, falling back to relaxed parsing.
+# Set to true to turn on, useful for testing servers.
+USE_WWW_AUTH_STRICT_PARSING = 0
+
+# In regex below:
+#    [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+             matches a "token" as defined by HTTP
+#    "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?"    matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
+# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
+#    \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
+WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
+WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t,]+(?!\"))\"?)(.*)$")
+UNQUOTE_PAIRS = re.compile(r'\\(.)')
+
+def _parse_www_authenticate(headers, headername='www-authenticate'):
+    """Returns a dictionary of dictionaries, one dict
+    per auth_scheme."""
+    retval = {}
+    if headers.has_key(headername):
+        try:
+            authenticate = headers[headername].strip()
+            www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
+            while authenticate:
+                # Break off the scheme at the beginning of the line
+                if headername == 'authentication-info':
+                    (auth_scheme, the_rest) = ('digest', authenticate)
+                else:
+                    (auth_scheme, the_rest) = authenticate.split(" ", 1)
+                # Now loop over all the key value pairs that come after the
+                # scheme, being careful not to roll into the next scheme
+                match = www_auth.search(the_rest)
+                auth_params = {}
+                while match:
+                    if match and len(match.groups()) == 3:
+                        (key, value, the_rest) = match.groups()
+                        auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value)
+                    match = www_auth.search(the_rest)
+                retval[auth_scheme.lower()] = auth_params
+                authenticate = the_rest.strip()
+        except ValueError:
+            raise MalformedHeader("WWW-Authenticate")
+    return retval
+
+def _entry_disposition(response_headers, request_headers):
+    """Determine freshness from the Date, Expires and Cache-Control headers.
+
+    Returns "FRESH", "STALE", or "TRANSPARENT" for the cached entry.
+    """
+    retval = "STALE"
+    cc = _parse_cache_control(request_headers)
+    cc_response = _parse_cache_control(response_headers)
+
+    if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
+        retval = "TRANSPARENT"
+        if 'cache-control' not in request_headers:
+            request_headers['cache-control'] = 'no-cache'
+    elif cc.has_key('no-cache'):
+        retval = "TRANSPARENT"
+    elif cc_response.has_key('no-cache'):
+        retval = "STALE"
+    elif cc.has_key('only-if-cached'):
+        retval = "FRESH"
+    elif response_headers.has_key('date'):
+        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
+        now = time.time()
+        current_age = max(0, now - date)
+        if cc_response.has_key('max-age'):
+            try:
+                freshness_lifetime = int(cc_response['max-age'])
+            except ValueError:
+                freshness_lifetime = 0
+        elif response_headers.has_key('expires'):
+            expires = email.Utils.parsedate_tz(response_headers['expires'])
+            if None == expires:
+                freshness_lifetime = 0
+            else:
+                freshness_lifetime = max(0, calendar.timegm(expires) - date)
+        else:
+            freshness_lifetime = 0
+        if cc.has_key('max-age'):
+            try:
+                freshness_lifetime = int(cc['max-age'])
+            except ValueError:
+                freshness_lifetime = 0
+        if cc.has_key('min-fresh'):
+            try:
+                min_fresh = int(cc['min-fresh'])
+            except ValueError:
+                min_fresh = 0
+            current_age += min_fresh
+        if freshness_lifetime > current_age:
+            retval = "FRESH"
+    return retval
+
+def _decompressContent(response, new_content):
+    content = new_content
+    try:
+        encoding = response.get('content-encoding', None)
+        if encoding in ['gzip', 'deflate']:
+            if encoding == 'gzip':
+                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
+            if encoding == 'deflate':
+                content = zlib.decompress(content)
+            response['content-length'] = str(len(content))
+            # Record the historical presence of the encoding in a way that won't interfere.
+ response['-content-encoding'] = response['content-encoding'] + del response['content-encoding'] + except IOError: + content = "" + raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content) + return content + +def _updateCache(request_headers, response_headers, content, cache, cachekey): + if cachekey: + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + if cc.has_key('no-store') or cc_response.has_key('no-store'): + cache.delete(cachekey) + else: + info = email.Message.Message() + for key, value in response_headers.iteritems(): + if key not in ['status','content-encoding','transfer-encoding']: + info[key] = value + + # Add annotations to the cache to indicate what headers + # are variant for this request. + vary = response_headers.get('vary', None) + if vary: + vary_headers = vary.lower().replace(' ', '').split(',') + for header in vary_headers: + key = '-varied-%s' % header + try: + info[key] = request_headers[header] + except KeyError: + pass + + status = response_headers.status + if status == 304: + status = 200 + + status_header = 'status: %d\r\n' % status + + header_str = info.as_string() + + header_str = re.sub("\r(?!\n)|(? 0: + service = "cl" + # No point in guessing Base or Spreadsheet + #elif request_uri.find("spreadsheets") > 0: + # service = "wise" + + auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) + resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) + lines = content.split('\n') + d = dict([tuple(line.split("=", 1)) for line in lines if line]) + if resp.status == 403: + self.Auth = "" + else: + self.Auth = d['Auth'] + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['authorization'] = 'GoogleLogin Auth=' + self.Auth + + +AUTH_SCHEME_CLASSES = { + "basic": BasicAuthentication, + "wsse": WsseAuthentication, + "digest": DigestAuthentication, + "hmacdigest": HmacDigestAuthentication, + "googlelogin": GoogleLoginAuthentication +} + +AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] + +class FileCache(object): + """Uses a local directory as a store for cached files. + Not really safe to use if multiple threads or processes are going to + be running on the same cache. 
+ """ + def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior + self.cache = cache + self.safe = safe + if not os.path.exists(cache): + os.makedirs(self.cache) + + def get(self, key): + retval = None + cacheFullPath = os.path.join(self.cache, self.safe(key)) + try: + f = file(cacheFullPath, "rb") + retval = f.read() + f.close() + except IOError: + pass + return retval + + def set(self, key, value): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + f = file(cacheFullPath, "wb") + f.write(value) + f.close() + + def delete(self, key): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + if os.path.exists(cacheFullPath): + os.remove(cacheFullPath) + +class Credentials(object): + def __init__(self): + self.credentials = [] + + def add(self, name, password, domain=""): + self.credentials.append((domain.lower(), name, password)) + + def clear(self): + self.credentials = [] + + def iter(self, domain): + for (cdomain, name, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (name, password) + +class KeyCerts(Credentials): + """Identical to Credentials except that + name/password are mapped to key/cert.""" + pass + +class AllHosts(object): + pass + +class ProxyInfo(object): + """Collect information required to use a proxy.""" + bypass_hosts = () + + def __init__(self, proxy_type, proxy_host, proxy_port, + proxy_rdns=None, proxy_user=None, proxy_pass=None): + """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX + constants. For example: + + p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, + proxy_host='localhost', proxy_port=8000) + """ + self.proxy_type = proxy_type + self.proxy_host = proxy_host + self.proxy_port = proxy_port + self.proxy_rdns = proxy_rdns + self.proxy_user = proxy_user + self.proxy_pass = proxy_pass + + def astuple(self): + return (self.proxy_type, self.proxy_host, self.proxy_port, + self.proxy_rdns, self.proxy_user, self.proxy_pass) + + def isgood(self): + return (self.proxy_host != None) and (self.proxy_port != None) + + def applies_to(self, hostname): + return not self.bypass_host(hostname) + + def bypass_host(self, hostname): + """Has this host been excluded from the proxy config""" + if self.bypass_hosts is AllHosts: + return True + + bypass = False + for domain in self.bypass_hosts: + if hostname.endswith(domain): + bypass = True + + return bypass + + +def proxy_info_from_environment(method='http'): + """ + Read proxy info from the environment variables. 
+ """ + if method not in ['http', 'https']: + return + + env_var = method + '_proxy' + url = os.environ.get(env_var, os.environ.get(env_var.upper())) + if not url: + return + pi = proxy_info_from_url(url, method) + + no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', '')) + bypass_hosts = [] + if no_proxy: + bypass_hosts = no_proxy.split(',') + # special case, no_proxy=* means all hosts bypassed + if no_proxy == '*': + bypass_hosts = AllHosts + + pi.bypass_hosts = bypass_hosts + return pi + +def proxy_info_from_url(url, method='http'): + """ + Construct a ProxyInfo from a URL (such as http_proxy env var) + """ + url = urlparse.urlparse(url) + username = None + password = None + port = None + if '@' in url[1]: + ident, host_port = url[1].split('@', 1) + if ':' in ident: + username, password = ident.split(':', 1) + else: + password = ident + else: + host_port = url[1] + if ':' in host_port: + host, port = host_port.split(':', 1) + else: + host = host_port + + if port: + port = int(port) + else: + port = dict(https=443, http=80)[method] + + proxy_type = 3 # socks.PROXY_TYPE_HTTP + return ProxyInfo( + proxy_type = proxy_type, + proxy_host = host, + proxy_port = port, + proxy_user = username or None, + proxy_pass = password or None, + ) + + +class HTTPConnectionWithTimeout(httplib.HTTPConnection): + """ + HTTPConnection subclass that supports timeouts + + All timeouts are in seconds. If None is passed for timeout then + Python's default timeout for sockets will be used. See for example + the docs of socket.setdefaulttimeout(): + http://docs.python.org/library/socket.html#socket.setdefaulttimeout + """ + + def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): + httplib.HTTPConnection.__init__(self, host, port, strict) + self.timeout = timeout + self.proxy_info = proxy_info + + def connect(self): + """Connect to the host and port specified in __init__.""" + # Mostly verbatim from httplib.py. + if self.proxy_info and socks is None: + raise ProxiesUnavailableError( + 'Proxy support missing but proxy use was requested!') + msg = "getaddrinfo returns an empty list" + if self.proxy_info and self.proxy_info.isgood(): + use_proxy = True + proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple() + else: + use_proxy = False + if use_proxy and proxy_rdns: + host = proxy_host + port = proxy_port + else: + host = self.host + port = self.port + + for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + if use_proxy: + self.sock = socks.socksocket(af, socktype, proto) + self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass) + else: + self.sock = socket.socket(af, socktype, proto) + self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # Different from httplib: support timeouts. + if has_timeout(self.timeout): + self.sock.settimeout(self.timeout) + # End of difference from httplib. 
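`proxy_info_from_url()` above splits optional credentials off the netloc, defaults the port by scheme (80 for http, 443 for https), and always reports `PROXY_TYPE_HTTP`. Assuming this vendored module is importable as `httplib2`, its behaviour on a made-up proxy URL (`proxy.local` is a placeholder host):

```python
import httplib2

pi = httplib2.proxy_info_from_url('http://user:secret@proxy.local:3128')
assert (pi.proxy_host, pi.proxy_port) == ('proxy.local', 3128)
assert (pi.proxy_user, pi.proxy_pass) == ('user', 'secret')
assert pi.isgood()
assert not pi.bypass_host('proxy.local')   # no bypass list configured

# No port in the URL: the scheme decides.
assert httplib2.proxy_info_from_url('https://proxy.local', 'https').proxy_port == 443
```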
+ if self.debuglevel > 0: + print "connect: (%s, %s) ************" % (self.host, self.port) + if use_proxy: + print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) + + self.sock.connect((self.host, self.port) + sa[2:]) + except socket.error, msg: + if self.debuglevel > 0: + print "connect fail: (%s, %s)" % (self.host, self.port) + if use_proxy: + print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + +class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): + """ + This class allows communication via SSL. + + All timeouts are in seconds. If None is passed for timeout then + Python's default timeout for sockets will be used. See for example + the docs of socket.setdefaulttimeout(): + http://docs.python.org/library/socket.html#socket.setdefaulttimeout + """ + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=None, proxy_info=None, + ca_certs=None, disable_ssl_certificate_validation=False): + httplib.HTTPSConnection.__init__(self, host, port=port, + key_file=key_file, + cert_file=cert_file, strict=strict) + self.timeout = timeout + self.proxy_info = proxy_info + if ca_certs is None: + ca_certs = CA_CERTS + self.ca_certs = ca_certs + self.disable_ssl_certificate_validation = \ + disable_ssl_certificate_validation + + # The following two methods were adapted from https_wrapper.py, released + # with the Google Appengine SDK at + # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py + # under the following license: + # + # Copyright 2007 Google Inc. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + + def _GetValidHostsForCert(self, cert): + """Returns a list of valid host globs for an SSL certificate. + + Args: + cert: A dictionary representing an SSL certificate. + Returns: + list: A list of valid host globs. + """ + if 'subjectAltName' in cert: + return [x[1] for x in cert['subjectAltName'] + if x[0].lower() == 'dns'] + else: + return [x[0][1] for x in cert['subject'] + if x[0][0].lower() == 'commonname'] + + def _ValidateCertificateHostname(self, cert, hostname): + """Validates that a given hostname is valid for an SSL certificate. + + Args: + cert: A dictionary representing an SSL certificate. + hostname: The hostname to test. + Returns: + bool: Whether or not the hostname is valid for this certificate. + """ + hosts = self._GetValidHostsForCert(cert) + for host in hosts: + host_re = host.replace('.', '\.').replace('*', '[^.]*') + if re.search('^%s$' % (host_re,), hostname, re.I): + return True + return False + + def connect(self): + "Connect to a host on a given (SSL) port." 
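The two certificate helpers above implement hostname checking over `ssl.getpeercert()`-style dictionaries, with a `*` in a certificate name matching exactly one DNS label. A standalone restatement of their logic; the function names and the sample cert are invented for illustration:

```python
import re


def valid_hosts_for_cert(cert):
    """Restatement of _GetValidHostsForCert: prefer subjectAltName DNS
    entries, falling back to the subject commonName."""
    if 'subjectAltName' in cert:
        return [val for (kind, val) in cert['subjectAltName']
                if kind.lower() == 'dns']
    return [rdn[0][1] for rdn in cert['subject']
            if rdn[0][0].lower() == 'commonname']


def hostname_matches(cert, hostname):
    """Restatement of _ValidateCertificateHostname."""
    for host in valid_hosts_for_cert(cert):
        host_re = host.replace('.', r'\.').replace('*', '[^.]*')
        if re.search('^%s$' % host_re, hostname, re.I):
            return True
    return False


cert = {'subjectAltName': (('DNS', 'example.com'), ('DNS', '*.example.com'))}
assert hostname_matches(cert, 'api.example.com')
assert not hostname_matches(cert, 'a.b.example.com')   # '*' never crosses a dot
```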
+ + msg = "getaddrinfo returns an empty list" + if self.proxy_info and self.proxy_info.isgood(): + use_proxy = True + proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple() + else: + use_proxy = False + if use_proxy and proxy_rdns: + host = proxy_host + port = proxy_port + else: + host = self.host + port = self.port + + address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM) + for family, socktype, proto, canonname, sockaddr in address_info: + try: + if use_proxy: + sock = socks.socksocket(family, socktype, proto) + + sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass) + else: + sock = socket.socket(family, socktype, proto) + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + if has_timeout(self.timeout): + sock.settimeout(self.timeout) + sock.connect((self.host, self.port)) + self.sock =_ssl_wrap_socket( + sock, self.key_file, self.cert_file, + self.disable_ssl_certificate_validation, self.ca_certs) + if self.debuglevel > 0: + print "connect: (%s, %s)" % (self.host, self.port) + if use_proxy: + print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) + if not self.disable_ssl_certificate_validation: + cert = self.sock.getpeercert() + hostname = self.host.split(':', 0)[0] + if not self._ValidateCertificateHostname(cert, hostname): + raise CertificateHostnameMismatch( + 'Server presented certificate that does not match ' + 'host %s: %s' % (hostname, cert), hostname, cert) + except ssl_SSLError, e: + if sock: + sock.close() + if self.sock: + self.sock.close() + self.sock = None + # Unfortunately the ssl module doesn't seem to provide any way + # to get at more detailed error information, in particular + # whether the error is due to certificate validation or + # something else (such as SSL protocol mismatch). + if e.errno == ssl.SSL_ERROR_SSL: + raise SSLHandshakeError(e) + else: + raise + except (socket.timeout, socket.gaierror): + raise + except socket.error, msg: + if self.debuglevel > 0: + print "connect fail: (%s, %s)" % (self.host, self.port) + if use_proxy: + print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + +SCHEME_TO_CONNECTION = { + 'http': HTTPConnectionWithTimeout, + 'https': HTTPSConnectionWithTimeout +} + +# Use a different connection object for Google App Engine +try: + try: + from google.appengine.api import apiproxy_stub_map + if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None: + raise ImportError # Bail out; we're not actually running on App Engine. + from google.appengine.api.urlfetch import fetch + from google.appengine.api.urlfetch import InvalidURLError + except (ImportError, AttributeError): + from google3.apphosting.api import apiproxy_stub_map + if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None: + raise ImportError # Bail out; we're not actually running on App Engine. 
+        from google3.apphosting.api.urlfetch import fetch
+        from google3.apphosting.api.urlfetch import InvalidURLError
+
+    def _new_fixed_fetch(validate_certificate):
+        def fixed_fetch(url, payload=None, method="GET", headers={},
+                        allow_truncated=False, follow_redirects=True,
+                        deadline=5):
+            return fetch(url, payload=payload, method=method, headers=headers,
+                         allow_truncated=allow_truncated,
+                         follow_redirects=follow_redirects, deadline=deadline,
+                         validate_certificate=validate_certificate)
+        return fixed_fetch
+
+    class AppEngineHttpConnection(httplib.HTTPConnection):
+        """Use httplib on App Engine, but compensate for its weirdness.
+
+        The parameters key_file, cert_file, proxy_info, ca_certs, and
+        disable_ssl_certificate_validation are all dropped on the ground.
+        """
+        def __init__(self, host, port=None, key_file=None, cert_file=None,
+                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
+                     disable_ssl_certificate_validation=False):
+            httplib.HTTPConnection.__init__(self, host, port=port,
+                                            strict=strict, timeout=timeout)
+
+    class AppEngineHttpsConnection(httplib.HTTPSConnection):
+        """Same as AppEngineHttpConnection, but for HTTPS URIs."""
+        def __init__(self, host, port=None, key_file=None, cert_file=None,
+                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
+                     disable_ssl_certificate_validation=False):
+            httplib.HTTPSConnection.__init__(self, host, port=port,
+                                             key_file=key_file,
+                                             cert_file=cert_file, strict=strict,
+                                             timeout=timeout)
+            self._fetch = _new_fixed_fetch(
+                    not disable_ssl_certificate_validation)
+
+    # Update the connection classes to use the Google App Engine specific ones.
+    SCHEME_TO_CONNECTION = {
+        'http': AppEngineHttpConnection,
+        'https': AppEngineHttpsConnection
+    }
+except (ImportError, AttributeError):
+    pass
+
+
+class Http(object):
+    """An HTTP client that handles:
+
+    - all methods
+    - caching
+    - ETags
+    - compression
+    - HTTPS
+    - Basic
+    - Digest
+    - WSSE
+
+    and more.
+    """
+    def __init__(self, cache=None, timeout=None,
+                 proxy_info=proxy_info_from_environment,
+                 ca_certs=None, disable_ssl_certificate_validation=False):
+        """If 'cache' is a string then it is used as a directory name for
+        a disk cache. Otherwise it must be an object that supports the
+        same interface as FileCache.
+
+        All timeouts are in seconds. If None is passed for timeout
+        then Python's default timeout for sockets will be used. See
+        for example the docs of socket.setdefaulttimeout():
+        http://docs.python.org/library/socket.html#socket.setdefaulttimeout
+
+        `proxy_info` may be:
+          - a callable that takes the http scheme ('http' or 'https') and
+            returns a ProxyInfo instance per request. By default, uses
+            proxy_info_from_environment.
+          - a ProxyInfo instance (static proxy config).
+          - None (proxy disabled).
+
+        ca_certs is the path of a file containing root CA certificates for SSL
+        server certificate validation. By default, a CA cert file bundled with
+        httplib2 is used.
+
+        If disable_ssl_certificate_validation is true, SSL cert validation will
+        not be performed.
+        """
+        self.proxy_info = proxy_info
+        self.ca_certs = ca_certs
+        self.disable_ssl_certificate_validation = \
+                disable_ssl_certificate_validation
+
+        # Map domain name to an httplib connection
+        self.connections = {}
+        # The location of the cache, for now a directory
+        # where cached responses are held.
+ if cache and isinstance(cache, basestring): + self.cache = FileCache(cache) + else: + self.cache = cache + + # Name/password + self.credentials = Credentials() + + # Key/cert + self.certificates = KeyCerts() + + # authorization objects + self.authorizations = [] + + # If set to False then no redirects are followed, even safe ones. + self.follow_redirects = True + + # Which HTTP methods do we apply optimistic concurrency to, i.e. + # which methods get an "if-match:" etag header added to them. + self.optimistic_concurrency_methods = ["PUT", "PATCH"] + + # If 'follow_redirects' is True, and this is set to True then + # all redirecs are followed, including unsafe ones. + self.follow_all_redirects = False + + self.ignore_etag = False + + self.force_exception_to_status_code = False + + self.timeout = timeout + + # Keep Authorization: headers on a redirect. + self.forward_authorization_headers = False + + def __getstate__(self): + state_dict = copy.copy(self.__dict__) + # In case request is augmented by some foreign object such as + # credentials which handle auth + if 'request' in state_dict: + del state_dict['request'] + if 'connections' in state_dict: + del state_dict['connections'] + return state_dict + + def __setstate__(self, state): + self.__dict__.update(state) + self.connections = {} + + def _auth_from_challenge(self, host, request_uri, headers, response, content): + """A generator that creates Authorization objects + that can be applied to requests. + """ + challenges = _parse_www_authenticate(response, 'www-authenticate') + for cred in self.credentials.iter(host): + for scheme in AUTH_SCHEME_ORDER: + if challenges.has_key(scheme): + yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) + + def add_credentials(self, name, password, domain=""): + """Add a name and password that will be used + any time a request requires authentication.""" + self.credentials.add(name, password, domain) + + def add_certificate(self, key, cert, domain): + """Add a key and cert that will be used + any time a request requires authentication.""" + self.certificates.add(key, cert, domain) + + def clear_credentials(self): + """Remove all the names and passwords + that are used for authentication""" + self.credentials.clear() + self.authorizations = [] + + def _conn_request(self, conn, request_uri, method, body, headers): + for i in range(RETRIES): + try: + if hasattr(conn, 'sock') and conn.sock is None: + conn.connect() + conn.request(method, request_uri, body, headers) + except socket.timeout: + raise + except socket.gaierror: + conn.close() + raise ServerNotFoundError("Unable to find the server at %s" % conn.host) + except ssl_SSLError: + conn.close() + raise + except socket.error, e: + err = 0 + if hasattr(e, 'args'): + err = getattr(e, 'args')[0] + else: + err = e.errno + if err == errno.ECONNREFUSED: # Connection refused + raise + except httplib.HTTPException: + # Just because the server closed the connection doesn't apparently mean + # that the server didn't send a response. 
+                if hasattr(conn, 'sock') and conn.sock is None:
+                    if i < RETRIES-1:
+                        conn.close()
+                        conn.connect()
+                        continue
+                    else:
+                        conn.close()
+                        raise
+                if i < RETRIES-1:
+                    conn.close()
+                    conn.connect()
+                    continue
+            try:
+                response = conn.getresponse()
+            except (socket.error, httplib.HTTPException):
+                if i < RETRIES-1:
+                    conn.close()
+                    conn.connect()
+                    continue
+                else:
+                    conn.close()
+                    raise
+            else:
+                content = ""
+                if method == "HEAD":
+                    conn.close()
+                else:
+                    content = response.read()
+                response = Response(response)
+                if method != "HEAD":
+                    content = _decompressContent(response, content)
+            break
+        return (response, content)
+
+
+    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
+        """Do the actual request using the connection object
+        and also follow one level of redirects if necessary"""
+
+        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
+        auth = auths and sorted(auths)[0][1] or None
+        if auth:
+            auth.request(method, request_uri, headers, body)
+
+        (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+
+        if auth:
+            if auth.response(response, body):
+                auth.request(method, request_uri, headers, body)
+                (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+                response._stale_digest = 1
+
+        if response.status == 401:
+            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
+                authorization.request(method, request_uri, headers, body)
+                (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+                if response.status != 401:
+                    self.authorizations.append(authorization)
+                    authorization.response(response, body)
+                    break
+
+        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
+            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
+                # Pick out the location header and basically start from the beginning
+                # remembering first to strip the ETag header and decrement our 'depth'
+                if redirections:
+                    if not response.has_key('location') and response.status != 300:
+                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
+                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
+                    if response.has_key('location'):
+                        location = response['location']
+                        (scheme, authority, path, query, fragment) = parse_uri(location)
+                        if authority == None:
+                            response['location'] = urlparse.urljoin(absolute_uri, location)
+                    if response.status == 301 and method in ["GET", "HEAD"]:
+                        response['-x-permanent-redirect-url'] = response['location']
+                        if not response.has_key('content-location'):
+                            response['content-location'] = absolute_uri
+                        _updateCache(headers, response, content, self.cache, cachekey)
+                    if headers.has_key('if-none-match'):
+                        del headers['if-none-match']
+                    if headers.has_key('if-modified-since'):
+                        del headers['if-modified-since']
+                    if 'authorization' in headers and not self.forward_authorization_headers:
+                        del headers['authorization']
+                    if response.has_key('location'):
+                        location = response['location']
+                        old_response = copy.deepcopy(response)
+                        if not old_response.has_key('content-location'):
+                            old_response['content-location'] = absolute_uri
+                        redirect_method = method
+                        if response.status in [302, 303]:
+                            redirect_method = "GET"
+                            body = None
+                        (response, content) = self.request(location, redirect_method, body=body, headers=headers, redirections=redirections - 1)
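+                        # (Record the redirect chain: the response that
+                        # triggered this redirect stays reachable via
+                        # response.previous.)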
+                        response.previous = old_response
+                else:
+                    raise RedirectLimit("Redirected more times than redirection_limit allows.", response, content)
+            elif response.status in [200, 203] and method in ["GET", "HEAD"]:
+                # Don't cache 206's since we aren't going to handle byte range requests
+                if not response.has_key('content-location'):
+                    response['content-location'] = absolute_uri
+                _updateCache(headers, response, content, self.cache, cachekey)
+
+        return (response, content)
+
+    def _normalize_headers(self, headers):
+        return _normalize_headers(headers)
+
+# Need to catch and rebrand some exceptions
+# Then need to optionally turn all exceptions into status codes
+# including all socket.* and httplib.* exceptions.
+
+
+    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
+        """ Performs a single HTTP request.
+
+        The 'uri' is the URI of the HTTP resource and can begin with either
+        'http' or 'https'. The value of 'uri' must be an absolute URI.
+
+        The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
+        etc. There is no restriction on the methods allowed.
+
+        The 'body' is the entity body to be sent with the request. It is a
+        string object.
+
+        Any extra headers that are to be sent with the request should be
+        provided in the 'headers' dictionary.
+
+        The maximum number of redirects to follow before raising an
+        exception is 'redirections'. The default is 5.
+
+        The return value is a tuple of (response, content), the first
+        being an instance of the 'Response' class, the second being
+        a string that contains the response entity body.
+        """
+        try:
+            if headers is None:
+                headers = {}
+            else:
+                headers = self._normalize_headers(headers)
+
+            if not headers.has_key('user-agent'):
+                headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
+
+            uri = iri2uri(uri)
+
+            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
+            domain_port = authority.split(":")[0:2]
+            if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
+                scheme = 'https'
+                authority = domain_port[0]
+
+            proxy_info = self._get_proxy_info(scheme, authority)
+
+            conn_key = scheme+":"+authority
+            if conn_key in self.connections:
+                conn = self.connections[conn_key]
+            else:
+                if not connection_type:
+                    connection_type = SCHEME_TO_CONNECTION[scheme]
+                certs = list(self.certificates.iter(authority))
+                if scheme == 'https':
+                    if certs:
+                        conn = self.connections[conn_key] = connection_type(
+                                authority, key_file=certs[0][0],
+                                cert_file=certs[0][1], timeout=self.timeout,
+                                proxy_info=proxy_info,
+                                ca_certs=self.ca_certs,
+                                disable_ssl_certificate_validation=
+                                        self.disable_ssl_certificate_validation)
+                    else:
+                        conn = self.connections[conn_key] = connection_type(
+                                authority, timeout=self.timeout,
+                                proxy_info=proxy_info,
+                                ca_certs=self.ca_certs,
+                                disable_ssl_certificate_validation=
+                                        self.disable_ssl_certificate_validation)
+                else:
+                    conn = self.connections[conn_key] = connection_type(
+                            authority, timeout=self.timeout,
+                            proxy_info=proxy_info)
+                conn.set_debuglevel(debuglevel)
+
+            if 'range' not in headers and 'accept-encoding' not in headers:
+                headers['accept-encoding'] = 'gzip, deflate'
+
+            info = email.Message.Message()
+            cached_value = None
+            if self.cache:
+                cachekey = defrag_uri
+                cached_value = self.cache.get(cachekey)
+                if cached_value:
+                    # info = email.message_from_string(cached_value)
+                    #
+                    # Need to replace the line above with the kludge below
+                    # to fix the non-existent bug not fixed in this
+                    # bug report:
+                    #   http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
+                    try:
+                        info, content = cached_value.split('\r\n\r\n', 1)
+                        feedparser = email.FeedParser.FeedParser()
+                        feedparser.feed(info)
+                        info = feedparser.close()
+                        feedparser._parse = None
+                    except (IndexError, ValueError):
+                        self.cache.delete(cachekey)
+                        cachekey = None
+                        cached_value = None
+            else:
+                cachekey = None
+
+            if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
+                # http://www.w3.org/1999/04/Editing/
+                headers['if-match'] = info['etag']
+
+            if method not in ["GET", "HEAD"] and self.cache and cachekey:
+                # RFC 2616 Section 13.10
+                self.cache.delete(cachekey)
+
+            # Check the vary header in the cache to see if this request
+            # matches what varies in the cache.
+            if method in ['GET', 'HEAD'] and 'vary' in info:
+                vary = info['vary']
+                vary_headers = vary.lower().replace(' ', '').split(',')
+                for header in vary_headers:
+                    key = '-varied-%s' % header
+                    value = info[key]
+                    if headers.get(header, None) != value:
+                        cached_value = None
+                        break
+
+            if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
+                if info.has_key('-x-permanent-redirect-url'):
+                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
+                    if redirections <= 0:
+                        raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "")
+                    (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers=headers, redirections=redirections - 1)
+                    response.previous = Response(info)
+                    response.previous.fromcache = True
+                else:
+                    # Determine our course of action:
+                    #   Is the cached entry fresh or stale?
+                    #   Has the client requested a non-cached response?
+                    #
+                    # There seem to be three possible answers:
+                    # 1. [FRESH] Return the cache entry w/o doing a GET
+                    # 2. [STALE] Do the GET (but add in cache validators if available)
+                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
+                    entry_disposition = _entry_disposition(info, headers)
+
+                    if entry_disposition == "FRESH":
+                        if not cached_value:
+                            info['status'] = '504'
+                            content = ""
+                        response = Response(info)
+                        if cached_value:
+                            response.fromcache = True
+                        return (response, content)
+
+                    if entry_disposition == "STALE":
+                        if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
+                            headers['if-none-match'] = info['etag']
+                        if info.has_key('last-modified') and not 'last-modified' in headers:
+                            headers['if-modified-since'] = info['last-modified']
+                    elif entry_disposition == "TRANSPARENT":
+                        pass
+
+                    (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
+
+                if response.status == 304 and method == "GET":
+                    # Rewrite the cache entry with the new end-to-end headers
+                    # Take all headers that are in response
+                    # and overwrite their values in info.
+                    # unless they are hop-by-hop, or are listed in the connection header.
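+                    # (_get_end2end_headers filters out hop-by-hop headers
+                    # such as Connection and Transfer-Encoding, which must not
+                    # be copied into the cached entry.)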
+                    for key in _get_end2end_headers(response):
+                        info[key] = response[key]
+                    merged_response = Response(info)
+                    if hasattr(response, "_stale_digest"):
+                        merged_response._stale_digest = response._stale_digest
+                    _updateCache(headers, merged_response, content, self.cache, cachekey)
+                    response = merged_response
+                    response.status = 200
+                    response.fromcache = True
+
+                elif response.status == 200:
+                    content = new_content
+                else:
+                    self.cache.delete(cachekey)
+                    content = new_content
+            else:
+                cc = _parse_cache_control(headers)
+                if cc.has_key('only-if-cached'):
+                    info['status'] = '504'
+                    response = Response(info)
+                    content = ""
+                else:
+                    (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
+        except Exception, e:
+            if self.force_exception_to_status_code:
+                if isinstance(e, HttpLib2ErrorWithResponse):
+                    response = e.response
+                    content = e.content
+                    response.status = 500
+                    response.reason = str(e)
+                elif isinstance(e, socket.timeout):
+                    content = "Request Timeout"
+                    response = Response({
+                        "content-type": "text/plain",
+                        "status": "408",
+                        "content-length": len(content)
+                    })
+                    response.reason = "Request Timeout"
+                else:
+                    content = str(e)
+                    response = Response({
+                        "content-type": "text/plain",
+                        "status": "400",
+                        "content-length": len(content)
+                    })
+                    response.reason = "Bad Request"
+            else:
+                raise
+
+
+        return (response, content)
+
+    def _get_proxy_info(self, scheme, authority):
+        """Return a ProxyInfo instance (or None) based on the scheme
+        and authority.
+        """
+        hostname, port = urllib.splitport(authority)
+        proxy_info = self.proxy_info
+        if callable(proxy_info):
+            proxy_info = proxy_info(scheme)
+
+        if (hasattr(proxy_info, 'applies_to')
+            and not proxy_info.applies_to(hostname)):
+            proxy_info = None
+        return proxy_info
+
+
+class Response(dict):
+    """An object more like email.Message than httplib.HTTPResponse."""
+
+    """Is this response from our local cache"""
+    fromcache = False
+
+    """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
+    version = 11
+
+    "Status code returned by server. "
+    status = 200
+
+    """Reason phrase returned by server."""
+    reason = "Ok"
+
+    previous = None
+
+    def __init__(self, info):
+        # info is either an email.Message or
+        # an httplib.HTTPResponse object.
+        if isinstance(info, httplib.HTTPResponse):
+            for key, value in info.getheaders():
+                self[key.lower()] = value
+            self.status = info.status
+            self['status'] = str(self.status)
+            self.reason = info.reason
+            self.version = info.version
+        elif isinstance(info, email.Message.Message):
+            for key, value in info.items():
+                self[key.lower()] = value
+            self.status = int(self['status'])
+        else:
+            for key, value in info.iteritems():
+                self[key.lower()] = value
+            self.status = int(self.get('status', self.status))
+            self.reason = self.get('reason', self.reason)
+
+
+    def __getattr__(self, name):
+        if name == 'dict':
+            return self
+        else:
+            raise AttributeError, name
diff --git a/awx/lib/site-packages/httplib2/cacerts.txt b/awx/lib/site-packages/httplib2/cacerts.txt
new file mode 100644
index 0000000000..d8a0027cc7
--- /dev/null
+++ b/awx/lib/site-packages/httplib2/cacerts.txt
@@ -0,0 +1,739 @@
+# Certificate Authority certificates for validating SSL connections.
+# +# This file contains PEM format certificates generated from +# http://mxr.mozilla.org/seamonkey/source/security/nss/lib/ckfw/builtins/certdata.txt +# +# ***** BEGIN LICENSE BLOCK ***** +# Version: MPL 1.1/GPL 2.0/LGPL 2.1 +# +# The contents of this file are subject to the Mozilla Public License Version +# 1.1 (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# http://www.mozilla.org/MPL/ +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License +# for the specific language governing rights and limitations under the +# License. +# +# The Original Code is the Netscape security libraries. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1994-2000 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# +# Alternatively, the contents of this file may be used under the terms of +# either the GNU General Public License Version 2 or later (the "GPL"), or +# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +# in which case the provisions of the GPL or the LGPL are applicable instead +# of those above. If you wish to allow use of your version of this file only +# under the terms of either the GPL or the LGPL, and not to allow others to +# use your version of this file under the terms of the MPL, indicate your +# decision by deleting the provisions above and replace them with the notice +# and other provisions required by the GPL or the LGPL. If you do not delete +# the provisions above, a recipient may use your version of this file under +# the terms of any one of the MPL, the GPL or the LGPL. 
+# +# ***** END LICENSE BLOCK ***** + +Verisign/RSA Secure Server CA +============================= + +-----BEGIN CERTIFICATE----- +MIICNDCCAaECEAKtZn5ORf5eV288mBle3cAwDQYJKoZIhvcNAQECBQAwXzELMAkG +A1UEBhMCVVMxIDAeBgNVBAoTF1JTQSBEYXRhIFNlY3VyaXR5LCBJbmMuMS4wLAYD +VQQLEyVTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk0 +MTEwOTAwMDAwMFoXDTEwMDEwNzIzNTk1OVowXzELMAkGA1UEBhMCVVMxIDAeBgNV +BAoTF1JTQSBEYXRhIFNlY3VyaXR5LCBJbmMuMS4wLAYDVQQLEyVTZWN1cmUgU2Vy +dmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGbMA0GCSqGSIb3DQEBAQUAA4GJ +ADCBhQJ+AJLOesGugz5aqomDV6wlAXYMra6OLDfO6zV4ZFQD5YRAUcm/jwjiioII +0haGN1XpsSECrXZogZoFokvJSyVmIlZsiAeP94FZbYQHZXATcXY+m3dM41CJVphI +uR2nKRoTLkoRWZweFdVJVCxzOmmCsZc5nG1wZ0jl3S3WyB57AgMBAAEwDQYJKoZI +hvcNAQECBQADfgBl3X7hsuyw4jrg7HFGmhkRuNPHoLQDQCYCPgmc4RKz0Vr2N6W3 +YQO2WxZpO8ZECAyIUwxrl0nHPjXcbLm7qt9cuzovk2C2qUtN8iD3zV9/ZHuO3ABc +1/p3yjkWWW8O6tO1g39NTUJWdrTJXwT4OPjr0l91X817/OWOgHz8UA== +-----END CERTIFICATE----- + +Thawte Personal Basic CA +======================== + +-----BEGIN CERTIFICATE----- +MIIDITCCAoqgAwIBAgIBADANBgkqhkiG9w0BAQQFADCByzELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMRowGAYD +VQQKExFUaGF3dGUgQ29uc3VsdGluZzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBT +ZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFBlcnNvbmFsIEJhc2lj +IENBMSgwJgYJKoZIhvcNAQkBFhlwZXJzb25hbC1iYXNpY0B0aGF3dGUuY29tMB4X +DTk2MDEwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgcsxCzAJBgNVBAYTAlpBMRUw +EwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEaMBgGA1UE +ChMRVGhhd3RlIENvbnN1bHRpbmcxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2Vy +dmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQZXJzb25hbCBCYXNpYyBD +QTEoMCYGCSqGSIb3DQEJARYZcGVyc29uYWwtYmFzaWNAdGhhd3RlLmNvbTCBnzAN +BgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAvLyTU23AUE+CFeZIlDWmWr5vQvoPR+53 +dXLdjUmbllegeNTKP1GzaQuRdhciB5dqxFGTS+CN7zeVoQxN2jSQHReJl+A1OFdK +wPQIcOk8RHtQfmGakOMj04gRRif1CwcOu93RfyAKiLlWCy4cgNrx454p7xS9CkT7 +G1sY0b8jkyECAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQF +AAOBgQAt4plrsD16iddZopQBHyvdEktTwq1/qqcAXJFAVyVKOKqEcLnZgA+le1z7 +c8a914phXAPjLSeoF+CEhULcXpvGt7Jtu3Sv5D/Lp7ew4F2+eIMllNLbgQ95B21P +9DkVWlIBe94y1k049hJcBlDfBVu9FEuh3ym6O0GN92NWod8isQ== +-----END CERTIFICATE----- + +Thawte Personal Premium CA +========================== + +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIBADANBgkqhkiG9w0BAQQFADCBzzELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMRowGAYD +VQQKExFUaGF3dGUgQ29uc3VsdGluZzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBT +ZXJ2aWNlcyBEaXZpc2lvbjEjMCEGA1UEAxMaVGhhd3RlIFBlcnNvbmFsIFByZW1p +dW0gQ0ExKjAoBgkqhkiG9w0BCQEWG3BlcnNvbmFsLXByZW1pdW1AdGhhd3RlLmNv +bTAeFw05NjAxMDEwMDAwMDBaFw0yMDEyMzEyMzU5NTlaMIHPMQswCQYDVQQGEwJa +QTEVMBMGA1UECBMMV2VzdGVybiBDYXBlMRIwEAYDVQQHEwlDYXBlIFRvd24xGjAY +BgNVBAoTEVRoYXd0ZSBDb25zdWx0aW5nMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9u +IFNlcnZpY2VzIERpdmlzaW9uMSMwIQYDVQQDExpUaGF3dGUgUGVyc29uYWwgUHJl +bWl1bSBDQTEqMCgGCSqGSIb3DQEJARYbcGVyc29uYWwtcHJlbWl1bUB0aGF3dGUu +Y29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDJZtn4B0TPuYwu8KHvE0Vs +Bd/eJxZRNkERbGw77f4QfRKe5ZtCmv5gMcNmt3M6SK5O0DI3lIi1DbbZ8/JE2dWI +Et12TfIa/G8jHnrx2JhFTgcQ7xZC0EN1bUre4qrJMf8fAHB8Zs8QJQi6+u4A6UYD +ZicRFTuqW/KY3TZCstqIdQIDAQABoxMwETAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBAUAA4GBAGk2ifc0KjNyL2071CKyuG+axTZmDhs8obF1Wub9NdP4qPIH +b4Vnjt4rueIXsDqg8A6iAJrf8xQVbrvIhVqYgPn/vnQdPfP+MCXRNzRn+qVxeTBh +KXLA4CxM+1bkOqhv5TJZUtt1KFBZDPgLGeSs2a+WjS9Q2wfD6h+rM+D1KzGJ +-----END CERTIFICATE----- + +Thawte Personal Freemail CA +=========================== + +-----BEGIN CERTIFICATE----- +MIIDLTCCApagAwIBAgIBADANBgkqhkiG9w0BAQQFADCB0TELMAkGA1UEBhMCWkEx 
+FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMRowGAYD +VQQKExFUaGF3dGUgQ29uc3VsdGluZzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBT +ZXJ2aWNlcyBEaXZpc2lvbjEkMCIGA1UEAxMbVGhhd3RlIFBlcnNvbmFsIEZyZWVt +YWlsIENBMSswKQYJKoZIhvcNAQkBFhxwZXJzb25hbC1mcmVlbWFpbEB0aGF3dGUu +Y29tMB4XDTk2MDEwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgdExCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEa +MBgGA1UEChMRVGhhd3RlIENvbnN1bHRpbmcxKDAmBgNVBAsTH0NlcnRpZmljYXRp +b24gU2VydmljZXMgRGl2aXNpb24xJDAiBgNVBAMTG1RoYXd0ZSBQZXJzb25hbCBG +cmVlbWFpbCBDQTErMCkGCSqGSIb3DQEJARYccGVyc29uYWwtZnJlZW1haWxAdGhh +d3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA1GnX1LCUZFtx6UfY +DFG26nKRsIRefS0Nj3sS34UldSh0OkIsYyeflXtL734Zhx2G6qPduc6WZBrCFG5E +rHzmj+hND3EfQDimAKOHePb5lIZererAXnbr2RSjXW56fAylS1V/Bhkpf56aJtVq +uzgkCGqYx7Hao5iR/Xnb5VrEHLkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zAN +BgkqhkiG9w0BAQQFAAOBgQDH7JJ+Tvj1lqVnYiqk8E0RYNBvjWBYYawmu1I1XAjP +MPuoSpaKH2JCI4wXD/S6ZJwXrEcp352YXtJsYHFcoqzceePnbgBHH7UNKOgCneSa +/RP0ptl8sfjcXyMmCZGAc9AUG95DqYMl8uacLxXK/qarigd1iwzdUYRr5PjRznei +gQ== +-----END CERTIFICATE----- + +Thawte Server CA +================ + +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD +VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm +MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx +MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3 +dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl +cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3 +DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD +gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91 +yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX +L+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGj +EzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG +7oWDTSEwjsrZqG9JGubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6e +QNuozDJ0uW8NxuOzRAvZim+aKZuZGCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZ +qdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== + +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD +VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFByZW1pdW0gU2Vy +dmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZlckB0aGF3dGUuY29t +MB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYTAlpB +MRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsG +A1UEChMUVGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRp +b24gU2VydmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNl +cnZlciBDQTEoMCYGCSqGSIb3DQEJARYZcHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNv +bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2aovXwlue2oFBYo847kkE +VdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIhUdib0GfQ +ug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMR +uHM/qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG +9w0BAQQFAAOBgQAmSCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUI +hfzJATj/Tb7yFkJD57taRvvBxhEf8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JM +pAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7tUCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA +================= + 
+-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV +UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy +dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1 +MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx +dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B +AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f +BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A +cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC +AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ +MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm +aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw +ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj +IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF +MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA +A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y +7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh +1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4 +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority +======================================================= + +-----BEGIN CERTIFICATE----- +MIICPTCCAaYCEQDNun9W8N/kvFT+IqyzcqpVMA0GCSqGSIb3DQEBAgUAMF8xCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE3MDUGA1UECxMuQ2xh +c3MgMSBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05 +NjAxMjkwMDAwMDBaFw0yODA4MDEyMzU5NTlaMF8xCzAJBgNVBAYTAlVTMRcwFQYD +VQQKEw5WZXJpU2lnbiwgSW5jLjE3MDUGA1UECxMuQ2xhc3MgMSBQdWJsaWMgUHJp +bWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCBnzANBgkqhkiG9w0BAQEFAAOB +jQAwgYkCgYEA5Rm/baNWYS2ZSHH2Z965jeu3noaACpEO+jglr0aIguVzqKCbJF0N +H8xlbgyw0FaEGIeaBpsQoXPftFg5a27B9hXVqKg/qhIGjTGsf7A01480Z4gJzRQR +4k5FVmkfeAKA2txHkSm7NsljXMXg1y2He6G3MrB7MLoqLzGq7qNn2tsCAwEAATAN +BgkqhkiG9w0BAQIFAAOBgQBMP7iLxmjf7kMzDl3ppssHhE16M/+SG/Q2rdiVIjZo +EWx8QszznC7EBz8UsA9P/5CSdvnivErpj82ggAr3xSnxgiJduLHdgSOjeyUVRjB5 +FvjqBUuUfx3CHMjjt/QQQDwTw18fU+hI5Ia0e6E1sHslurjTjqs/OJ0ANACY89Fx +lA== +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority +======================================================= + +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEC0b/EoXjaOR6+f/9YtFvgswDQYJKoZIhvcNAQECBQAwXzELMAkG +A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz +cyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2 +MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV +BAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAyIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN +ADCBiQKBgQC2WoujDWojg4BrzzmH9CETMwZMJaLtVRKXxaeAufqDwSCg+i8VDXyh +YGt+eSz6Bg86rvYbb7HS/y8oUl+DfUvEerf4Zh+AVPy3wo5ZShRXRtGak75BkQO7 +FYCTXOvnzAhsPz6zSvz/S2wj1VCCJkQZjiPDceoZJEcEnnW/yKYAHwIDAQABMA0G +CSqGSIb3DQEBAgUAA4GBAIobK/o5wXTXXtgZZKJYSi034DNHD6zt96rbHuSLBlxg +J8pFUs4W7z8GZOeUaHxgMxURaa+dYo2jA1Rrpr7l7gUYYAS/QoD90KioHgE796Nc +r6Pc5iaAIzy4RHT3Cq5Ji2F4zCS/iIqnDupzGUH9TQPwiNHleI2lKk/2lw0Xd8rY +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= + +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkG +A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz +cyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2 +MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV 
+BAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN +ADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhE +BarsAx94f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/is +I19wKTakyYbnsZogy1Olhec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0G +CSqGSIb3DQEBAgUAA4GBALtMEivPLCYATxQT3ab7/AoRhIzzKBxnki98tsX63/Do +lbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59AhWM1pF+NEHJwZRDmJXNyc +AA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2OmufTqj/ZA1k +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G2 +============================================================ + +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEEzH6qqYPnHTkxD4PTqJkZIwDQYJKoZIhvcNAQEFBQAwgcExCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh +c3MgMSBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy +MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp +emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X +DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw +FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMg +UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo +YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5 +MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB +AQUAA4GNADCBiQKBgQCq0Lq+Fi24g9TK0g+8djHKlNgdk4xWArzZbxpvUjZudVYK +VdPfQ4chEWWKfo+9Id5rMj8bhDSVBZ1BNeuS65bdqlk/AVNtmU/t5eIqWpDBucSm +Fc/IReumXY6cPvBkJHalzasab7bYe1FhbqZ/h8jit+U03EGI6glAvnOSPWvndQID +AQABMA0GCSqGSIb3DQEBBQUAA4GBAKlPww3HZ74sy9mozS11534Vnjty637rXC0J +h9ZrbWB85a7FkCMMXErQr7Fd88e2CtvgFZMN3QO8x3aKtd1Pw5sTdbgBwObJW2ul +uIncrKTdcu1OofdPvAbT6shkdHvClUGcZXNY8ZCaPGqxmMnEh7zPRW1F4m4iP/68 +DzFc6PLZ +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G2 +============================================================ + +-----BEGIN CERTIFICATE----- +MIIDAzCCAmwCEQC5L2DMiJ+hekYJuFtwbIqvMA0GCSqGSIb3DQEBBQUAMIHBMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0Ns +YXNzIDIgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH +MjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9y +aXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1c3QgTmV0d29yazAe +Fw05ODA1MTgwMDAwMDBaFw0yODA4MDEyMzU5NTlaMIHBMQswCQYDVQQGEwJVUzEX +MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGlj +IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMx +KGMpIDE5OTggVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1c3QgTmV0d29yazCBnzANBgkqhkiG9w0B +AQEFAAOBjQAwgYkCgYEAp4gBIXQs5xoD8JjhlzwPIQjxnNuX6Zr8wgQGE75fUsjM +HiwSViy4AWkszJkfrbCWrnkE8hM5wXuYuggs6MKEEyyqaekJ9MepAqRCwiNPStjw +DqL7MWzJ5m+ZJwf15vRMeJ5t60aG+rmGyVTyssSv1EYcWskVMP8NbPUtDm3Of3cC +AwEAATANBgkqhkiG9w0BAQUFAAOBgQByLvl/0fFx+8Se9sVeUYpAmLho+Jscg9ji +nb3/7aHmZuovCfTK1+qlK5X2JGCGTUQug6XELaDTrnhpb3LabK4I8GOSN+a7xDAX +rXfMSTWqz9iP0b63GJZHc2pUIjRkLbYWm1lbtFFZOrMLFPQS32eg9K0yZF6xRnIn +jBJ7xUS0rg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ + +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh +c3MgMyBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy +MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp +emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X 
+DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw +FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMg +UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo +YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5 +MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB +AQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCOFoUgRm1HP9SFIIThbbP4 +pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71lSk8UOg0 +13gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwID +AQABMA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSk +U01UbSuvDV1Ai2TT1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7i +F6YM40AIOw7n60RzKprxaZLvcRTDOaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpY +oJ2daZH9 +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G2 +============================================================ + +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEDKIjprS9esTR/h/xCA3JfgwDQYJKoZIhvcNAQEFBQAwgcExCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh +c3MgNCBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy +MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp +emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X +DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw +FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgNCBQdWJsaWMg +UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo +YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5 +MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB +AQUAA4GNADCBiQKBgQC68OTP+cSuhVS5B1f5j8V/aBH4xBewRNzjMHPVKmIquNDM +HO0oW369atyzkSTKQWI8/AIBvxwWMZQFl3Zuoq29YRdsTjCG8FE3KlDHqGKB3FtK +qsGgtG7rL+VXxbErQHDbWk2hjh+9Ax/YA9SPTJlxvOKCzFjomDqG04Y48wApHwID +AQABMA0GCSqGSIb3DQEBBQUAA4GBAIWMEsGnuVAVess+rLhDityq3RS6iYF+ATwj +cSGIL4LcY/oCRaxFWdcqWERbt5+BO5JoPeI3JPV7bI92NZYJqFmduc4jq3TWg/0y +cyfYaT5DdPauxYma51N86Xv2S/PBZYPejYqcPIiNOVn8qj8ijaHBZlCBckztImRP +T8qAkbYp +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G3 +============================================================ + +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN2E1Lm0+afY8wR4 +nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/EbRrsC+MO +8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjV +ojYJrKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjb +PG7PoBMAGrgnoeS+Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP2 +6KbqxzcSXKMpHgLZ2x87tNcPVkeBFQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vr +n5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAq2aN17O6x5q25lXQBfGfMY1a +qtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/Ny9Sn2WCVhDr4 +wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3 +ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrs 
+pSCAaWihT37ha88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4 +E1Z5T21Q6huwtVexN2ZYI/PcD98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g== +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G3 +============================================================ + +-----BEGIN CERTIFICATE----- +MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVy +aVNpZ24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24s +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNp +Z24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJBgNV +BAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNp +Z24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24g +Q2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArwoNwtUs22e5LeWU +J92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6tW8UvxDO +JxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUY +wZF7C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9o +koqQHgiBVrKtaaNS0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjN +qWm6o+sdDZykIKbBoMXRRkwXbdKsZj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/E +Srg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0JhU8wI1NQ0kdvekhktdmnLfe +xbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf0xwLRtxyID+u +7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU +sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RI +sH/7NiXaldDxJBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTP +cjnhsUPgKM+351psE2tJs//jGHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ + +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ + 
+-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1 +GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ ++mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd +U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm +NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY +ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ +ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1 +CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq +g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c +2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/ +bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== + +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT +ZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw +MDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj +dXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l +c3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC +UdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc +58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/ +o5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr +aGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA +A4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA +Z70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv +8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= + +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBT +ZWN1cmUgZUJ1c2luZXNzIENBLTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQw +MDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5j +LjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENBLTEwgZ8wDQYJ +KoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ1MRo +RvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBu +WqDZQu4aIZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKw +Env+j6YDAgMBAAGjZjBkMBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFEp4MlIR21kWNl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRK +eDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQFAAOBgQB1W6ibAxHm6VZM +zfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5lSE/9dR+ +WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN +/Bf+KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END 
CERTIFICATE----- + +Equifax Secure eBusiness CA 2 +============================= + +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2Vj +dXJlIGVCdXNpbmVzcyBDQS0yMB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0 +NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkVxdWlmYXggU2VjdXJlMSYwJAYD +VQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCBnzANBgkqhkiG9w0B +AQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn2Z0G +vxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/ +BPO3QSQ5BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0C +AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEX +MBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJl +IGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTkw +NjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9euSBIplBq +y/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQF +MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA +A4GBAAyGgq3oThr1jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy +0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1 +E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUmV+GRMOrN +-----END CERTIFICATE----- + +Thawte Time Stamping CA +======================= + +-----BEGIN CERTIFICATE----- +MIICoTCCAgqgAwIBAgIBADANBgkqhkiG9w0BAQQFADCBizELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTEUMBIGA1UEBxMLRHVyYmFudmlsbGUxDzAN +BgNVBAoTBlRoYXd0ZTEdMBsGA1UECxMUVGhhd3RlIENlcnRpZmljYXRpb24xHzAd +BgNVBAMTFlRoYXd0ZSBUaW1lc3RhbXBpbmcgQ0EwHhcNOTcwMTAxMDAwMDAwWhcN +MjAxMjMxMjM1OTU5WjCBizELMAkGA1UEBhMCWkExFTATBgNVBAgTDFdlc3Rlcm4g +Q2FwZTEUMBIGA1UEBxMLRHVyYmFudmlsbGUxDzANBgNVBAoTBlRoYXd0ZTEdMBsG +A1UECxMUVGhhd3RlIENlcnRpZmljYXRpb24xHzAdBgNVBAMTFlRoYXd0ZSBUaW1l +c3RhbXBpbmcgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYrWHhhRYZT +6jR7UZztsOYuGA7+4F+oJ9O0yeB8WU4WDnNUYMF/9p8u6TqFJBU820cEY8OexJQa +Wt9MevPZQx08EHp5JduQ/vBR5zDWQQD9nyjfeb6Uu522FOMjhdepQeBMpHmwKxqL +8vg7ij5FrHGSALSQQZj7X+36ty6K+Ig3AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEEBQADgYEAZ9viwuaHPUCDhjc1fR/OmsMMZiCouqoEiYbC +9RAIDb/LogWK0E02PvTX72nGXuSwlG9KuefeW4i2e9vjJ+V2w/A1wcu1J5szedyQ +pgCed/r8zSeUQhac0xxo7L9c3eWpexAKMnRUEzGLhQOEkbdYATAUOK8oyvyxUBkZ +CayJSdM= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== + +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU 
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ + +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv +MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +Entrust.net Secure Server Certification Authority +================================================= + +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMC +VVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5u +ZXQvQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMc +KGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5u +ZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05OTA1 +MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIGA1UE +ChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5j +b3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUg +U2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQaO2f55M28Qpku0f1BBc/ +I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5gXpa0zf3 +wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OC +AdcwggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHb +oIHYpIHVMIHSMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5 +BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p +dHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1pdGVk 
+MTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp +b24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0 +MFqBDzIwMTkwNTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8Bdi +E1U9s/8KAGv7UISX8+1i0BowHQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAa +MAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EABAwwChsEVjQuMAMCBJAwDQYJKoZI +hvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyNEwr75Ji174z4xRAN +95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9n9cd +2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Go Daddy Certification Authority Root Certificate Bundle +======================================================== + +-----BEGIN CERTIFICATE----- +MIIE3jCCA8agAwIBAgICAwEwDQYJKoZIhvcNAQEFBQAwYzELMAkGA1UEBhMCVVMx +ITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMTYw +MTU0MzdaFw0yNjExMTYwMTU0MzdaMIHKMQswCQYDVQQGEwJVUzEQMA4GA1UECBMH +QXJpem9uYTETMBEGA1UEBxMKU2NvdHRzZGFsZTEaMBgGA1UEChMRR29EYWRkeS5j +b20sIEluYy4xMzAxBgNVBAsTKmh0dHA6Ly9jZXJ0aWZpY2F0ZXMuZ29kYWRkeS5j +b20vcmVwb3NpdG9yeTEwMC4GA1UEAxMnR28gRGFkZHkgU2VjdXJlIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5MREwDwYDVQQFEwgwNzk2OTI4NzCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAMQt1RWMnCZM7DI161+4WQFapmGBWTtwY6vj3D3H +KrjJM9N55DrtPDAjhI6zMBS2sofDPZVUBJ7fmd0LJR4h3mUpfjWoqVTr9vcyOdQm +VZWt7/v+WIbXnvQAjYwqDL1CBM6nPwT27oDyqu9SoWlm2r4arV3aLGbqGmu75RpR +SgAvSMeYddi5Kcju+GZtCpyz8/x4fKL4o/K1w/O5epHBp+YlLpyo7RJlbmr2EkRT +cDCVw5wrWCs9CHRK8r5RsL+H0EwnWGu1NcWdrxcx+AuP7q2BNgWJCJjPOq8lh8BJ +6qf9Z/dFjpfMFDniNoW1fho3/Rb2cRGadDAW/hOUoz+EDU8CAwEAAaOCATIwggEu +MB0GA1UdDgQWBBT9rGEyk2xF1uLuhV+auud2mWjM5zAfBgNVHSMEGDAWgBTSxLDS +kdRMEXGzYcs9of7dqGrU4zASBgNVHRMBAf8ECDAGAQH/AgEAMDMGCCsGAQUFBwEB +BCcwJTAjBggrBgEFBQcwAYYXaHR0cDovL29jc3AuZ29kYWRkeS5jb20wRgYDVR0f +BD8wPTA7oDmgN4Y1aHR0cDovL2NlcnRpZmljYXRlcy5nb2RhZGR5LmNvbS9yZXBv +c2l0b3J5L2dkcm9vdC5jcmwwSwYDVR0gBEQwQjBABgRVHSAAMDgwNgYIKwYBBQUH +AgEWKmh0dHA6Ly9jZXJ0aWZpY2F0ZXMuZ29kYWRkeS5jb20vcmVwb3NpdG9yeTAO +BgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBANKGwOy9+aG2Z+5mC6IG +OgRQjhVyrEp0lVPLN8tESe8HkGsz2ZbwlFalEzAFPIUyIXvJxwqoJKSQ3kbTJSMU +A2fCENZvD117esyfxVgqwcSeIaha86ykRvOe5GPLL5CkKSkB2XIsKd83ASe8T+5o +0yGPwLPk9Qnt0hCqU7S+8MxZC9Y7lhyVJEnfzuz9p0iRFEUOOjZv2kWzRaJBydTX +RE4+uXR21aITVSzGh6O1mawGhId/dQb8vxRMDsxuxN89txJx9OjxUUAiKEngHUuH +qDTMBqLdElrRhjZkAzVvb3du6/KFUJheqwNTrZEjYx8WnM25sgVjOuH0aBsXBTWV +U+4= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIE+zCCBGSgAwIBAgICAQ0wDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1Zh +bGlDZXJ0IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIElu +Yy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24g +QXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAe +BgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTA0MDYyOTE3MDYyMFoX +DTI0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBE +YWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3MgMiBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggENADCCAQgC +ggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+q +N1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiO +r18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lN +f4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+YihfukEH +U1jPEX44dMX4/7VpkI+EdOqXG68CAQOjggHhMIIB3TAdBgNVHQ4EFgQU0sSw0pHU +TBFxs2HLPaH+3ahq1OMwgdIGA1UdIwSByjCBx6GBwaSBvjCBuzEkMCIGA1UEBxMb 
+VmFsaUNlcnQgVmFsaWRhdGlvbiBOZXR3b3JrMRcwFQYDVQQKEw5WYWxpQ2VydCwg +SW5jLjE1MDMGA1UECxMsVmFsaUNlcnQgQ2xhc3MgMiBQb2xpY3kgVmFsaWRhdGlv +biBBdXRob3JpdHkxITAfBgNVBAMTGGh0dHA6Ly93d3cudmFsaWNlcnQuY29tLzEg +MB4GCSqGSIb3DQEJARYRaW5mb0B2YWxpY2VydC5jb22CAQEwDwYDVR0TAQH/BAUw +AwEB/zAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly9vY3NwLmdv +ZGFkZHkuY29tMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jZXJ0aWZpY2F0ZXMu +Z29kYWRkeS5jb20vcmVwb3NpdG9yeS9yb290LmNybDBLBgNVHSAERDBCMEAGBFUd +IAAwODA2BggrBgEFBQcCARYqaHR0cDovL2NlcnRpZmljYXRlcy5nb2RhZGR5LmNv +bS9yZXBvc2l0b3J5MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOBgQC1 +QPmnHfbq/qQaQlpE9xXUhUaJwL6e4+PrxeNYiY+Sn1eocSxI0YGyeR+sBjUZsE4O +WBsUs5iB0QQeyAfJg594RAoYC5jcdnplDQ1tgMQLARzLrUc+cb53S8wGd9D0Vmsf +SxOaFIqII6hR8INMqzW/Rn453HWkrugp++85j09VZw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0 +IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz +BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y +aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG +9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMTk1NFoXDTE5MDYy +NjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y +azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw +Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl +cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDOOnHK5avIWZJV16vY +dA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVCCSRrCl6zfN1SLUzm1NZ9 +WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7RfZHM047QS +v4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9v +UJSZSWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTu +IYEZoDJJKPTEjlbVUjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwC +W/POuZ6lcg5Ktz885hZo+L7tdEy8W9ViH0Pd +-----END CERTIFICATE----- + +GeoTrust Global CA +================== + +-----BEGIN CERTIFICATE----- +MIIDfTCCAuagAwIBAgIDErvmMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNVBAYTAlVT +MRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0 +aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDIwNTIxMDQwMDAwWhcNMTgwODIxMDQwMDAw +WjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UE +AxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9m +OSm9BXiLnTjoBbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIu +T8rxh0PBFpVXLVDviS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6c +JmTM386DGXHKTubU1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmR +Cw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5asz +PeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo4HwMIHtMB8GA1UdIwQYMBaAFEjm +aPkr0rKV10fYIyAQTzOYkJ/UMB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrM +TjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjA6BgNVHR8EMzAxMC+g +LaArhilodHRwOi8vY3JsLmdlb3RydXN0LmNvbS9jcmxzL3NlY3VyZWNhLmNybDBO +BgNVHSAERzBFMEMGBFUdIAAwOzA5BggrBgEFBQcCARYtaHR0cHM6Ly93d3cuZ2Vv +dHJ1c3QuY29tL3Jlc291cmNlcy9yZXBvc2l0b3J5MA0GCSqGSIb3DQEBBQUAA4GB +AHbhEm5OSxYShjAGsoEIz/AIx8dxfmbuwu3UOx//8PDITtZDOLC5MH0Y0FWDomrL +NhGc6Ehmo21/uBPUR/6LWlxz/K7ZGzIZOKuXNBSqltLroxwUCEm2u+WR74M26x1W +b8ravHNjkOR/ez4iyz0H7V84dJzjA1BOoa+Y7mHyhD8S +-----END CERTIFICATE----- + diff --git a/awx/lib/site-packages/httplib2/iri2uri.py b/awx/lib/site-packages/httplib2/iri2uri.py new file mode 100644 index 0000000000..d88c91fdfb --- /dev/null +++ b/awx/lib/site-packages/httplib2/iri2uri.py @@ -0,0 +1,110 @@ +""" +iri2uri + +Converts an IRI to a URI. 
+ +""" +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = [] +__version__ = "1.0.0" +__license__ = "MIT" +__history__ = """ +""" + +import urlparse + + +# Convert an IRI to a URI following the rules in RFC 3987 +# +# The characters we need to enocde and escape are defined in the spec: +# +# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD +# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF +# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD +# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD +# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD +# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD +# / %xD0000-DFFFD / %xE1000-EFFFD + +escape_range = [ + (0xA0, 0xD7FF), + (0xE000, 0xF8FF), + (0xF900, 0xFDCF), + (0xFDF0, 0xFFEF), + (0x10000, 0x1FFFD), + (0x20000, 0x2FFFD), + (0x30000, 0x3FFFD), + (0x40000, 0x4FFFD), + (0x50000, 0x5FFFD), + (0x60000, 0x6FFFD), + (0x70000, 0x7FFFD), + (0x80000, 0x8FFFD), + (0x90000, 0x9FFFD), + (0xA0000, 0xAFFFD), + (0xB0000, 0xBFFFD), + (0xC0000, 0xCFFFD), + (0xD0000, 0xDFFFD), + (0xE1000, 0xEFFFD), + (0xF0000, 0xFFFFD), + (0x100000, 0x10FFFD), +] + +def encode(c): + retval = c + i = ord(c) + for low, high in escape_range: + if i < low: + break + if i >= low and i <= high: + retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')]) + break + return retval + + +def iri2uri(uri): + """Convert an IRI to a URI. Note that IRIs must be + passed in a unicode strings. That is, do not utf-8 encode + the IRI before passing it into the function.""" + if isinstance(uri ,unicode): + (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri) + authority = authority.encode('idna') + # For each character in 'ucschar' or 'iprivate' + # 1. encode as utf-8 + # 2. 
+        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
+        uri = "".join([encode(c) for c in uri])
+    return uri
+
+if __name__ == "__main__":
+    import unittest
+
+    class Test(unittest.TestCase):
+
+        def test_uris(self):
+            """Test that URIs are invariant under the transformation."""
+            invariant = [
+                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
+                u"http://www.ietf.org/rfc/rfc2396.txt",
+                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
+                u"mailto:John.Doe@example.com",
+                u"news:comp.infosystems.www.servers.unix",
+                u"tel:+1-816-555-1212",
+                u"telnet://192.0.2.16:80/",
+                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
+            for uri in invariant:
+                self.assertEqual(uri, iri2uri(uri))
+
+        def test_iri(self):
+            """ Test that the right type of escaping is done for each part of the URI."""
+            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
+            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
+            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
+            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
+            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
+            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
+            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
+
+    unittest.main()
+
+
diff --git a/awx/lib/site-packages/httplib2/socks.py b/awx/lib/site-packages/httplib2/socks.py
new file mode 100644
index 0000000000..0991f4cf6e
--- /dev/null
+++ b/awx/lib/site-packages/httplib2/socks.py
@@ -0,0 +1,438 @@
+"""SocksiPy - Python SOCKS module.
+Version 1.00
+
+Copyright 2006 Dan-Haim. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+3. Neither the name of Dan Haim nor the names of his contributors may be used
+   to endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
+OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+This module provides a standard socket-like interface for Python
+for tunneling connections through SOCKS proxies.
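+
+Illustrative usage (a sketch, not part of the original module; the proxy
+host below is a placeholder):
+
+    import urllib2
+    import socks
+    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "proxy.example.com", 1080)
+    socks.wrapmodule(urllib2)   # urllib2 connections now go via the proxy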
+ +""" + +""" + +Minor modifications made by Christopher Gilbert (http://motomastyle.com/) +for use in PyLoris (http://pyloris.sourceforge.net/) + +Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/) +mainly to merge bug fixes found in Sourceforge + +""" + +import base64 +import socket +import struct +import sys + +if getattr(socket, 'socket', None) is None: + raise ImportError('socket.socket missing, proxy support unusable') + +PROXY_TYPE_SOCKS4 = 1 +PROXY_TYPE_SOCKS5 = 2 +PROXY_TYPE_HTTP = 3 +PROXY_TYPE_HTTP_NO_TUNNEL = 4 + +_defaultproxy = None +_orgsocket = socket.socket + +class ProxyError(Exception): pass +class GeneralProxyError(ProxyError): pass +class Socks5AuthError(ProxyError): pass +class Socks5Error(ProxyError): pass +class Socks4Error(ProxyError): pass +class HTTPError(ProxyError): pass + +_generalerrors = ("success", + "invalid data", + "not connected", + "not available", + "bad proxy type", + "bad input") + +_socks5errors = ("succeeded", + "general SOCKS server failure", + "connection not allowed by ruleset", + "Network unreachable", + "Host unreachable", + "Connection refused", + "TTL expired", + "Command not supported", + "Address type not supported", + "Unknown error") + +_socks5autherrors = ("succeeded", + "authentication is required", + "all offered authentication methods were rejected", + "unknown username or invalid password", + "unknown error") + +_socks4errors = ("request granted", + "request rejected or failed", + "request rejected because SOCKS server cannot connect to identd on the client", + "request rejected because the client program and identd report different user-ids", + "unknown error") + +def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None): + """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]]) + Sets a default proxy which all further socksocket objects will use, + unless explicitly changed. + """ + global _defaultproxy + _defaultproxy = (proxytype, addr, port, rdns, username, password) + +def wrapmodule(module): + """wrapmodule(module) + Attempts to replace a module's socket library with a SOCKS socket. Must set + a default proxy using setdefaultproxy(...) first. + This will only work on modules that import socket directly into the namespace; + most of the Python Standard Library falls into this category. + """ + if _defaultproxy != None: + module.socket.socket = socksocket + else: + raise GeneralProxyError((4, "no proxy specified")) + +class socksocket(socket.socket): + """socksocket([family[, type[, proto]]]) -> socket object + Open a SOCKS enabled socket. The parameters are the same as + those of the standard socket init. In order for SOCKS to work, + you must specify family=AF_INET, type=SOCK_STREAM and proto=0. + """ + + def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None): + _orgsocket.__init__(self, family, type, proto, _sock) + if _defaultproxy != None: + self.__proxy = _defaultproxy + else: + self.__proxy = (None, None, None, None, None, None) + self.__proxysockname = None + self.__proxypeername = None + self.__httptunnel = True + + def __recvall(self, count): + """__recvall(count) -> data + Receive EXACTLY the number of bytes requested from the socket. + Blocks until the required number of bytes have been received. 
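The read-exactly-N loop in __recvall below is the standard framing pattern for binary protocols over TCP, where recv() may legally return fewer bytes than requested. The same idea as a standalone sketch (assuming an already-connected socket):

    def recv_exactly(sock, count):
        # recv() may return a short read; loop until `count` bytes arrive.
        data = b""
        while len(data) < count:
            chunk = sock.recv(count - len(data))
            if not chunk:
                raise IOError("connection closed before %d bytes arrived" % count)
            data += chunk
        return data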
+        """
+        data = self.recv(count)
+        while len(data) < count:
+            d = self.recv(count-len(data))
+            if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
+            data = data + d
+        return data
+
+    def sendall(self, content, *args):
+        """ override socket.socket.sendall method to rewrite the header
+        for non-tunneling proxies if needed
+        """
+        if not self.__httptunnel:
+            content = self.__rewriteproxy(content)
+        return super(socksocket, self).sendall(content, *args)
+
+    def __rewriteproxy(self, header):
+        """ rewrite HTTP request headers to support non-tunneling proxies
+        (i.e. those which do not support the CONNECT method).
+        This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
+        """
+        host, endpt = None, None
+        hdrs = header.split("\r\n")
+        for hdr in hdrs:
+            if hdr.lower().startswith("host:"):
+                host = hdr
+            elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
+                endpt = hdr
+        if host and endpt:
+            hdrs.remove(host)
+            hdrs.remove(endpt)
+            host = host.split(" ")[1]
+            endpt = endpt.split(" ")
+            if (self.__proxy[4] != None and self.__proxy[5] != None):
+                hdrs.insert(0, self.__getauthheader())
+                hdrs.insert(0, "Host: %s" % host)
+            hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
+        return "\r\n".join(hdrs)
+
+    def __getauthheader(self):
+        auth = self.__proxy[4] + ":" + self.__proxy[5]
+        return "Proxy-Authorization: Basic " + base64.b64encode(auth)
+
+    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
+        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
+        Sets the proxy to be used.
+        proxytype -  The type of the proxy to be used. Three types
+                     are supported: PROXY_TYPE_SOCKS4 (including socks4a),
+                     PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
+        addr -       The address of the server (IP or DNS).
+        port -       The port of the server. Defaults to 1080 for SOCKS
+                     servers and 8080 for HTTP proxy servers.
+        rdns -       Should DNS queries be performed on the remote side
+                     (rather than the local side). The default is True.
+                     Note: This has no effect with SOCKS4 servers.
+        username -   Username to authenticate with to the server.
+                     The default is no authentication.
+        password -   Password to authenticate with to the server.
+                     Only relevant when username is also provided.
+        """
+        self.__proxy = (proxytype, addr, port, rdns, username, password)
+
+    def __negotiatesocks5(self, destaddr, destport):
+        """__negotiatesocks5(self,destaddr,destport)
+        Negotiates a connection through a SOCKS5 server.
+        """
+        # First we'll send the authentication packages we support.
+        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
+            # The username/password details were supplied to the
+            # setproxy method so we support the USERNAME/PASSWORD
+            # authentication (in addition to the standard none).
+            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
+        else:
+            # No username/password were entered, therefore we
+            # only support connections with no authentication.
+            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
+        # We'll receive the server's response to determine which
+        # method was selected
+        chosenauth = self.__recvall(2)
+        if chosenauth[0:1] != chr(0x05).encode():
+            self.close()
+            raise GeneralProxyError((1, _generalerrors[1]))
+        # Check the chosen authentication method
+        if chosenauth[1:2] == chr(0x00).encode():
+            # No authentication is required
+            pass
+        elif chosenauth[1:2] == chr(0x02).encode():
+            # Okay, we need to perform a basic username/password
+            # authentication.
+ self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5]) + authstat = self.__recvall(2) + if authstat[0:1] != chr(0x01).encode(): + # Bad response + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + if authstat[1:2] != chr(0x00).encode(): + # Authentication failed + self.close() + raise Socks5AuthError((3, _socks5autherrors[3])) + # Authentication succeeded + else: + # Reaching here is always bad + self.close() + if chosenauth[1] == chr(0xFF).encode(): + raise Socks5AuthError((2, _socks5autherrors[2])) + else: + raise GeneralProxyError((1, _generalerrors[1])) + # Now we can request the actual connection + req = struct.pack('BBB', 0x05, 0x01, 0x00) + # If the given destination address is an IP address, we'll + # use the IPv4 address request even if remote resolving was specified. + try: + ipaddr = socket.inet_aton(destaddr) + req = req + chr(0x01).encode() + ipaddr + except socket.error: + # Well it's not an IP number, so it's probably a DNS name. + if self.__proxy[3]: + # Resolve remotely + ipaddr = None + req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr + else: + # Resolve locally + ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) + req = req + chr(0x01).encode() + ipaddr + req = req + struct.pack(">H", destport) + self.sendall(req) + # Get the response + resp = self.__recvall(4) + if resp[0:1] != chr(0x05).encode(): + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + elif resp[1:2] != chr(0x00).encode(): + # Connection failed + self.close() + if ord(resp[1:2])<=8: + raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])])) + else: + raise Socks5Error((9, _socks5errors[9])) + # Get the bound address/port + elif resp[3:4] == chr(0x01).encode(): + boundaddr = self.__recvall(4) + elif resp[3:4] == chr(0x03).encode(): + resp = resp + self.recv(1) + boundaddr = self.__recvall(ord(resp[4:5])) + else: + self.close() + raise GeneralProxyError((1,_generalerrors[1])) + boundport = struct.unpack(">H", self.__recvall(2))[0] + self.__proxysockname = (boundaddr, boundport) + if ipaddr != None: + self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) + else: + self.__proxypeername = (destaddr, destport) + + def getproxysockname(self): + """getsockname() -> address info + Returns the bound IP address and port number at the proxy. + """ + return self.__proxysockname + + def getproxypeername(self): + """getproxypeername() -> address info + Returns the IP and port number of the proxy. + """ + return _orgsocket.getpeername(self) + + def getpeername(self): + """getpeername() -> address info + Returns the IP address and port number of the destination + machine (note: getproxypeername returns the proxy) + """ + return self.__proxypeername + + def __negotiatesocks4(self,destaddr,destport): + """__negotiatesocks4(self,destaddr,destport) + Negotiates a connection through a SOCKS4 server. + """ + # Check if the destination address provided is an IP address + rmtrslv = False + try: + ipaddr = socket.inet_aton(destaddr) + except socket.error: + # It's a DNS name. Check where it should be resolved. 
+            if self.__proxy[3]:
+                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
+                rmtrslv = True
+            else:
+                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
+        # Construct the request packet
+        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
+        # The username parameter is considered userid for SOCKS4
+        if self.__proxy[4] != None:
+            req = req + self.__proxy[4]
+        req = req + chr(0x00).encode()
+        # DNS name if remote resolving is required
+        # NOTE: This is actually an extension to the SOCKS4 protocol
+        # called SOCKS4A and may not be supported in all cases.
+        if rmtrslv:
+            req = req + destaddr + chr(0x00).encode()
+        self.sendall(req)
+        # Get the response from the server
+        resp = self.__recvall(8)
+        if resp[0:1] != chr(0x00).encode():
+            # Bad data
+            self.close()
+            raise GeneralProxyError((1,_generalerrors[1]))
+        if resp[1:2] != chr(0x5A).encode():
+            # Server returned an error
+            self.close()
+            if ord(resp[1:2]) in (91, 92, 93):
+                self.close()
+                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
+            else:
+                raise Socks4Error((94, _socks4errors[4]))
+        # Get the bound address/port
+        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
+        if rmtrslv != None:
+            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
+        else:
+            self.__proxypeername = (destaddr, destport)
+
+    def __negotiatehttp(self, destaddr, destport):
+        """__negotiatehttp(self,destaddr,destport)
+        Negotiates a connection through an HTTP server.
+        """
+        # If we need to resolve locally, we do this now
+        if not self.__proxy[3]:
+            addr = socket.gethostbyname(destaddr)
+        else:
+            addr = destaddr
+        headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
+        headers += ["Host: ", destaddr, "\r\n"]
+        if (self.__proxy[4] != None and self.__proxy[5] != None):
+            headers += [self.__getauthheader(), "\r\n"]
+        headers.append("\r\n")
+        self.sendall("".join(headers).encode())
+        # We read the response until we get the string "\r\n\r\n"
+        resp = self.recv(1)
+        while resp.find("\r\n\r\n".encode()) == -1:
+            resp = resp + self.recv(1)
+        # We just need the first line to check if the connection
+        # was successful
+        statusline = resp.splitlines()[0].split(" ".encode(), 2)
+        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
+            self.close()
+            raise GeneralProxyError((1, _generalerrors[1]))
+        try:
+            statuscode = int(statusline[1])
+        except ValueError:
+            self.close()
+            raise GeneralProxyError((1, _generalerrors[1]))
+        if statuscode != 200:
+            self.close()
+            raise HTTPError((statuscode, statusline[2]))
+        self.__proxysockname = ("0.0.0.0", 0)
+        self.__proxypeername = (addr, destport)
+
+    def connect(self, destpair):
+        """connect(self, destpair)
+        Connects to the specified destination through a proxy.
+        destpair - A tuple of the IP/DNS address and the port number.
+        (identical to socket's connect).
+        To select the proxy server use setproxy().
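Reviewer note (not part of the patch): per-socket configuration, as opposed to the module-wide default shown earlier, goes through setproxy() before connect(). A minimal sketch with placeholder addresses:

    import socks

    s = socks.socksocket()
    # SOCKS5 with remote DNS resolution (rdns defaults to True).
    s.setproxy(socks.PROXY_TYPE_SOCKS5, "proxy.example.com", 1080)
    s.connect(("example.com", 80))
    s.sendall("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    print(s.recv(4096))
    s.close()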
+ """ + # Do a minimal input check first + if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int): + raise GeneralProxyError((5, _generalerrors[5])) + if self.__proxy[0] == PROXY_TYPE_SOCKS5: + if self.__proxy[2] != None: + portnum = self.__proxy[2] + else: + portnum = 1080 + _orgsocket.connect(self, (self.__proxy[1], portnum)) + self.__negotiatesocks5(destpair[0], destpair[1]) + elif self.__proxy[0] == PROXY_TYPE_SOCKS4: + if self.__proxy[2] != None: + portnum = self.__proxy[2] + else: + portnum = 1080 + _orgsocket.connect(self,(self.__proxy[1], portnum)) + self.__negotiatesocks4(destpair[0], destpair[1]) + elif self.__proxy[0] == PROXY_TYPE_HTTP: + if self.__proxy[2] != None: + portnum = self.__proxy[2] + else: + portnum = 8080 + _orgsocket.connect(self,(self.__proxy[1], portnum)) + self.__negotiatehttp(destpair[0], destpair[1]) + elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL: + if self.__proxy[2] != None: + portnum = self.__proxy[2] + else: + portnum = 8080 + _orgsocket.connect(self,(self.__proxy[1],portnum)) + if destpair[1] == 443: + self.__negotiatehttp(destpair[0],destpair[1]) + else: + self.__httptunnel = False + elif self.__proxy[0] == None: + _orgsocket.connect(self, (destpair[0], destpair[1])) + else: + raise GeneralProxyError((4, _generalerrors[4])) diff --git a/awx/lib/site-packages/iso8601/__init__.py b/awx/lib/site-packages/iso8601/__init__.py new file mode 100644 index 0000000000..e72e3563bc --- /dev/null +++ b/awx/lib/site-packages/iso8601/__init__.py @@ -0,0 +1 @@ +from iso8601 import * diff --git a/awx/lib/site-packages/iso8601/iso8601.py b/awx/lib/site-packages/iso8601/iso8601.py new file mode 100644 index 0000000000..f923938b2d --- /dev/null +++ b/awx/lib/site-packages/iso8601/iso8601.py @@ -0,0 +1,102 @@ +"""ISO 8601 date time string parsing + +Basic usage: +>>> import iso8601 +>>> iso8601.parse_date("2007-01-25T12:00:00Z") +datetime.datetime(2007, 1, 25, 12, 0, tzinfo=) +>>> + +""" + +from datetime import datetime, timedelta, tzinfo +import re + +__all__ = ["parse_date", "ParseError"] + +# Adapted from http://delete.me.uk/2005/03/iso8601.html +ISO8601_REGEX = re.compile(r"(?P[0-9]{4})(-(?P[0-9]{1,2})(-(?P[0-9]{1,2})" + r"((?P.)(?P[0-9]{2}):(?P[0-9]{2})(:(?P[0-9]{2})(\.(?P[0-9]+))?)?" + r"(?PZ|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?" +) +TIMEZONE_REGEX = re.compile("(?P[+-])(?P[0-9]{2}).(?P[0-9]{2})") + +class ParseError(Exception): + """Raised when there is a problem parsing a date string""" + +# Yoinked from python docs +ZERO = timedelta(0) +class Utc(tzinfo): + """UTC + + """ + def utcoffset(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return ZERO +UTC = Utc() + +class FixedOffset(tzinfo): + """Fixed offset in hours and minutes from UTC + + """ + def __init__(self, offset_hours, offset_minutes, name): + self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes) + self.__name = name + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return self.__name + + def dst(self, dt): + return ZERO + + def __repr__(self): + return "" % self.__name + +def parse_timezone(tzstring, default_timezone=UTC): + """Parses ISO 8601 time zone specs into tzinfo offsets + + """ + if tzstring == "Z": + return default_timezone + # This isn't strictly correct, but it's common to encounter dates without + # timezones so I'll assume the default (which defaults to UTC). + # Addresses issue 4. 
+ if tzstring is None: + return default_timezone + m = TIMEZONE_REGEX.match(tzstring) + prefix, hours, minutes = m.groups() + hours, minutes = int(hours), int(minutes) + if prefix == "-": + hours = -hours + minutes = -minutes + return FixedOffset(hours, minutes, tzstring) + +def parse_date(datestring, default_timezone=UTC): + """Parses ISO 8601 dates into datetime objects + + The timezone is parsed from the date string. However it is quite common to + have dates without a timezone (not strictly correct). In this case the + default timezone specified in default_timezone is used. This is UTC by + default. + """ + if not isinstance(datestring, basestring): + raise ParseError("Expecting a string %r" % datestring) + m = ISO8601_REGEX.match(datestring) + if not m: + raise ParseError("Unable to parse date string %r" % datestring) + groups = m.groupdict() + tz = parse_timezone(groups["timezone"], default_timezone=default_timezone) + if groups["fraction"] is None: + groups["fraction"] = 0 + else: + groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6) + return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]), + int(groups["hour"]), int(groups["minute"]), int(groups["second"]), + int(groups["fraction"]), tz) diff --git a/awx/lib/site-packages/iso8601/test_iso8601.py b/awx/lib/site-packages/iso8601/test_iso8601.py new file mode 100644 index 0000000000..ff9e2731cf --- /dev/null +++ b/awx/lib/site-packages/iso8601/test_iso8601.py @@ -0,0 +1,111 @@ +import iso8601 + +def test_iso8601_regex(): + assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z") + +def test_timezone_regex(): + assert iso8601.TIMEZONE_REGEX.match("+01:00") + assert iso8601.TIMEZONE_REGEX.match("+00:00") + assert iso8601.TIMEZONE_REGEX.match("+01:20") + assert iso8601.TIMEZONE_REGEX.match("-01:00") + +def test_parse_date(): + d = iso8601.parse_date("2006-10-20T15:34:56Z") + assert d.year == 2006 + assert d.month == 10 + assert d.day == 20 + assert d.hour == 15 + assert d.minute == 34 + assert d.second == 56 + assert d.tzinfo == iso8601.UTC + +def test_parse_date_fraction(): + d = iso8601.parse_date("2006-10-20T15:34:56.123Z") + assert d.year == 2006 + assert d.month == 10 + assert d.day == 20 + assert d.hour == 15 + assert d.minute == 34 + assert d.second == 56 + assert d.microsecond == 123000 + assert d.tzinfo == iso8601.UTC + +def test_parse_date_fraction_2(): + """From bug 6 + + """ + d = iso8601.parse_date("2007-5-7T11:43:55.328Z'") + assert d.year == 2007 + assert d.month == 5 + assert d.day == 7 + assert d.hour == 11 + assert d.minute == 43 + assert d.second == 55 + assert d.microsecond == 328000 + assert d.tzinfo == iso8601.UTC + +def test_parse_date_tz(): + d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30") + assert d.year == 2006 + assert d.month == 10 + assert d.day == 20 + assert d.hour == 15 + assert d.minute == 34 + assert d.second == 56 + assert d.microsecond == 123000 + assert d.tzinfo.tzname(None) == "+02:30" + offset = d.tzinfo.utcoffset(None) + assert offset.days == 0 + assert offset.seconds == 60 * 60 * 2.5 + +def test_parse_invalid_date(): + try: + iso8601.parse_date(None) + except iso8601.ParseError: + pass + else: + assert 1 == 2 + +def test_parse_invalid_date2(): + try: + iso8601.parse_date("23") + except iso8601.ParseError: + pass + else: + assert 1 == 2 + +def test_parse_no_timezone(): + """issue 4 - Handle datetime string without timezone + + This tests what happens when you parse a date with no timezone. While not + strictly correct this is quite common. 
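For reviewers, the behaviour implemented above in parse_timezone()/FixedOffset, sketched with the vendored package importable: an explicit offset becomes a FixedOffset tzinfo, while "Z" or a missing zone falls back to the default (UTC):

    import iso8601

    d = iso8601.parse_date("2013-09-10T14:30:00-05:00")
    print(d.tzinfo.tzname(None))     # "-05:00" (a FixedOffset)
    print(d.tzinfo.utcoffset(None))  # -1 day, 19:00:00 (i.e. UTC-5)

    d = iso8601.parse_date("2013-09-10T14:30:00")  # no zone in the string
    print(d.tzinfo is iso8601.UTC)   # True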
I'll assume UTC for the time zone
+    in this case.
+    """
+    d = iso8601.parse_date("2007-01-01T08:00:00")
+    assert d.year == 2007
+    assert d.month == 1
+    assert d.day == 1
+    assert d.hour == 8
+    assert d.minute == 0
+    assert d.second == 0
+    assert d.microsecond == 0
+    assert d.tzinfo == iso8601.UTC
+
+def test_parse_no_timezone_different_default():
+    tz = iso8601.FixedOffset(2, 0, "test offset")
+    d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
+    assert d.tzinfo == tz
+
+def test_space_separator():
+    """Handle a separator other than T
+
+    """
+    d = iso8601.parse_date("2007-06-23 06:40:34.00Z")
+    assert d.year == 2007
+    assert d.month == 6
+    assert d.day == 23
+    assert d.hour == 6
+    assert d.minute == 40
+    assert d.second == 34
+    assert d.microsecond == 0
+    assert d.tzinfo == iso8601.UTC
diff --git a/awx/lib/site-packages/keyring/__init__.py b/awx/lib/site-packages/keyring/__init__.py
new file mode 100644
index 0000000000..bbf968f649
--- /dev/null
+++ b/awx/lib/site-packages/keyring/__init__.py
@@ -0,0 +1,14 @@
+"""
+__init__.py
+
+Created by Kang Zhang on 2009-07-09
+"""
+
+from __future__ import absolute_import
+
+import logging
+logger = logging.getLogger('keyring')
+
+from .core import (set_keyring, get_keyring, set_password, get_password,
+                   delete_password)
+from .getpassbackend import get_password as get_pass_get_password
diff --git a/awx/lib/site-packages/keyring/backend.py b/awx/lib/site-packages/keyring/backend.py
new file mode 100644
index 0000000000..ca017f7b97
--- /dev/null
+++ b/awx/lib/site-packages/keyring/backend.py
@@ -0,0 +1,127 @@
+"""
+Keyring implementation support
+"""
+
+from __future__ import absolute_import
+
+import abc
+import itertools
+
+from keyring import errors
+from keyring.util import properties
+
+import keyring.util
+
+class KeyringBackendMeta(abc.ABCMeta):
+    """
+    A metaclass that's both an ABCMeta and a type that keeps a registry of
+    all (non-abstract) types.
+    """
+    def __init__(cls, name, bases, dict):
+        super(KeyringBackendMeta, cls).__init__(name, bases, dict)
+        if not hasattr(cls, '_classes'):
+            cls._classes = set()
+        classes = cls._classes
+        if not cls.__abstractmethods__:
+            classes.add(cls)
+
+
+class KeyringBackend(object):
+    """The abstract base class of the keyring, every backend must implement
+    this interface.
+    """
+    __metaclass__ = KeyringBackendMeta
+
+    #@abc.abstractproperty
+    def priority(cls):
+        """
+        Each backend class must supply a priority, a number (float or integer)
+        indicating the priority of the backend relative to all other backends.
+        The priority need not be static -- it may (and should) vary based on
+        attributes of the environment in which it runs (platform, available
+        packages, etc.).
+
+        A higher number indicates a higher priority. The priority should raise
+        a RuntimeError with a message indicating the underlying cause if the
+        backend is not suitable for the current environment.
+
+        As a rule of thumb, a priority greater than zero but less than one is
+        suitable, but a priority of one or greater is recommended.
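A hypothetical backend showing the contract described above (illustrative only, not part of the patch): priority either returns a number or raises RuntimeError when the environment cannot support the backend. The HYPOTHETICAL_DAEMON_SOCKET variable is invented for the example:

    import os

    from keyring.backend import KeyringBackend
    from keyring.util import properties

    class HypotheticalKeyring(KeyringBackend):
        """Illustrative backend demonstrating the priority contract."""

        @properties.ClassProperty
        @classmethod
        def priority(cls):
            if 'HYPOTHETICAL_DAEMON_SOCKET' not in os.environ:
                # Unsuitable environments raise rather than return low numbers.
                raise RuntimeError("hypothetical daemon is not running")
            return 1

        def get_password(self, service, username):
            return None

        def set_password(self, service, username, password):
            pass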
+ """ + + @properties.ClassProperty + @classmethod + def viable(cls): + with errors.ExceptionRaisedContext() as exc: + cls.priority + return not bool(exc) + + @abc.abstractmethod + def get_password(self, service, username): + """Get password of the username for the service + """ + return None + + @abc.abstractmethod + def set_password(self, service, username, password): + """Set password for the username of the service + """ + raise errors.PasswordSetError("reason") + + # for backward-compatibility, don't require a backend to implement + # delete_password + #@abc.abstractmethod + def delete_password(self, service, username): + """Delete the password for the username of the service. + """ + raise errors.PasswordDeleteError("reason") + +class Crypter(object): + """Base class providing encryption and decryption + """ + + @abc.abstractmethod + def encrypt(self, value): + """Encrypt the value. + """ + pass + + @abc.abstractmethod + def decrypt(self, value): + """Decrypt the value. + """ + pass + +class NullCrypter(Crypter): + """A crypter that does nothing + """ + + def encrypt(self, value): + return value + + def decrypt(self, value): + return value + +@keyring.util.once +def get_all_keyring(): + """ + Return a list of all implemented keyrings that can be constructed without + parameters. + """ + # ensure that all keyring backends are loaded + for mod_name in ('file', 'Gnome', 'Google', 'keyczar', 'kwallet', 'multi', + 'OS_X', 'pyfs', 'SecretService', 'Windows'): + # use fromlist to cause the module to resolve under Demand Import + __import__('keyring.backends.'+mod_name, fromlist=('__name__',)) + + def is_class_viable(keyring_cls): + try: + keyring_cls.priority + except RuntimeError: + return False + return True + + all_classes = KeyringBackend._classes + viable_classes = itertools.ifilter(is_class_viable, all_classes) + return list(keyring.util.suppress_exceptions(viable_classes, + exceptions=TypeError)) diff --git a/awx/lib/site-packages/keyring/backends/Gnome.py b/awx/lib/site-packages/keyring/backends/Gnome.py new file mode 100644 index 0000000000..00f4ea5e98 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/Gnome.py @@ -0,0 +1,114 @@ +import os + +try: + from gi import Repository + if Repository.get_default().enumerate_versions('GnomeKeyring'): + from gi.repository import GnomeKeyring +except ImportError: + pass + +from keyring.backend import KeyringBackend +from keyring.errors import PasswordSetError, PasswordDeleteError +from keyring.util import properties + +class Keyring(KeyringBackend): + """Gnome Keyring""" + + # Name of the keyring to store the passwords in. + # Use None for the default keyring. + KEYRING_NAME = None + + requisite_vars = [ + 'GNOME_KEYRING_CONTROL', + 'DISPLAY', + 'DBUS_SESSION_BUS_ADDRESS', + ] + + @properties.ClassProperty + @classmethod + def priority(cls): + if 'GnomeKeyring' not in globals(): + raise RuntimeError("GnomeKeyring module required") + if not cls.has_requisite_vars(): + raise RuntimeError("Requisite environment vars are not present") + return int(cls.has_requisite_vars()) + + @classmethod + def has_requisite_vars(cls): + """ + Return True if the requisite environment vars are present in the + environment. 
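get_all_keyring() above, combined with each class's priority, is what lets a caller pick the best available backend (the actual selection presumably lives in keyring.core, which is not part of this hunk); the idea reduces to a short sketch:

    import keyring.backend

    # Every constructible backend, already filtered to viable classes.
    candidates = keyring.backend.get_all_keyring()
    best = max(candidates, key=lambda kr: kr.priority)
    print("%s (priority %s)" % (type(best).__name__, best.priority))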
+ """ + return set(cls.requisite_vars).issubset(os.environ) + + def _find_passwords(self, service, username, deleting=False): + """Get password of the username for the service + """ + passwords = [] + + service = self._safe_string(service) + username = self._safe_string(username) + for attrs_tuple in (('username', 'service'), ('user', 'domain')): + attrs = GnomeKeyring.Attribute.list_new() + GnomeKeyring.Attribute.list_append_string(attrs, attrs_tuple[0], username) + GnomeKeyring.Attribute.list_append_string(attrs, attrs_tuple[1], service) + result, items = GnomeKeyring.find_items_sync( + GnomeKeyring.ItemType.NETWORK_PASSWORD, attrs) + if result == GnomeKeyring.Result.OK: + passwords += items + elif deleting: + if result == GnomeKeyring.Result.CANCELLED: + raise PasswordDeleteError("Cancelled by user") + elif result != GnomeKeyring.Result.NO_MATCH: + raise PasswordDeleteError(result.value_name) + return passwords + + def get_password(self, service, username): + """Get password of the username for the service + """ + items = self._find_passwords(service, username) + if not items: + return None + + secret = items[0].secret + return secret if isinstance(secret, unicode) else secret.decode('utf-8') + + def set_password(self, service, username, password): + """Set password for the username of the service + """ + service = self._safe_string(service) + username = self._safe_string(username) + password = self._safe_string(password) + attrs = GnomeKeyring.Attribute.list_new() + GnomeKeyring.Attribute.list_append_string(attrs, 'username', username) + GnomeKeyring.Attribute.list_append_string(attrs, 'service', service) + GnomeKeyring.Attribute.list_append_string(attrs, 'application', 'python-keyring') + result = GnomeKeyring.item_create_sync( + self.KEYRING_NAME, GnomeKeyring.ItemType.NETWORK_PASSWORD, + "Password for '%s' on '%s'" % (username, service), + attrs, password, True)[0] + if result == GnomeKeyring.Result.CANCELLED: + # The user pressed "Cancel" when prompted to unlock their keyring. + raise PasswordSetError("Cancelled by user") + elif result != GnomeKeyring.Result.OK: + raise PasswordSetError(result.value_name) + + def delete_password(self, service, username): + """Delete the password for the username of the service. + """ + items = self._find_passwords(service, username, deleting=True) + if not items: + raise PasswordDeleteError("Password not found") + for current in items: + result = GnomeKeyring.item_delete_sync(current.keyring, + current.item_id) + if result == GnomeKeyring.Result.CANCELLED: + raise PasswordDeleteError("Cancelled by user") + elif result != GnomeKeyring.Result.OK: + raise PasswordDeleteError(result.value_name) + + def _safe_string(self, source, encoding='utf-8'): + """Convert unicode to string as gnomekeyring barfs on unicode""" + if not isinstance(source, str): + return source.encode(encoding) + return str(source) diff --git a/awx/lib/site-packages/keyring/backends/Google.py b/awx/lib/site-packages/keyring/backends/Google.py new file mode 100644 index 0000000000..6264b3cc38 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/Google.py @@ -0,0 +1,323 @@ +from __future__ import absolute_import + +import os +import sys +import copy +import codecs +import cPickle +import base64 +import io + +try: + import gdata.docs.service +except ImportError: + pass + +from . 
import keyczar +from keyring import errors +from keyring import credentials +import keyring.py27compat +from keyring.backend import KeyringBackend +from keyring.util import properties +from keyring.errors import ExceptionRaisedContext + +class EnvironCredential(credentials.EnvironCredential): + """Retrieve credentials from specifically named environment variables + """ + + def __init__(self): + super(EnvironCredential, self).__init__('GOOGLE_KEYRING_USER', + 'GOOGLE_KEYRING_PASSWORD') + +class DocsKeyring(KeyringBackend): + """Backend that stores keyring on Google Docs. + Note that login and any other initialisation is deferred until it is + actually required to allow this keyring class to be added to the + global _all_keyring list. + """ + + keyring_title = 'GoogleKeyring' + # status enums + OK = 1 + FAIL = 0 + CONFLICT = -1 + + def __init__(self, credential, source, crypter, + collection=None, client=None, + can_create=True, input_getter=keyring.py27compat.input + ): + self.credential = credential + self.crypter = crypter + self.source = source + self._collection = collection + self.can_create = can_create + self.input_getter = input_getter + self._keyring_dict = None + + if not client: + self._client = gdata.docs.service.DocsService() + else: + self._client = client + + self._client.source = source + self._client.ssl = True + self._login_reqd = True + + @properties.ClassProperty + @classmethod + def priority(cls): + if not cls._has_gdata(): + raise RuntimeError("Requires gdata") + if not keyczar.has_keyczar(): + raise RuntimeError("Requires keyczar") + return 3 + + @classmethod + def _has_gdata(cls): + with ExceptionRaisedContext() as exc: + gdata.__name__ + return not bool(exc) + + def get_password(self, service, username): + """Get password of the username for the service + """ + result = self._get_entry(self._keyring, service, username) + if result: + result = self._decrypt(result) + return result + + def set_password(self, service, username, password): + """Set password for the username of the service + """ + password = self._encrypt(password or '') + keyring_working_copy = copy.deepcopy(self._keyring) + service_entries = keyring_working_copy.get(service) + if not service_entries: + service_entries = {} + keyring_working_copy[service] = service_entries + service_entries[username] = password + save_result = self._save_keyring(keyring_working_copy) + if save_result == self.OK: + self._keyring_dict = keyring_working_copy + return + elif save_result == self.CONFLICT: + # check if we can avoid updating + self.docs_entry, keyring_dict = self._read() + existing_pwd = self._get_entry(self._keyring, service, username) + conflicting_pwd = self._get_entry(keyring_dict, service, username) + if conflicting_pwd == password: + # if someone else updated it to the same value then we are done + self._keyring_dict = keyring_working_copy + return + elif conflicting_pwd is None or conflicting_pwd == existing_pwd: + # if doesn't already exist or is unchanged then update it + new_service_entries = keyring_dict.get(service, {}) + new_service_entries[username] = password + keyring_dict[service] = new_service_entries + save_result = self._save_keyring(keyring_dict) + if save_result == self.OK: + self._keyring_dict = keyring_dict + return + else: + raise errors.PasswordSetError( + 'Failed write after conflict detected') + else: + raise errors.PasswordSetError( + 'Conflict detected, service:%s and username:%s was '\ + 'set to a different value by someone else' %(service, + username)) + + raise 
errors.PasswordSetError('Could not save keyring') + + def delete_password(self, service, username): + return self._del_entry(self._keyring, service, username) + + @property + def client(self): + if not self._client.GetClientLoginToken(): + try: + self._client.ClientLogin(self.credential.username, + self.credential.password, + self._client.source) + except gdata.service.CaptchaRequired: + sys.stdout.write('Please visit ' + self._client.captcha_url) + answer = self.input_getter('Answer to the challenge? ') + self._client.email = self.credential.username + self._client.password = self.credential.password + self._client.ClientLogin( + self.credential.username, + self.credential.password, + self._client.source, + captcha_token=self._client.captcha_token, + captcha_response=answer) + except gdata.service.BadAuthentication: + raise errors.InitError('Users credential were unrecognized') + except gdata.service.Error: + raise errors.InitError('Login Error') + + return self._client + + @property + def collection(self): + return self._collection or self.credential.username.split('@')[0] + + @property + def _keyring(self): + if self._keyring_dict is None: + self.docs_entry, self._keyring_dict = self._read() + return self._keyring_dict + + def _get_entry(self, keyring_dict, service, username): + result = None + service_entries = keyring_dict.get(service) + if service_entries: + result = service_entries.get(username) + return result + + def _del_entry(self, keyring_dict, service, username): + service_entries = keyring_dict.get(service) + if not service_entries: + raise errors.PasswordDeleteError("No matching service") + try: + del service_entries[username] + except KeyError: + raise errors.PasswordDeleteError("Not found") + if not service_entries: + del keyring_dict[service] + + def _decrypt(self, value): + if not value: + return '' + return self.crypter.decrypt(value) + + def _encrypt(self, value): + if not value: + return '' + return self.crypter.encrypt(value) + + def _get_doc_title(self): + return '%s' %self.keyring_title + + def _read(self): + from gdata.docs.service import DocumentQuery + import gdata + title_query = DocumentQuery(categories=[self.collection]) + title_query['title'] = self._get_doc_title() + title_query['title-exact'] = 'true' + docs = self.client.QueryDocumentListFeed(title_query.ToUri()) + + if not docs.entry: + if self.can_create: + docs_entry = None + keyring_dict = {} + else: + raise errors.InitError( + '%s not found in %s and create not permitted' + %(self._get_doc_title(), self.collection)) + else: + docs_entry = docs.entry[0] + file_contents = '' + try: + url = docs_entry.content.src + url += '&exportFormat=txt' + server_response = self.client.request('GET', url) + if server_response.status != 200: + raise errors.InitError( + 'Could not read existing Google Docs keyring') + file_contents = server_response.read() + if file_contents.startswith(codecs.BOM_UTF8): + file_contents = file_contents[len(codecs.BOM_UTF8):] + keyring_dict = cPickle.loads(base64.urlsafe_b64decode( + file_contents.decode('string-escape'))) + except cPickle.UnpicklingError, ex: + raise errors.InitError( + 'Could not unpickle existing Google Docs keyring', ex) + except TypeError, ex: + raise errors.InitError( + 'Could not decode existing Google Docs keyring', ex) + + return docs_entry, keyring_dict + + def _save_keyring(self, keyring_dict): + """Helper to actually write the keyring to Google""" + import gdata + result = self.OK + file_contents = base64.urlsafe_b64encode(cPickle.dumps(keyring_dict)) + try: 
+ if self.docs_entry: + extra_headers = {'Content-Type': 'text/plain', + 'Content-Length': len(file_contents)} + self.docs_entry = self.client.Put( + file_contents, + self.docs_entry.GetEditMediaLink().href, + extra_headers=extra_headers + ) + else: + from gdata.docs.service import DocumentQuery + # check for existence of folder, create if required + folder_query = DocumentQuery(categories=['folder']) + folder_query['title'] = self.collection + folder_query['title-exact'] = 'true' + docs = self.client.QueryDocumentListFeed(folder_query.ToUri()) + if docs.entry: + folder_entry = docs.entry[0] + else: + folder_entry = self.client.CreateFolder(self.collection) + file_handle = io.BytesIO(file_contents) + media_source = gdata.MediaSource( + file_handle=file_handle, + content_type='text/plain', + content_length=len(file_contents), + file_name='temp') + self.docs_entry = self.client.Upload( + media_source, + self._get_doc_title(), + folder_or_uri=folder_entry + ) + except gdata.service.RequestError, ex: + try: + if ex.message['reason'].lower().find('conflict') != -1: + result = self.CONFLICT + else: + # Google docs has a bug when updating a shared document + # using PUT from any account other that the owner. + # It returns an error 400 "Sorry, there was an error saving the file. Please try again" + # *despite* actually updating the document! + # Workaround by re-reading to see if it actually updated + if ex.message['body'].find( + 'Sorry, there was an error saving the file') != -1: + new_docs_entry, new_keyring_dict = self._read() + if new_keyring_dict == keyring_dict: + result = self.OK + else: + result = self.FAIL + else: + result = self.FAIL + except: + result = self.FAIL + + return result + +class KeyczarDocsKeyring(DocsKeyring): + """Google Docs keyring using keyczar initialized from environment + variables + """ + + def __init__(self): + crypter = keyczar.EnvironCrypter() + credential = EnvironCredential() + source = os.environ.get('GOOGLE_KEYRING_SOURCE') + super(KeyczarDocsKeyring, self).__init__( + credential, source, crypter) + + def supported(self): + """Return if this keyring supports current environment: + -1: not applicable + 0: suitable + 1: recommended + """ + try: + from keyczar import keyczar + return super(KeyczarDocsKeyring, self).supported() + except ImportError: + return -1 diff --git a/awx/lib/site-packages/keyring/backends/OS_X.py b/awx/lib/site-packages/keyring/backends/OS_X.py new file mode 100644 index 0000000000..dc318ae184 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/OS_X.py @@ -0,0 +1,122 @@ +import platform +import subprocess +import re +import binascii + +from keyring.backend import KeyringBackend +from keyring.errors import PasswordSetError +from keyring.errors import PasswordDeleteError +from keyring.util import properties + + +class SecurityCommand(unicode): + """ + A string suitable for passing as the 'command' parameter to the + OS X 'security' command. + """ + def __new__(cls, cmd, store='generic'): + cmd = '%(cmd)s-%(store)s-password' % vars() + return super(SecurityCommand, cls).__new__(cls, cmd) + + +class Keyring(KeyringBackend): + """Mac OS X Keychain""" + + # regex for extracting password from security call + password_regex = re.compile("""password:\s*(?:0x(?P[0-9A-F]+)\s*)?""" + """(?:"(?P.*)")?""") + store = 'generic' + + @properties.ClassProperty + @classmethod + def priority(cls): + """ + Preferred for all OS X environments. 
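The backend below shells out to the system `security` tool; what set_password()/get_password() boil down to can be reproduced by hand (sketch with placeholder service/account values; note that `-g` prints the password on stderr, which is why the code reads stderrdata):

    import subprocess

    subprocess.check_call([
        'security', 'add-generic-password',
        '-a', 'alice', '-s', 'example-svc', '-w', 's3cret', '-U',
    ])
    proc = subprocess.Popen(
        ['security', 'find-generic-password',
         '-g', '-a', 'alice', '-s', 'example-svc'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    print(stderr)  # contains: password: "s3cret"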
+ """ + if platform.system() != 'Darwin': + raise RuntimeError("OS X required") + return 5 + + def set_password(self, service, username, password): + if username is None: + username = '' + set_error = PasswordSetError("Can't store password in keychain") + try: + # set up the call for security. + cmd = [ + 'security', + SecurityCommand('add', self.store), + '-a', username, + '-s', service, + '-w', password, + '-U', + ] + call = subprocess.Popen(cmd, stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + stdoutdata, stderrdata = call.communicate() + code = call.returncode + # check return code. + if code is not 0: + raise set_error + except: + raise set_error + + def get_password(self, service, username): + if username is None: + username = '' + try: + # set up the call to security. + cmd = [ + 'security', + SecurityCommand('find', self.store), + '-g', + '-a', username, + '-s', service, + ] + call = subprocess.Popen(cmd, stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + stdoutdata, stderrdata = call.communicate() + code = call.returncode + if code is not 0: + raise OSError("Can't fetch password from system") + output = stderrdata.decode() + # check for empty password. + if output == 'password: \n': + return '' + # search for special password pattern. + matches = Keyring.password_regex.search(output) + if matches: + group_dict = matches.groupdict() + hex = group_dict.get('hex') + pw = group_dict.get('pw') + if hex: + # it's a weird hex password, decode it. + return unicode(binascii.unhexlify(hex), 'utf-8') + else: + # it's a normal password, send it back. + return pw + # nothing was found, it doesn't exist. + except: + pass + + def delete_password(self, service, username): + del_error = PasswordDeleteError("Can't delete password in keychain") + if username is None: + username = '' + try: + cmd = [ + 'security', + SecurityCommand('delete', self.store), + '-a', username, + '-s', service, + ] + # set up the call for security. + call = subprocess.Popen(cmd, stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + stdoutdata, stderrdata = call.communicate() + code = call.returncode + # check return code. 
+ if code is not 0: + raise del_error + except: + raise del_error diff --git a/awx/lib/site-packages/keyring/backends/SecretService.py b/awx/lib/site-packages/keyring/backends/SecretService.py new file mode 100644 index 0000000000..2bdb5f1a98 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/SecretService.py @@ -0,0 +1,76 @@ +import os +import logging + +from keyring.util import properties +from keyring.backend import KeyringBackend +from keyring.errors import (InitError, PasswordDeleteError, + ExceptionRaisedContext) + +try: + import secretstorage.exceptions +except ImportError: + pass + +log = logging.getLogger(__name__) + +class Keyring(KeyringBackend): + """Secret Service Keyring""" + + @properties.ClassProperty + @classmethod + def priority(cls): + with ExceptionRaisedContext() as exc: + secretstorage.__name__ + if exc: + raise RuntimeError("SecretService required") + try: + bus = secretstorage.dbus_init() + secretstorage.Collection(bus) + except secretstorage.exceptions.SecretServiceNotAvailableException: + raise RuntimeError("Unable to get initialize SecretService") + if 'DISPLAY' not in os.environ: + raise RuntimeError("SecretService cannot run without a DISPLAY " + "environment variable") + return 5 + + def get_default_collection(self): + bus = secretstorage.dbus_init() + if hasattr(secretstorage, 'get_default_collection'): + collection = secretstorage.get_default_collection(bus) + else: + collection = secretstorage.Collection(bus) + if collection.is_locked(): + if collection.unlock(): + raise InitError("Failed to unlock the collection!") + return collection + + def get_password(self, service, username): + """Get password of the username for the service + """ + collection = self.get_default_collection() + items = collection.search_items( + {"username": username, "service": service}) + for item in items: + return item.get_secret().decode('utf-8') + + def set_password(self, service, username, password): + """Set password for the username of the service + """ + collection = self.get_default_collection() + attributes = { + "application": "python-keyring", + "service": service, + "username": username + } + label = "Password for '%s' on '%s'" % (username, service) + collection.create_item(label, attributes, password, replace=True) + + def delete_password(self, service, username): + """Delete the stored password (only the first one) + """ + collection = self.get_default_collection() + items = collection.search_items( + {"username": username, "service": service}) + for item in items: + return item.delete() + raise PasswordDeleteError("No such password!") diff --git a/awx/lib/site-packages/keyring/backends/Windows.py b/awx/lib/site-packages/keyring/backends/Windows.py new file mode 100644 index 0000000000..8bf985d04e --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/Windows.py @@ -0,0 +1,233 @@ +import sys +import base64 +import platform + +import keyring.util.escape +from keyring.util import properties +from keyring.backend import KeyringBackend +from keyring.errors import PasswordDeleteError, ExceptionRaisedContext +from . import file + +try: + import pywintypes + import win32cred +except ImportError: + pass + +try: + import winreg +except ImportError: + try: + # Python 2 compatibility + import _winreg as winreg + except ImportError: + pass + +try: + from . import _win_crypto +except ImportError: + pass + +def has_pywin32(): + """ + Does this environment have pywin32? + Should return False even when Mercurial's Demand Import allowed import of + win32cred. 
+ """ + with ExceptionRaisedContext() as exc: + win32cred.__name__ + return not bool(exc) + +def has_wincrypto(): + """ + Does this environment have wincrypto? + Should return False even when Mercurial's Demand Import allowed import of + _win_crypto, so accesses an attribute of the module. + """ + with ExceptionRaisedContext() as exc: + _win_crypto.__name__ + return not bool(exc) + +class EncryptedKeyring(file.BaseKeyring): + """ + A File-based keyring secured by Windows Crypto API. + """ + + @properties.ClassProperty + @classmethod + def priority(self): + """ + Preferred over file.EncryptedKeyring but not other, more sophisticated + Windows backends. + """ + if not platform.system() == 'Windows': + raise RuntimeError("Requires Windows") + return .8 + + filename = 'wincrypto_pass.cfg' + + def encrypt(self, password): + """Encrypt the password using the CryptAPI. + """ + return _win_crypto.encrypt(password) + + def decrypt(self, password_encrypted): + """Decrypt the password using the CryptAPI. + """ + return _win_crypto.decrypt(password_encrypted) + + +class WinVaultKeyring(KeyringBackend): + """ + WinVaultKeyring stores encrypted passwords using the Windows Credential + Manager. + + Requires pywin32 + + This backend does some gymnastics to simulate multi-user support, + which WinVault doesn't support natively. See + https://bitbucket.org/kang/python-keyring-lib/issue/47/winvaultkeyring-only-ever-returns-last#comment-731977 + for details on the implementation, but here's the gist: + + Passwords are stored under the service name unless there is a collision + (another password with the same service name but different user name), + in which case the previous password is moved into a compound name: + {username}@{service} + """ + + @properties.ClassProperty + @classmethod + def priority(cls): + """ + If available, the preferred backend on Windows. 
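The collision handling described in this docstring, traced by hand with hypothetical values (see set_password()/get_password() below for the code paths):

    # set_password('svc', 'alice', 'pw1')
    #   -> vault entry TargetName='svc', UserName='alice'
    # set_password('svc', 'bob', 'pw2')
    #   -> 'svc' already taken: alice's entry is re-saved as 'alice@svc',
    #      then TargetName='svc' is overwritten with UserName='bob'
    # get_password('svc', 'alice')
    #   -> 'svc' now holds bob, so fall back to TargetName='alice@svc'

    def compound_name(username, service):
        # mirrors WinVaultKeyring._compound_name
        return u'%(username)s@%(service)s' % vars()

    print(compound_name(u'alice', u'svc'))  # alice@svc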
+ """ + if not has_pywin32(): + raise RuntimeError("Requires Windows and pywin32") + return 5 + + @staticmethod + def _compound_name(username, service): + return keyring.util.escape.u('%(username)s@%(service)s') % vars() + + def get_password(self, service, username): + # first attempt to get the password under the service name + res = self._get_password(service) + if not res or res['UserName'] != username: + # It wasn't found so attempt to get it with the compound name + res = self._get_password(self._compound_name(username, service)) + if not res: + return None + blob = res['CredentialBlob'] + return blob.decode('utf-16') + + def _get_password(self, target): + try: + res = win32cred.CredRead( + Type=win32cred.CRED_TYPE_GENERIC, + TargetName=target, + ) + except pywintypes.error: + e = sys.exc_info()[1] + if e.winerror == 1168 and e.funcname == 'CredRead': # not found + return None + raise + return res + + def set_password(self, service, username, password): + existing_pw = self._get_password(service) + if existing_pw: + # resave the existing password using a compound target + existing_username = existing_pw['UserName'] + target = self._compound_name(existing_username, service) + self._set_password(target, existing_username, + existing_pw['CredentialBlob'].decode('utf-16')) + self._set_password(service, username, unicode(password)) + + def _set_password(self, target, username, password): + credential = dict(Type=win32cred.CRED_TYPE_GENERIC, + TargetName=target, + UserName=username, + CredentialBlob=password, + Comment="Stored using python-keyring", + Persist=win32cred.CRED_PERSIST_ENTERPRISE) + win32cred.CredWrite(credential, 0) + + def delete_password(self, service, username): + compound = self._compound_name(username, service) + deleted = False + for target in service, compound: + existing_pw = self._get_password(target) + if existing_pw and existing_pw['UserName'] == username: + deleted = True + self._delete_password(target) + if not deleted: + raise PasswordDeleteError(service) + + def _delete_password(self, target): + win32cred.CredDelete( + Type=win32cred.CRED_TYPE_GENERIC, + TargetName=target, + ) + + +class RegistryKeyring(KeyringBackend): + """ + RegistryKeyring is a keyring which use Windows CryptAPI to encrypt + the user's passwords and store them under registry keys + """ + + @properties.ClassProperty + @classmethod + def priority(self): + """ + Preferred on Windows when pywin32 isn't installed + """ + if platform.system() != 'Windows': + raise RuntimeError("Requires Windows") + if not has_wincrypto(): + raise RuntimeError("Requires ctypes") + return 2 + + def get_password(self, service, username): + """Get password of the username for the service + """ + try: + # fetch the password + key = r'Software\%s\Keyring' % service + hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key) + password_saved = winreg.QueryValueEx(hkey, username)[0] + password_base64 = password_saved.encode('ascii') + # decode with base64 + password_encrypted = base64.decodestring(password_base64) + # decrypted the password + password = _win_crypto.decrypt(password_encrypted).decode('utf-8') + except EnvironmentError: + password = None + return password + + def set_password(self, service, username, password): + """Write the password to the registry + """ + # encrypt the password + password_encrypted = _win_crypto.encrypt(password.encode('utf-8')) + # encode with base64 + password_base64 = base64.encodestring(password_encrypted) + # encode again to unicode + password_saved = password_base64.decode('ascii') + + 
# store the password + key_name = r'Software\%s\Keyring' % service + hkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, key_name) + winreg.SetValueEx(hkey, username, 0, winreg.REG_SZ, password_saved) + + def delete_password(self, service, username): + """Delete the password for the username of the service. + """ + try: + key_name = r'Software\%s\Keyring' % service + hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_name, 0, + winreg.KEY_ALL_ACCESS) + winreg.DeleteValue(hkey, username) + except WindowsError: + e = sys.exc_info()[1] + raise PasswordDeleteError(e) diff --git a/awx/lib/site-packages/keyring/backends/__init__.py b/awx/lib/site-packages/keyring/backends/__init__.py new file mode 100644 index 0000000000..792d600548 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/__init__.py @@ -0,0 +1 @@ +# diff --git a/awx/lib/site-packages/keyring/backends/_win_crypto.py b/awx/lib/site-packages/keyring/backends/_win_crypto.py new file mode 100644 index 0000000000..fb7f927d1a --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/_win_crypto.py @@ -0,0 +1,101 @@ + +from ctypes import Structure, POINTER, c_void_p, cast, create_string_buffer, \ + c_char_p, byref, memmove +from ctypes import windll, WinDLL, WINFUNCTYPE +try: + from ctypes import wintypes +except ValueError: + # see http://bugs.python.org/issue16396 + raise ImportError("wintypes") + +from keyring.util.escape import u + +# Crypto API ctypes bindings + +class DATA_BLOB(Structure): + _fields_ = [('cbData', wintypes.DWORD), + ('pbData', POINTER(wintypes.BYTE))] + + +class CRYPTPROTECT_PROMPTSTRUCT(Structure): + _fields_ = [('cbSize', wintypes.DWORD), + ('dwPromptFlags', wintypes.DWORD), + ('hwndApp', wintypes.HWND), + ('szPrompt', POINTER(wintypes.WCHAR))] + +# Flags for CRYPTPROTECT_PROMPTSTRUCT + +CRYPTPROTECT_PROMPT_ON_UNPROTECT = 1 +CRYPTPROTECT_PROMPT_ON_PROTECT = 2 + +# Flags for CryptProtectData/CryptUnprotectData + +CRYPTPROTECT_UI_FORBIDDEN = 0x01 +CRYPTPROTECT_LOCAL_MACHINE = 0x04 +CRYPTPROTECT_CRED_SYNC = 0x08 +CRYPTPROTECT_AUDIT = 0x10 +CRYPTPROTECT_NO_RECOVERY = 0x20 +CRYPTPROTECT_VERIFY_PROTECTION = 0x40 +CRYPTPROTECT_CRED_REGENERATE = 0x80 + +# Crypto API Functions + +_dll = WinDLL('CRYPT32.DLL') + +CryptProtectData = WINFUNCTYPE(wintypes.BOOL, + POINTER(DATA_BLOB), + POINTER(wintypes.WCHAR), + POINTER(DATA_BLOB), + c_void_p, + POINTER(CRYPTPROTECT_PROMPTSTRUCT), + wintypes.DWORD, + POINTER(DATA_BLOB))(('CryptProtectData', _dll)) + +CryptUnprotectData = WINFUNCTYPE(wintypes.BOOL, + POINTER(DATA_BLOB), + POINTER(wintypes.WCHAR), + POINTER(DATA_BLOB), + c_void_p, + POINTER(CRYPTPROTECT_PROMPTSTRUCT), + wintypes.DWORD, POINTER(DATA_BLOB))( + ('CryptUnprotectData', _dll)) + +# Functions + + +def encrypt(data, non_interactive=0): + blobin = DATA_BLOB(cbData=len(data), + pbData=cast(c_char_p(data), + POINTER(wintypes.BYTE))) + blobout = DATA_BLOB() + + if not CryptProtectData(byref(blobin), + u('python-keyring-lib.win32crypto'), + None, None, None, + CRYPTPROTECT_UI_FORBIDDEN, + byref(blobout)): + raise OSError("Can't encrypt") + + encrypted = create_string_buffer(blobout.cbData) + memmove(encrypted, blobout.pbData, blobout.cbData) + windll.kernel32.LocalFree(blobout.pbData) + return encrypted.raw + + +def decrypt(encrypted, non_interactive=0): + blobin = DATA_BLOB(cbData=len(encrypted), + pbData=cast(c_char_p(encrypted), + POINTER(wintypes.BYTE))) + blobout = DATA_BLOB() + + if not CryptUnprotectData(byref(blobin), + u('python-keyring-lib.win32crypto'), + None, None, None, + CRYPTPROTECT_UI_FORBIDDEN, + 
byref(blobout)): + raise OSError("Can't decrypt") + + data = create_string_buffer(blobout.cbData) + memmove(data, blobout.pbData, blobout.cbData) + windll.kernel32.LocalFree(blobout.pbData) + return data.raw diff --git a/awx/lib/site-packages/keyring/backends/file.py b/awx/lib/site-packages/keyring/backends/file.py new file mode 100644 index 0000000000..10e7532948 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/file.py @@ -0,0 +1,288 @@ +from __future__ import with_statement + +import os +import getpass +import base64 +import sys +import json +import abc + +from ..py27compat import configparser + +import keyring.util.platform_ +from keyring.errors import PasswordDeleteError +from keyring.backend import KeyringBackend +from keyring.util import properties +from keyring.util.escape import escape as escape_for_ini + +class BaseKeyring(KeyringBackend): + """ + BaseKeyring is a file-based implementation of keyring. + + This keyring stores the password directly in the file and provides methods + which may be overridden by subclasses to support + encryption and decryption. The encrypted payload is stored in base64 + format. + """ + + @properties.NonDataProperty + def file_path(self): + """ + The path to the file where passwords are stored. This property + may be overridden by the subclass or at the instance level. + """ + return os.path.join(keyring.util.platform_.data_root(), self.filename) + + @abc.abstractproperty + def filename(self): + """ + The filename used to store the passwords. + """ + + @abc.abstractmethod + def encrypt(self, password): + """ + Given a password (byte string), return an encrypted byte string. + """ + + @abc.abstractmethod + def decrypt(self, password_encrypted): + """ + Given a password encrypted by a previous call to `encrypt`, return + the original byte string. + """ + + def get_password(self, service, username): + """ + Read the password from the file. + """ + service = escape_for_ini(service) + username = escape_for_ini(username) + + # load the passwords from the file + config = configparser.RawConfigParser() + if os.path.exists(self.file_path): + config.read(self.file_path) + + # fetch the password + try: + password_base64 = config.get(service, username).encode() + # decode with base64 + password_encrypted = base64.decodestring(password_base64) + # decrypted the password + password = self.decrypt(password_encrypted).decode('utf-8') + except (configparser.NoOptionError, configparser.NoSectionError): + password = None + return password + + def set_password(self, service, username, password): + """Write the password in the file. + """ + service = escape_for_ini(service) + username = escape_for_ini(username) + + # encrypt the password + password_encrypted = self.encrypt(password.encode('utf-8')) + # encode with base64 + password_base64 = base64.encodestring(password_encrypted).decode() + + # ensure the file exists + self._ensure_file_path() + + # load the keyring from the disk + config = configparser.RawConfigParser() + config.read(self.file_path) + + # update the keyring with the password + if not config.has_section(service): + config.add_section(service) + config.set(service, username, password_base64) + + # save the keyring back to the file + config_file = open(self.file_path, 'w') + try: + config.write(config_file) + finally: + config_file.close() + + def _ensure_file_path(self): + """ + Ensure the storage path exists. + If it doesn't, create it with "go-rwx" permissions. 
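The on-disk format implemented by BaseKeyring above is an ini file whose values are base64 of whatever encrypt() produced; for PlaintextKeyring, encrypt() is the identity, so a round trip is just base64 plus ConfigParser (sketch; the escape_for_ini escaping is omitted for brevity):

    import base64
    from ConfigParser import RawConfigParser  # the code uses the in-tree py27compat shim

    config = RawConfigParser()
    config.add_section('example-service')
    config.set('example-service', 'alice',
               base64.encodestring('s3cret'.encode('utf-8')).decode())

    stored = config.get('example-service', 'alice').encode()
    print(base64.decodestring(stored).decode('utf-8'))  # s3cret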
+ """ + storage_root = os.path.dirname(self.file_path) + if storage_root and not os.path.isdir(storage_root): + os.makedirs(storage_root) + if not os.path.isfile(self.file_path): + # create the file without group/world permissions + with open(self.file_path, 'w'): + pass + user_read_write = 0600 + os.chmod(self.file_path, user_read_write) + + def delete_password(self, service, username): + """Delete the password for the username of the service. + """ + service = escape_for_ini(service) + config = configparser.RawConfigParser() + if os.path.exists(self.file_path): + config.read(self.file_path) + if not config.remove_section(service): + raise PasswordDeleteError("Password not found") + # update the file + config_file = open(self.file_path, 'w') + config.write(config_file) + +class PlaintextKeyring(BaseKeyring): + """Simple File Keyring with no encryption""" + + priority = .5 + "Applicable for all platforms, but not recommended" + + filename = 'keyring_pass.cfg' + + def encrypt(self, password): + """Directly return the password itself. + """ + return password + + def decrypt(self, password_encrypted): + """Directly return encrypted password. + """ + return password_encrypted + +class EncryptedKeyring(BaseKeyring): + """PyCrypto File Keyring""" + + # a couple constants + block_size = 32 + pad_char = '0' + + filename = 'crypted_pass.cfg' + pw_prefix = 'pw:'.encode() + + @properties.ClassProperty + @classmethod + def priority(self): + "Applicable for all platforms, but not recommended." + try: + __import__('Crypto.Cipher.AES') + __import__('Crypto.Protocol.KDF') + __import__('Crypto.Random') + except ImportError: + raise RuntimeError("PyCrypto required") + if not json: + raise RuntimeError("JSON implementation such as simplejson " + "required.") + return .6 + + @properties.NonDataProperty + def keyring_key(self): + # _unlock or _init_file will set the key or raise an exception + if self._check_file(): + self._unlock() + else: + self._init_file() + return self.keyring_key + + def _get_new_password(self): + while True: + password = getpass.getpass( + "Please set a password for your new keyring: ") + confirm = getpass.getpass('Please confirm the password: ') + if password != confirm: + sys.stderr.write("Error: Your passwords didn't match\n") + continue + if '' == password.strip(): + # forbid the blank password + sys.stderr.write("Error: blank passwords aren't allowed.\n") + continue + return password + + def _init_file(self): + """ + Initialize a new password file and set the reference password. + """ + self.keyring_key = self._get_new_password() + # set a reference password, used to check that the password provided + # matches for subsequent checks. + self.set_password('keyring-setting', 'password reference', + 'password reference value') + + def _check_file(self): + """ + Check if the file exists and has the expected password reference. + """ + if not os.path.exists(self.file_path): + return False + self._migrate() + config = configparser.RawConfigParser() + config.read(self.file_path) + try: + config.get( + escape_for_ini('keyring-setting'), + escape_for_ini('password reference'), + ) + except (configparser.NoSectionError, configparser.NoOptionError): + return False + return True + + def _unlock(self): + """ + Unlock this keyring by getting the password for the keyring from the + user. 
+ """ + self.keyring_key = getpass.getpass( + 'Please enter password for encrypted keyring: ') + try: + ref_pw = self.get_password('keyring-setting', 'password reference') + assert ref_pw == 'password reference value' + except AssertionError: + self._lock() + raise ValueError("Incorrect Password") + + def _lock(self): + """ + Remove the keyring key from this instance. + """ + del self.keyring_key + + def _create_cipher(self, password, salt, IV): + """ + Create the cipher object to encrypt or decrypt a payload. + """ + from Crypto.Protocol.KDF import PBKDF2 + from Crypto.Cipher import AES + pw = PBKDF2(password, salt, dkLen=self.block_size) + return AES.new(pw[:self.block_size], AES.MODE_CFB, IV) + + def encrypt(self, password): + from Crypto.Random import get_random_bytes + salt = get_random_bytes(self.block_size) + from Crypto.Cipher import AES + IV = get_random_bytes(AES.block_size) + cipher = self._create_cipher(self.keyring_key, salt, IV) + password_encrypted = cipher.encrypt(self.pw_prefix + password) + # Serialize the salt, IV, and encrypted password in a secure format + data = dict( + salt=salt, IV=IV, password_encrypted=password_encrypted, + ) + for key in data: + data[key] = base64.encodestring(data[key]).decode() + return json.dumps(data).encode() + + def decrypt(self, password_encrypted): + # unpack the encrypted payload + data = json.loads(password_encrypted.decode()) + for key in data: + data[key] = base64.decodestring(data[key].encode()) + cipher = self._create_cipher(self.keyring_key, data['salt'], + data['IV']) + plaintext = cipher.decrypt(data['password_encrypted']) + assert plaintext.startswith(self.pw_prefix) + return plaintext[3:] + + def _migrate(self, keyring_password=None): + """ + Convert older keyrings to the current format. + """ diff --git a/awx/lib/site-packages/keyring/backends/keyczar.py b/awx/lib/site-packages/keyring/backends/keyczar.py new file mode 100644 index 0000000000..b3878c9cea --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/keyczar.py @@ -0,0 +1,99 @@ +from __future__ import absolute_import + +import os +import abc + +try: + from keyczar import keyczar +except ImportError: + pass + +from keyring.backend import Crypter +from keyring import errors + +def has_keyczar(): + with errors.ExceptionRaisedContext() as exc: + keyczar.__name__ + return not bool(exc) + +class BaseCrypter(Crypter): + """Base Keyczar keyset encryption and decryption. + The keyset initialisation is deferred until required. + """ + + @abc.abstractproperty + def keyset_location(self): + """Location for the main keyset that may be encrypted or not""" + pass + + @abc.abstractproperty + def encrypting_keyset_location(self): + """Location for the encrypting keyset. + Use None to indicate that the main keyset is not encrypted + """ + pass + + @property + def crypter(self): + """The actual keyczar crypter""" + if not hasattr(self, '_crypter'): + # initialise the Keyczar keysets + if not self.keyset_location: + raise ValueError('No encrypted keyset location!') + reader = keyczar.readers.CreateReader(self.keyset_location) + if self.encrypting_keyset_location: + encrypting_keyczar = keyczar.Crypter.Read( + self.encrypting_keyset_location) + reader = keyczar.readers.EncryptedReader(reader, + encrypting_keyczar) + self._crypter = keyczar.Crypter(reader) + return self._crypter + + def encrypt(self, value): + """Encrypt the value. + """ + if not value: + return '' + return self.crypter.Encrypt(value) + + def decrypt(self, value): + """Decrypt the value. 
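
The EncryptedKeyring.encrypt()/decrypt() pair above wraps each password in a JSON envelope of three base64 fields: the PBKDF2 salt, the AES-CFB IV, and the ciphertext (prefixed with 'pw:' so a wrong key is detectable). A stand-alone sketch of that envelope, assuming PyCrypto is installed and using 'master-password'/'secret' as placeholder values:

    import base64
    import json
    from Crypto.Cipher import AES
    from Crypto.Protocol.KDF import PBKDF2
    from Crypto.Random import get_random_bytes

    master = 'master-password'
    salt = get_random_bytes(32)          # block_size in the backend above
    iv = get_random_bytes(AES.block_size)
    key = PBKDF2(master, salt, dkLen=32)
    ciphertext = AES.new(key, AES.MODE_CFB, iv).encrypt('pw:secret')

    fields = dict(salt=salt, IV=iv, password_encrypted=ciphertext)
    blob = json.dumps(dict((k, base64.encodestring(v))
                           for k, v in fields.items()))

    # decrypt() re-derives the key from the stored salt and checks the prefix
    data = dict((k, base64.decodestring(v.encode()))
                for k, v in json.loads(blob).items())
    cipher = AES.new(PBKDF2(master, data['salt'], dkLen=32),
                     AES.MODE_CFB, data['IV'])
    assert cipher.decrypt(data['password_encrypted']) == 'pw:secret'
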
+ """ + if not value: + return '' + return self.crypter.Decrypt(value) + +class Crypter(BaseCrypter): + """A Keyczar crypter using locations specified in the constructor + """ + + def __init__(self, keyset_location, encrypting_keyset_location=None): + self._keyset_location = keyset_location + self._encrypting_keyset_location = encrypting_keyset_location + + @property + def keyset_location(self): + return self._keyset_location + + @property + def encrypting_keyset_location(self): + return self._encrypting_keyset_location + +class EnvironCrypter(BaseCrypter): + """A Keyczar crypter using locations specified by environment vars + """ + + KEYSET_ENV_VAR = 'KEYRING_KEYCZAR_ENCRYPTED_LOCATION' + ENC_KEYSET_ENV_VAR = 'KEYRING_KEYCZAR_ENCRYPTING_LOCATION' + + @property + def keyset_location(self): + val = os.environ.get(self.KEYSET_ENV_VAR) + if not val: + raise ValueError('%s environment value not set' % + self.KEYSET_ENV_VAR) + return val + + @property + def encrypting_keyset_location(self): + return os.environ.get(self.ENC_KEYSET_ENV_VAR) diff --git a/awx/lib/site-packages/keyring/backends/kwallet.py b/awx/lib/site-packages/keyring/backends/kwallet.py new file mode 100644 index 0000000000..15825bf687 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/kwallet.py @@ -0,0 +1,100 @@ +import os + +from keyring.backend import KeyringBackend +from keyring.errors import PasswordDeleteError +from keyring.errors import PasswordSetError, ExceptionRaisedContext +from keyring.util import properties + +try: + from PyKDE4.kdeui import KWallet + from PyQt4 import QtGui +except ImportError: + pass + +kwallet = None + +def open_kwallet(kwallet_module=None, qt_module=None): + + # If we specified the kwallet_module and/or qt_module, surely we won't need + # the cached kwallet object... + if kwallet_module is None and qt_module is None: + global kwallet + if not kwallet is None: + return kwallet + + # Allow for the injection of module-like objects for testing purposes. + if kwallet_module is None: + kwallet_module = KWallet.Wallet + if qt_module is None: + qt_module = QtGui + + # KDE wants us to instantiate an application object. + app = None + if qt_module.qApp.instance() == None: + app = qt_module.QApplication([]) + try: + window = qt_module.QWidget() + kwallet = kwallet_module.openWallet( + kwallet_module.NetworkWallet(), + window.winId(), + kwallet_module.Synchronous) + if kwallet is not None: + if not kwallet.hasFolder('Python'): + kwallet.createFolder('Python') + kwallet.setFolder('Python') + return kwallet + finally: + if app: + app.exit() + +class Keyring(KeyringBackend): + """KDE KWallet""" + + @properties.ClassProperty + @classmethod + def priority(cls): + with ExceptionRaisedContext() as exc: + KWallet.__name__ + if exc: + raise RuntimeError("KDE libraries not available") + if 'KDE_SESSION_ID' not in os.environ: + return 0 + return 5 + + def get_password(self, service, username): + """Get password of the username for the service + """ + key = username + '@' + service + network = KWallet.Wallet.NetworkWallet() + wallet = open_kwallet() + if wallet is None: + # the user pressed "cancel" when prompted to unlock their keyring. + return None + if wallet.keyDoesNotExist(network, 'Python', key): + return None + + result = wallet.readPassword(key)[1] + # The string will be a PyQt4.QtCore.QString, so turn it into a unicode + # object. 
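
The EnvironCrypter defined above is configured purely through its two environment variables; a small sketch (the keyset path is illustrative):

    import os
    from keyring.backends import keyczar

    os.environ['KEYRING_KEYCZAR_ENCRYPTED_LOCATION'] = '/path/to/keyset'
    crypter = keyczar.EnvironCrypter()
    print crypter.keyset_location              # '/path/to/keyset'
    print crypter.encrypting_keyset_location   # None until the second var is set
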
+ return unicode(result) + + def set_password(self, service, username, password): + """Set password for the username of the service + """ + wallet = open_kwallet() + if wallet is None: + # the user pressed "cancel" when prompted to unlock their keyring. + raise PasswordSetError("Cancelled by user") + wallet.writePassword(username+'@'+service, password) + + def delete_password(self, service, username): + """Delete the password for the username of the service. + """ + key = username + '@' + service + wallet = open_kwallet() + if wallet is None: + # the user pressed "cancel" when prompted to unlock their keyring. + raise PasswordDeleteError("Cancelled by user") + if wallet.keyDoesNotExist(wallet.walletName(), 'Python', key): + raise PasswordDeleteError("Password not found") + wallet.removeEntry(key) diff --git a/awx/lib/site-packages/keyring/backends/multi.py b/awx/lib/site-packages/keyring/backends/multi.py new file mode 100644 index 0000000000..303e4a27b6 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/multi.py @@ -0,0 +1,63 @@ +import itertools + +from keyring.util import properties +from keyring.backend import KeyringBackend +from keyring import errors + +class MultipartKeyringWrapper(KeyringBackend): + + """A wrapper around an existing keyring that breaks the password into + smaller parts to handle implementations that have limits on the maximum + length of passwords i.e. Windows Vault + """ + + def __init__(self, keyring, max_password_size=512): + self._keyring = keyring + self._max_password_size = max_password_size + + @properties.ClassProperty + @classmethod + def priority(cls): + return 0 + + def get_password(self, service, username): + """Get password of the username for the service + """ + init_part = self._keyring.get_password(service, username) + if init_part: + parts = [init_part,] + i = 1 + while True: + next_part = self._keyring.get_password( + service, + '%s{{part_%d}}' %(username, i)) + if next_part: + parts.append(next_part) + i += 1 + else: + break + return ''.join(parts) + return None + + def set_password(self, service, username, password): + """Set password for the username of the service + """ + segments = range(0, len(password), self._max_password_size) + password_parts = [ + password[i:i + self._max_password_size] for i in segments] + for i, password_part in enumerate(password_parts): + curr_username = username + if i > 0: + curr_username += '{{part_%d}}' %i + self._keyring.set_password(service, curr_username, password_part) + + def delete_password(self, service, username): + self._keyring.delete_password(service, username) + count = itertools.count(1) + while True: + part_name = '%(username)s{{part_%(index)d}}' % dict( + index = count.next(), **vars()) + try: + self._keyring.delete_password(service, part_name) + except errors.PasswordDeleteError: + break diff --git a/awx/lib/site-packages/keyring/backends/pyfs.py b/awx/lib/site-packages/keyring/backends/pyfs.py new file mode 100644 index 0000000000..95d7d3b6c8 --- /dev/null +++ b/awx/lib/site-packages/keyring/backends/pyfs.py @@ -0,0 +1,253 @@ +import os +import base64 + +from ..py27compat import configparser + +import keyring.util.platform_ +from keyring import errors +from keyring.util.escape import escape as escape_for_ini +from keyring.util import properties +from keyring.backend import KeyringBackend, NullCrypter +from . 
import keyczar + +try: + import fs.opener + import fs.osfs + import fs.errors + import fs.path + import fs.remote +except ImportError: + pass + +def has_pyfs(): + """ + Does this environment have pyfs installed? + Should return False even when Mercurial's Demand Import allowed import of + fs.*. + """ + with errors.ExceptionRaisedContext() as exc: + fs.__name__ + return not bool(exc) + +class BasicKeyring(KeyringBackend): + """BasicKeyring is a Pyfilesystem-based implementation of + keyring. + + It stores the password directly in the file, and supports + encryption and decryption. The encrypted password is stored in base64 + format. + Being based on Pyfilesystem the file can be local or network-based and + served by any of the filesystems supported by Pyfilesystem including Amazon + S3, FTP, WebDAV, memory and more. + """ + + _filename = 'keyring_pyf_pass.cfg' + + def __init__(self, crypter, filename=None, can_create=True, + cache_timeout=None): + super(BasicKeyring, self).__init__() + self._crypter = crypter + self._filename = (filename or + os.path.join(keyring.util.platform_.data_root(), + self.__class__._filename)) + self._can_create = can_create + self._cache_timeout = cache_timeout + + @properties.NonDataProperty + def file_path(self): + """ + The path to the file where passwords are stored. This property + may be overridden by the subclass or at the instance level. + """ + return os.path.join(keyring.util.platform_.data_root(), self.filename) + + @property + def filename(self): + """The filename used to store the passwords. + """ + return self._filename + + def encrypt(self, password): + """Encrypt the password. + """ + if not password or not self._crypter: + return password or '' + return self._crypter.encrypt(password) + + def decrypt(self, password_encrypted): + """Decrypt the password. 
+        """
+        if not password_encrypted or not self._crypter:
+            return password_encrypted or ''
+        return self._crypter.decrypt(password_encrypted)
+
+    def _open(self, mode='rb'):
+        """Open the password file in the specified mode
+        """
+        open_file = None
+        writeable = 'w' in mode or 'a' in mode or '+' in mode
+        try:
+            # NOTE: currently the MemOpener does not split off any filename
+            # which causes errors on close()
+            # so we add a dummy name and open it separately
+            if (self.filename.startswith('mem://') or
+                    self.filename.startswith('ram://')):
+                open_file = fs.opener.fsopendir(self.filename).open('kr.cfg',
                                                                     mode)
+            else:
+                if not hasattr(self, '_pyfs'):
+                    # reuse the pyfilesystem and path
+                    self._pyfs, self._path = fs.opener.opener.parse(self.filename,
                                                                     writeable=writeable)
+                    # cache if permitted
+                    if self._cache_timeout is not None:
+                        self._pyfs = fs.remote.CacheFS(
+                            self._pyfs, cache_timeout=self._cache_timeout)
+                open_file = self._pyfs.open(self._path, mode)
+        except fs.errors.ResourceNotFoundError:
+            if self._can_create:
+                segments = fs.opener.opener.split_segments(self.filename)
+                if segments:
+                    # this seems broken, but pyfilesystem uses it, so we must
+                    fs_name, credentials, url1, url2, path = segments.groups()
+                    assert fs_name, 'Should be a remote filesystem'
+                    host = ''
+                    # allow for domain:port
+                    if ':' in url2:
+                        split_url2 = url2.split('/', 1)
+                        if len(split_url2) > 1:
+                            url2 = split_url2[1]
+                        else:
+                            url2 = ''
+                        host = split_url2[0]
+                    pyfs = fs.opener.opener.opendir('%s://%s' %(fs_name, host))
+                    # cache if permitted
+                    if self._cache_timeout is not None:
+                        pyfs = fs.remote.CacheFS(
+                            pyfs, cache_timeout=self._cache_timeout)
+                    # NOTE: fs.path.split does not function in the same way as os.path.split... at least under windows
+                    url2_path, url2_filename = os.path.split(url2)
+                    if url2_path and not pyfs.exists(url2_path):
+                        pyfs.makedir(url2_path, recursive=True)
+                else:
+                    # assume local filesystem
+                    full_url = fs.opener._expand_syspath(self.filename)
+                    # NOTE: fs.path.split does not function in the same way as os.path.split... at least under windows
+                    url2_path, url2 = os.path.split(full_url)
+                    pyfs = fs.osfs.OSFS(url2_path)
+
+                try:
+                    # reuse the pyfilesystem and path
+                    self._pyfs = pyfs
+                    self._path = url2
+                    return pyfs.open(url2, mode)
+                except fs.errors.ResourceNotFoundError:
+                    if writeable:
+                        raise
+                    else:
+                        pass
+            # NOTE: ignore read errors as the underlying caller can fail safely
+            if writeable:
+                raise
+            else:
+                pass
+        return open_file
+
+    @property
+    def config(self):
+        """load the passwords from the config file
+        """
+        if not hasattr(self, '_config'):
+            raw_config = configparser.RawConfigParser()
+            f = self._open()
+            if f:
+                raw_config.readfp(f)
+                f.close()
+            self._config = raw_config
+        return self._config
+
+    def get_password(self, service, username):
+        """Read the password from the file.
+        """
+        service = escape_for_ini(service)
+        username = escape_for_ini(username)
+
+        # fetch the password
+        try:
+            password_base64 = self.config.get(service, username).encode()
+            # decode with base64
+            password_encrypted = base64.decodestring(password_base64)
+            # decrypt the password
+            password = self.decrypt(password_encrypted).decode('utf-8')
+        except (configparser.NoOptionError, configparser.NoSectionError):
+            password = None
+        return password
+
+    def set_password(self, service, username, password):
+        """Write the password in the file.
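
In practice the pyfs backend is driven entirely by the filename URL handed to the constructor; any Pyfilesystem URL (ftp://, s3://, and so on) routes through the same _open() logic above. A sketch against a throwaway local path, assuming the fs (Pyfilesystem) package is installed:

    import os
    import tempfile
    from keyring.backends import pyfs

    path = os.path.join(tempfile.mkdtemp(), 'kr.cfg')
    kr = pyfs.PlaintextKeyring(filename=path)
    kr.set_password('svc', 'alice', 'secret')
    assert kr.get_password('svc', 'alice') == 'secret'
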
+ """ + service = escape_for_ini(service) + username = escape_for_ini(username) + + # encrypt the password + password = password or '' + password_encrypted = self.encrypt(password.encode('utf-8')) + + # encode with base64 + password_base64 = base64.encodestring(password_encrypted).decode() + # write the modification + if not self.config.has_section(service): + self.config.add_section(service) + self.config.set(service, username, password_base64) + config_file = self._open('w') + self.config.write(config_file) + config_file.close() + + def delete_password(self, service, username): + service = escape_for_ini(service) + username = escape_for_ini(username) + + try: + self.config.remove_option(service, username) + except configparser.NoSectionError: + raise errors.PasswordDeleteError('Password not found') + config_file = self._open('w') + self.config.write(config_file) + config_file.close() + + @properties.ClassProperty + @classmethod + def priority(cls): + if not has_pyfs(): + raise RuntimeError("pyfs required") + return 2 + +class PlaintextKeyring(BasicKeyring): + """Unencrypted Pyfilesystem Keyring + """ + + def __init__(self, filename=None, can_create=True, cache_timeout=None): + super(PlaintextKeyring, self).__init__( + NullCrypter(), filename=filename, can_create=can_create, + cache_timeout=cache_timeout) + +class EncryptedKeyring(BasicKeyring): + """Encrypted Pyfilesystem Keyring + """ + + _filename = 'crypted_pyf_pass.cfg' + + def __init__(self, crypter, filename=None, can_create=True, + cache_timeout=None): + super(EncryptedKeyring, self).__init__( + crypter, filename=filename, can_create=can_create, + cache_timeout=cache_timeout) + +class KeyczarKeyring(EncryptedKeyring): + """Encrypted Pyfilesystem Keyring using Keyczar keysets specified in + environment vars + """ + + def __init__(self): + super(KeyczarKeyring, self).__init__( + keyczar.EnvironCrypter()) diff --git a/awx/lib/site-packages/keyring/cli.py b/awx/lib/site-packages/keyring/cli.py new file mode 100644 index 0000000000..944225691b --- /dev/null +++ b/awx/lib/site-packages/keyring/cli.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +"""Simple command line interface to get/set password from a keyring""" + +import getpass +from optparse import OptionParser +import sys + +import keyring +import keyring.core + + +class CommandLineTool(object): + def __init__(self): + self.parser = OptionParser( + usage="%prog [get|set|del] SERVICE USERNAME") + self.parser.add_option("-p", "--keyring-path", + dest="keyring_path", default=None, + help="Path to the keyring backend") + self.parser.add_option("-b", "--keyring-backend", + dest="keyring_backend", default=None, + help="Name of the keyring backend") + + def run(self, argv): + opts, args = self.parser.parse_args(argv) + + try: + kind, service, username = args + except ValueError: + if len(args) == 0: + # Be nice with the user if he just tries to launch the tool + self.parser.print_help() + return 1 + else: + self.parser.error("Wrong number of arguments") + + if opts.keyring_backend is not None: + try: + backend = keyring.core.load_keyring(opts.keyring_path, + opts.keyring_backend) + keyring.set_keyring(backend) + except (Exception,): + # Tons of things can go wrong here: + # ImportError when using "fjkljfljkl" + # AttributeError when using "os.path.bar" + # TypeError when using "__builtins__.str" + # So, we play on the safe side, and catch everything. 
+ e = sys.exc_info()[1] + self.parser.error("Unable to load specified keyring: %s" % e) + + if kind == 'get': + password = keyring.get_password(service, username) + if password is None: + return 1 + + self.output_password(password) + return 0 + + elif kind == 'set': + password = self.input_password("Password for '%s' in '%s': " % + (username, service)) + keyring.set_password(service, username, password) + return 0 + + elif kind == 'del': + password = self.input_password("Deleting password for '%s' in '%s': " % + (username, service)) + keyring.delete_password(service, username) + return 0 + + else: + self.parser.error("You can only 'get', 'del' or 'set' a password.") + pass + + def input_password(self, prompt): + """Ask for a password to the user. + + This mostly exists to ease the testing process. + """ + + return getpass.getpass(prompt) + + def output_password(self, password): + """Output the password to the user. + + This mostly exists to ease the testing process. + """ + + print >> sys.stdout, password + + +def main(argv=None): + """Main command line interface.""" + + if argv is None: + argv = sys.argv[1:] + + cli = CommandLineTool() + return cli.run(argv) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/awx/lib/site-packages/keyring/core.py b/awx/lib/site-packages/keyring/core.py new file mode 100644 index 0000000000..0e91042eed --- /dev/null +++ b/awx/lib/site-packages/keyring/core.py @@ -0,0 +1,141 @@ +""" +core.py + +Created by Kang Zhang on 2009-07-09 +""" +import os +import sys +import warnings + +from .py27compat import configparser + +from keyring import logger +from keyring import backend +from keyring.util import platform_ as platform + + + +def set_keyring(keyring): + """Set current keyring backend. + """ + global _keyring_backend + if not isinstance(keyring, backend.KeyringBackend): + raise TypeError("The keyring must be a subclass of KeyringBackend") + _keyring_backend = keyring + + +def get_keyring(): + """Get current keyring backend. + """ + return _keyring_backend + + +def get_password(service_name, username): + """Get password from the specified service. + """ + return _keyring_backend.get_password(service_name, username) + + +def set_password(service_name, username, password): + """Set password for the user in the specified service. + """ + _keyring_backend.set_password(service_name, username, password) + + +def delete_password(service_name, username): + """Delete the password for the user in the specified service. + """ + _keyring_backend.delete_password(service_name, username) + + +def init_backend(): + """Load a keyring from a config file or for the default platform. + + First try to load the keyring in the config file, if it has not + been declared, assign a default keyring according to the platform. + """ + # select a backend according to the config file + keyring = load_config() + + # if the user doesn't specify a keyring, we apply a default one + if keyring is None: + + keyrings = backend.get_all_keyring() + # rank by priority + keyrings.sort(key = lambda x: -x.priority) + # get the most recommended one + keyring = keyrings[0] + + set_keyring(keyring) + + +def load_keyring(keyring_path, keyring_name): + """ + Load the specified keyring by name (a fully-qualified name to the + keyring, such as 'keyring.backends.file.PlaintextKeyring') + + `keyring_path` is an additional, optional search path and may be None. + **deprecated** In the future, keyring_path must be None. 
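
The CLI above resolves its -b/--keyring-backend flag through keyring.core.load_keyring(); the same dotted-name lookup works programmatically, e.g. to force the plaintext file backend (note this writes keyring_pass.cfg under the platform data root):

    import keyring
    import keyring.core

    backend = keyring.core.load_keyring(
        None, 'keyring.backends.file.PlaintextKeyring')
    keyring.set_keyring(backend)
    keyring.set_password('svc', 'alice', 'secret')
    assert keyring.get_password('svc', 'alice') == 'secret'
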
+ """ + module_name, sep, class_name = keyring_name.rpartition('.') + if keyring_path is not None and keyring_path not in sys.path: + warnings.warn("keyring_path is deprecated and should always be None", + DeprecationWarning) + sys.path.insert(0, keyring_path) + __import__(module_name) + module = sys.modules[module_name] + return getattr(module, class_name)() + + +def load_config(): + """Load a keyring using the config file. + + The config file can be in the current working directory, or in the user's + home directory. + """ + keyring = None + + filename = 'keyringrc.cfg' + + local_path = os.path.join(os.getcwd(), filename) + config_path = os.path.join(platform.data_root(), filename) + + # search from current working directory and the data root + keyring_cfg_candidates = [local_path, config_path] + + # initialize the keyring_config with the first detected config file + keyring_cfg = None + for path in keyring_cfg_candidates: + keyring_cfg = path + if os.path.exists(path): + break + + if os.path.exists(keyring_cfg): + config = configparser.RawConfigParser() + config.read(keyring_cfg) + _load_keyring_path(config) + + # load the keyring class name, and then load this keyring + try: + if config.has_section("backend"): + keyring_name = config.get("backend", "default-keyring").strip() + else: + raise configparser.NoOptionError('backend', 'default-keyring') + + keyring = load_keyring(None, keyring_name) + except (configparser.NoOptionError, ImportError): + logger.warning("Keyring config file contains incorrect values.\n" + + "Config file: %s" % keyring_cfg) + + return keyring + +def _load_keyring_path(config): + "load the keyring-path option (if present)" + try: + path = config.get("backend", "keyring-path").strip() + sys.path.insert(0, path) + except (configparser.NoOptionError, configparser.NoSectionError): + pass + +# init the _keyring_backend +init_backend() diff --git a/awx/lib/site-packages/keyring/credentials.py b/awx/lib/site-packages/keyring/credentials.py new file mode 100644 index 0000000000..92e3dd68fb --- /dev/null +++ b/awx/lib/site-packages/keyring/credentials.py @@ -0,0 +1,57 @@ +import os +import abc + +class Credential(object): + """Abstract class to manage credentials + """ + + __metaclass__ = abc.ABCMeta + + @abc.abstractproperty + def username(self): + return None + + @abc.abstractproperty + def password(self): + return None + +class SimpleCredential(Credential): + """Simple credentials implementation + """ + + def __init__(self, username, password): + self._username = username + self._password = password + + @property + def username(self): + return self._username + + @property + def password(self): + return self._password + +class EnvironCredential(Credential): + """Source credentials from environment variables. + Actual sourcing is deferred until requested. 
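
A keyringrc.cfg accepted by load_config() and _load_keyring_path() above looks like the following (backend name and path are illustrative); the file is looked up first in the current working directory, then in the platform data root:

    [backend]
    default-keyring = keyring.backends.file.PlaintextKeyring
    keyring-path = /path/to/extra/backend/modules
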
+ """ + + def __init__(self, user_env_var, pwd_env_var): + self.user_env_var = user_env_var + self.pwd_env_var = pwd_env_var + + def _get_env(self, env_var): + """Helper to read an environment variable + """ + value = os.environ.get(env_var) + if not value: + raise ValueError('Missing environment variable:%s' %env_var) + return value + + @property + def username(self): + return self._get_env(self.user_env_var) + + @property + def password(self): + return self._get_env(self.pwd_env_var) diff --git a/awx/lib/site-packages/keyring/errors.py b/awx/lib/site-packages/keyring/errors.py new file mode 100644 index 0000000000..ab785eae31 --- /dev/null +++ b/awx/lib/site-packages/keyring/errors.py @@ -0,0 +1,43 @@ +import sys + +class PasswordSetError(Exception): + """Raised when the password can't be set. + """ + +class PasswordDeleteError(Exception): + """Raised when the password can't be deleted. + """ + +class InitError(Exception): + """Raised when the keyring could not be initialised + """ + +class ExceptionRaisedContext(object): + """ + An exception-trapping context that indicates whether an exception was + raised. + """ + def __init__(self, ExpectedException=Exception): + self.ExpectedException = ExpectedException + self.exc_info = None + + def __enter__(self): + self.exc_info = object.__new__(ExceptionInfo) + return self.exc_info + + def __exit__(self, *exc_info): + self.exc_info.__init__(*exc_info) + return self.exc_info.type and issubclass( + self.exc_info.type, self.ExpectedException) + +class ExceptionInfo(object): + def __init__(self, *info): + if not info: + info = sys.exc_info() + self.type, self.value, self.traceback = info + + def __nonzero__(self): + """ + Return True if an exception occurred + """ + return bool(self.type) diff --git a/awx/lib/site-packages/keyring/getpassbackend.py b/awx/lib/site-packages/keyring/getpassbackend.py new file mode 100644 index 0000000000..9706360a32 --- /dev/null +++ b/awx/lib/site-packages/keyring/getpassbackend.py @@ -0,0 +1,13 @@ +"""Specific support for getpass.""" +import os +import getpass + +import keyring.core + + +def get_password(prompt='Password: ', stream=None, + service_name='Python', + username=None): + if username is None: + username = getpass.getuser() + return keyring.core.get_password(service_name, username) diff --git a/awx/lib/site-packages/keyring/http.py b/awx/lib/site-packages/keyring/http.py new file mode 100644 index 0000000000..e14922d18e --- /dev/null +++ b/awx/lib/site-packages/keyring/http.py @@ -0,0 +1,39 @@ +""" +urllib2.HTTPPasswordMgr object using the keyring, for use with the +urllib2.HTTPBasicAuthHandler. + +usage: + import urllib2 + handlers = [urllib2.HTTPBasicAuthHandler(PasswordMgr())] + urllib2.install_opener(handlers) + urllib2.urlopen(...) + +This will prompt for a password if one is required and isn't already +in the keyring. Then, it adds it to the keyring for subsequent use. 
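
ExceptionRaisedContext is the probing idiom behind has_keyczar() and has_pyfs(): touching module.__name__ inside the block forces lazy/demand importers to actually resolve the module, and the context records whether that raised. A quick sketch:

    from keyring import errors

    with errors.ExceptionRaisedContext() as exc:
        import json
        json.__name__
    print bool(exc)    # False: nothing was raised

    with errors.ExceptionRaisedContext(ImportError) as exc:
        __import__('no_such_module')    # hypothetical missing module
    print bool(exc)    # True: the ImportError was trapped and recorded
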
+"""
+
+import keyring
+import getpass
+
+
+class PasswordMgr(object):
+    def get_username(self, realm, authuri):
+        return getpass.getuser()
+
+    def add_password(self, realm, authuri, password):
+        user = self.get_username(realm, authuri)
+        keyring.set_password(realm, user, password)
+
+    def find_user_password(self, realm, authuri):
+        user = self.get_username(realm, authuri)
+        password = keyring.get_password(realm, user)
+        if password is None:
+            prompt = 'password for %(user)s@%(realm)s for '\
+                '%(authuri)s: ' % vars()
+            password = getpass.getpass(prompt)
+            keyring.set_password(realm, user, password)
+        return user, password
+
+    def clear_password(self, realm, authuri):
+        user = self.get_username(realm, authuri)
+        keyring.delete_password(realm, user)
diff --git a/awx/lib/site-packages/keyring/py27compat.py b/awx/lib/site-packages/keyring/py27compat.py
new file mode 100644
index 0000000000..33a547bf18
--- /dev/null
+++ b/awx/lib/site-packages/keyring/py27compat.py
@@ -0,0 +1,13 @@
+"""
+Compatibility support for Python 2.7. Remove when Python 2.7 support is
+no longer required.
+"""
+try:
+    import configparser
+except ImportError:
+    import ConfigParser as configparser
+
+if hasattr(__builtins__, 'raw_input'):
+    input = raw_input
+else:
+    input = input
diff --git a/awx/lib/site-packages/keyring/tests/__init__.py b/awx/lib/site-packages/keyring/tests/__init__.py
new file mode 100644
index 0000000000..14a394dce4
--- /dev/null
+++ b/awx/lib/site-packages/keyring/tests/__init__.py
@@ -0,0 +1,4 @@
+import logging
+import sys
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
diff --git a/awx/lib/site-packages/keyring/tests/backends/__init__.py b/awx/lib/site-packages/keyring/tests/backends/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/keyring/tests/backends/test_Gnome.py b/awx/lib/site-packages/keyring/tests/backends/test_Gnome.py
new file mode 100644
index 0000000000..99940e6e26
--- /dev/null
+++ b/awx/lib/site-packages/keyring/tests/backends/test_Gnome.py
@@ -0,0 +1,60 @@
+import types
+import sys
+
+from ..py30compat import unittest
+from ..test_backend import BackendBasicTests
+from ..util import ImportKiller, Environ, NoNoneDictMutator
+from keyring.backends import Gnome
+
+
+def ImportBlesser(*names, **changes):
+    """A context manager to temporarily make it possible to import a module"""
+    for name in names:
+        changes[name] = types.ModuleType(name)
+    return NoNoneDictMutator(sys.modules, **changes)
+
+
+@unittest.skipUnless(Gnome.Keyring.viable, "Need GnomeKeyring")
+class GnomeKeyringTestCase(BackendBasicTests, unittest.TestCase):
+
+    def environ(self):
+        return dict(GNOME_KEYRING_CONTROL='1',
+                    DISPLAY='1',
+                    DBUS_SESSION_BUS_ADDRESS='1')
+
+    def init_keyring(self):
+        k = Gnome.Keyring()
+
+        # Store passwords in the session (in-memory) keyring for the tests.
+        # This is going to be automatically cleared when the user logs off.
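
One nit on the http.py module docstring above: urllib2.install_opener() takes an OpenerDirector, not a list of handlers, so the working wiring is presumably:

    import urllib2
    from keyring.http import PasswordMgr

    opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(PasswordMgr()))
    urllib2.install_opener(opener)
    # urllib2.urlopen(...) now consults (and populates) the keyring for basic auth
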
+ k.KEYRING_NAME = 'session' + + return k + + def test_supported(self): + with ImportBlesser('gi.repository'): + with Environ(**self.environ()): + self.assertTrue(Gnome.Keyring.viable) + + def test_supported_no_module(self): + with NoNoneDictMutator(Gnome.__dict__, GnomeKeyring=None): + with Environ(**self.environ()): + self.assertFalse(Gnome.Keyring.viable) + + def test_supported_no_keyring(self): + environ = self.environ() + environ['GNOME_KEYRING_CONTROL'] = None + with Environ(**environ): + self.assertFalse(Gnome.Keyring.viable) + + def test_supported_no_display(self): + environ = self.environ() + environ['DISPLAY'] = None + with Environ(**environ): + self.assertFalse(Gnome.Keyring.viable) + + def test_supported_no_session(self): + environ = self.environ() + environ['DBUS_SESSION_BUS_ADDRESS'] = None + with Environ(**environ): + self.assertFalse(Gnome.Keyring.viable) diff --git a/awx/lib/site-packages/keyring/tests/backends/test_Google.py b/awx/lib/site-packages/keyring/tests/backends/test_Google.py new file mode 100644 index 0000000000..82362aa842 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_Google.py @@ -0,0 +1,371 @@ +import codecs +import base64 +import cPickle + +import keyring.py27compat +from ..py30compat import unittest +from ..test_backend import BackendBasicTests +from keyring.backends import Google +from keyring.credentials import SimpleCredential +from keyring.backend import NullCrypter +from keyring import errors +from .. import mocks + +def is_gdata_supported(): + try: + __import__('gdata.service') + except ImportError: + return False + return True + +def init_google_docs_keyring(client, can_create=True, + input_getter=keyring.py27compat.input): + credentials = SimpleCredential('foo', 'bar') + return Google.DocsKeyring(credentials, + 'test_src', + NullCrypter(), + client=client, + can_create=can_create, + input_getter=input_getter + ) + +@unittest.skipUnless(is_gdata_supported(), + "Need Google Docs (gdata)") +class GoogleDocsKeyringTestCase(BackendBasicTests, unittest.TestCase): + """Run all the standard tests on a new keyring""" + + def init_keyring(self): + client = mocks.MockDocumentService() + client.SetClientLoginToken('foo') + return init_google_docs_keyring(client) + +@unittest.skipUnless(is_gdata_supported(), + "Need Google Docs (gdata)") +class GoogleDocsKeyringInteractionTestCase(unittest.TestCase): + """Additional tests for Google Doc interactions""" + + def _init_client(self, set_token=True): + client = mocks.MockDocumentService() + if set_token: + client.SetClientLoginToken('interaction') + return client + + def _init_keyring(self, client): + self.keyring = init_google_docs_keyring(client) + + def _init_listfeed(self): + listfeed = mocks.MockListFeed() + listfeed._entry = [mocks.MockDocumentListEntry(), + mocks.MockDocumentListEntry() + ] + return listfeed + + def _encode_data(self, data): + return base64.urlsafe_b64encode(cPickle.dumps(data)) + + def test_handles_auth_failure(self): + import gdata + client = self._init_client(set_token=False) + client._login_err = gdata.service.BadAuthentication + self._init_keyring(client) + try: + google_client = self.keyring.client + self.assertTrue(False, 'Should throw InitError') + except errors.InitError: + pass + + def test_handles_auth_error(self): + import gdata + client = self._init_client(set_token=False) + client._login_err = gdata.service.Error + self._init_keyring(client) + try: + google_client = self.keyring.client + self.assertTrue(False, 'Should throw InitError') + except 
errors.InitError: + pass + + def test_handles_login_captcha(self): + import gdata + client = self._init_client(set_token=False) + client._login_err = gdata.service.CaptchaRequired + client.captcha_url = 'a_captcha_url' + client.captcha_token = 'token' + self.get_input_called = False + def _get_input(prompt): + self.get_input_called = True + delattr(client, '_login_err') + return 'Foo' + self.keyring = init_google_docs_keyring(client, input_getter=_get_input) + google_client = self.keyring.client + self.assertTrue(self.get_input_called, 'Should have got input') + + def test_retrieves_existing_keyring_with_and_without_bom(self): + client = self._init_client() + dummy_entries = dict(section1=dict(user1='pwd1')) + no_utf8_bom_entries = self._encode_data(dummy_entries) + client._request_response = dict(status=200, data=no_utf8_bom_entries) + client._listfeed = self._init_listfeed() + self._init_keyring(client) + self.assertEqual(self.keyring.get_password('section1', 'user1'), 'pwd1') + + utf8_bom_entries = codecs.BOM_UTF8 + no_utf8_bom_entries + client._request_response = dict(status=200, data=utf8_bom_entries) + self._init_keyring(client) + self.assertEqual(self.keyring.get_password('section1', 'user1'), 'pwd1') + + def test_handles_retrieve_failure(self): + import gdata + client = self._init_client() + client._listfeed = self._init_listfeed() + client._request_response = dict(status=400, + reason='Data centre explosion') + self._init_keyring(client) + try: + self.keyring.get_password('any', 'thing') + self.assertTrue(False, 'Should throw InitError') + except errors.InitError: + pass + + def test_handles_corrupt_retrieve(self): + client = self._init_client() + dummy_entries = dict(section1=dict(user1='pwd1')) + client._request_response = dict(status=200, data='broken' + self._encode_data(dummy_entries)) + client._listfeed = self._init_listfeed() + self._init_keyring(client) + try: + self.keyring.get_password('any', 'thing') + self.assertTrue(False, 'Should throw InitError') + except errors.InitError: + pass + + def test_no_create_if_requested(self): + client = self._init_client() + self.keyring = init_google_docs_keyring(client, can_create=False) + try: + self.keyring.get_password('any', 'thing') + self.assertTrue(False, 'Should throw InitError') + except errors.InitError: + pass + + def test_no_set_if_create_folder_fails_on_new_keyring(self): + import gdata + client = self._init_client() + client._create_folder_err = gdata.service.RequestError + self._init_keyring(client) + self.assertEqual(self.keyring.get_password('service-a', 'user-A'), None, + 'No password should be set in new keyring') + try: + self.keyring.set_password('service-a', 'user-A', 'password-A') + self.assertTrue(False, 'Should throw PasswordSetError') + except errors.PasswordSetError: + pass + self.assertEqual(self.keyring.get_password('service-a', 'user-A'), None, + 'No password should be set after write fail') + + def test_no_set_if_write_fails_on_new_keyring(self): + import gdata + client = self._init_client() + client._upload_err = gdata.service.RequestError + self._init_keyring(client) + self.assertEqual(self.keyring.get_password('service-a', 'user-A'), None, + 'No password should be set in new keyring') + try: + self.keyring.set_password('service-a', 'user-A', 'password-A') + self.assertTrue(False, 'Should throw PasswordSetError') + except errors.PasswordSetError: + pass + self.assertEqual(self.keyring.get_password('service-a', 'user-A'), None, + 'No password should be set after write fail') + + def 
test_no_set_if_write_fails_on_existing_keyring(self): + import gdata + client = self._init_client() + dummy_entries = dict(sectionB=dict(user9='pwd9')) + client._request_response = dict(status=200, data=self._encode_data(dummy_entries)) + client._put_err = gdata.service.RequestError + client._listfeed = self._init_listfeed() + self._init_keyring(client) + self.assertEqual(self.keyring.get_password('sectionB', 'user9'), 'pwd9', + 'Correct password should be set in existing keyring') + try: + self.keyring.set_password('sectionB', 'user9', 'Not the same pwd') + self.assertTrue(False, 'Should throw PasswordSetError') + except errors.PasswordSetError: + pass + self.assertEqual(self.keyring.get_password('sectionB', 'user9'), 'pwd9', + 'Password should be unchanged after write fail') + + def test_writes_correct_data_to_google_docs(self): + import gdata + client = self._init_client() + dummy_entries = dict(sectionWriteChk=dict(userWriteChk='pwd')) + client._request_response = dict(status=200, data=self._encode_data(dummy_entries)) + client._listfeed = self._init_listfeed() + self._init_keyring(client) + self.keyring.set_password('sectionWriteChk', + 'userWritechk', + 'new_pwd') + self.assertIsNotNone(client._put_data, 'Should have written data') + self.assertEquals( + 'new_pwd', + client._put_data.get('sectionWriteChk').get('userWritechk'), + 'Did not write updated password!') + + def test_handles_write_conflict_on_different_service(self): + import gdata + client = self._init_client() + dummy_entries = dict(sectionWriteConflictA=dict( + userwriteConflictA='pwdwriteConflictA')) + client._request_response = dict(status=200, data=self._encode_data(dummy_entries)) + client._put_err = [(gdata.service.RequestError, + {'status': '406', + 'reason': 'Conflict'}),] + client._listfeed = self._init_listfeed() + self._init_keyring(client) + self.assertEqual( + self.keyring.get_password('sectionWriteConflictA', + 'userwriteConflictA'), + 'pwdwriteConflictA', + 'Correct password should be set in existing keyring') + dummy_entries['diffSection'] = dict(foo='bar') + client._request_response = dict(status=200, data=self._encode_data(dummy_entries)) + new_pwd = 'Not the same pwd' + self.keyring.set_password('sectionWriteConflictA', + 'userwriteConflictA', + new_pwd) + + self.assertEquals(self.keyring.get_password('sectionWriteConflictA', + 'userwriteConflictA'), + new_pwd + ) + self.assertEqual(1, client._put_count, + 'Write not called after conflict resolution') + + def test_handles_write_conflict_on_same_service_and_username(self): + import gdata + client = self._init_client() + dummy_entries = dict(sectionWriteConflictB=dict( + userwriteConflictB='pwdwriteConflictB')) + client._request_response = dict(status=200, data=self._encode_data(dummy_entries)) + client._put_err = (gdata.service.RequestError, + {'status': '406', + 'reason': 'Conflict'}) + client._listfeed = self._init_listfeed() + self._init_keyring(client) + self.assertEqual( + self.keyring.get_password('sectionWriteConflictB', + 'userwriteConflictB'), + 'pwdwriteConflictB', + 'Correct password should be set in existing keyring') + conflicting_dummy_entries = dict(sectionWriteConflictB=dict( + userwriteConflictB='pwdwriteConflictC')) + client._request_response = dict(status=200, data=self._encode_data(conflicting_dummy_entries)) + try: + self.keyring.set_password('sectionWriteConflictB', + 'userwriteConflictB', + 'new_pwd') + self.assertTrue(False, 'Should throw PasswordSetError') + except errors.PasswordSetError: + pass + + def 
test_handles_write_conflict_with_identical_change(self): + import gdata + client = self._init_client() + dummy_entries = dict(sectionWriteConflictC=dict( + userwriteConflictC='pwdwriteConflictC')) + client._request_response = dict(status=200, data=self._encode_data(dummy_entries)) + client._put_err = [(gdata.service.RequestError, + {'status': '406', + 'reason': 'Conflict'}),] + client._listfeed = self._init_listfeed() + self._init_keyring(client) + self.assertEqual( + self.keyring.get_password('sectionWriteConflictC', + 'userwriteConflictC'), + 'pwdwriteConflictC', + 'Correct password should be set in existing keyring') + new_pwd = 'Not the same pwd' + conflicting_dummy_entries = dict(sectionWriteConflictC=dict( + userwriteConflictC=new_pwd)) + client._request_response = dict(status=200, data=self._encode_data(conflicting_dummy_entries)) + self.keyring.set_password('sectionWriteConflictC', + 'userwriteConflictC', + new_pwd) + self.assertEquals(self.keyring.get_password('sectionWriteConflictC', + 'userwriteConflictC'), + new_pwd + ) + + def test_handles_broken_google_put_when_non_owner_update_fails(self): + """Google Docs has a bug when putting to a non-owner + see GoogleDocsKeyring._save_keyring() + """ + import gdata + client = self._init_client() + dummy_entries = dict(sectionBrokenPut=dict( + userBrokenPut='pwdBrokenPut')) + client._request_response = dict(status=200, data=self._encode_data(dummy_entries)) + client._put_err = [( + gdata.service.RequestError, + { 'status': '400', + 'body': 'Sorry, there was an error saving the file. Please try again.', + 'reason': 'Bad Request'}),] + client._listfeed = self._init_listfeed() + self._init_keyring(client) + new_pwd = 'newPwdBrokenPut' + correct_read_entries = dict(sectionBrokenPut=dict( + userBrokenPut='pwdBrokenPut')) + client._request_response = dict(status=200, + data=self._encode_data(correct_read_entries)) + try: + self.keyring.set_password('sectionBrokenPut', + 'userBrokenPut', + new_pwd) + self.assertTrue(False, 'Should throw PasswordSetError') + except errors.PasswordSetError: + pass + + def test_handles_broken_google_put_when_non_owner_update(self): + """Google Docs has a bug when putting to a non-owner + see GoogleDocsKeyring._save_keyring() + """ + import gdata + client = self._init_client() + dummy_entries = dict(sectionBrokenPut=dict( + userBrokenPut='pwdBrokenPut')) + client._request_response = dict(status=200, data=self._encode_data(dummy_entries)) + client._put_err = [( + gdata.service.RequestError, + { 'status': '400', + 'body': 'Sorry, there was an error saving the file. 
Please try again.', + 'reason': 'Bad Request'}),] + client._listfeed = self._init_listfeed() + self._init_keyring(client) + new_pwd = 'newPwdBrokenPut' + correct_read_entries = dict(sectionBrokenPut=dict( + userBrokenPut=new_pwd)) + client._request_response = dict(status=200, + data=self._encode_data(correct_read_entries)) + self.keyring.set_password('sectionBrokenPut', + 'userBrokenPut', + new_pwd) + self.assertEquals(self.keyring.get_password('sectionBrokenPut', + 'userBrokenPut'), + new_pwd) + + def test_uses_existing_folder(self): + import gdata + client = self._init_client() + # should not happen + client._create_folder_err = gdata.service.RequestError + + self._init_keyring(client) + self.assertEqual(self.keyring.get_password('service-a', 'user-A'), None, + 'No password should be set in new keyring') + client._listfeed = self._init_listfeed() + self.keyring.set_password('service-a', 'user-A', 'password-A') + self.assertIsNotNone(client._upload_data, 'Should have written data') + self.assertEqual(self.keyring.get_password('service-a', 'user-A'), + 'password-A', + 'Correct password should be set') diff --git a/awx/lib/site-packages/keyring/tests/backends/test_OS_X.py b/awx/lib/site-packages/keyring/tests/backends/test_OS_X.py new file mode 100644 index 0000000000..f3c90775b3 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_OS_X.py @@ -0,0 +1,24 @@ +import sys + +from ..test_backend import BackendBasicTests +from ..py30compat import unittest +from keyring.backends import OS_X + +def is_osx_keychain_supported(): + return sys.platform in ('mac','darwin') + +@unittest.skipUnless(is_osx_keychain_supported(), + "Need OS X") +class OSXKeychainTestCase(BackendBasicTests, unittest.TestCase): + + def init_keyring(self): + return OS_X.Keyring() + + @unittest.expectedFailure + def test_delete_present(self): + """Not implemented""" + super(OSXKeychainTestCase, self).test_delete_present() + +def test_SecurityCommand(): + assert OS_X.SecurityCommand('get') == 'get-generic-password' + assert OS_X.SecurityCommand('set', 'internet') == 'set-internet-password' diff --git a/awx/lib/site-packages/keyring/tests/backends/test_SecretService.py b/awx/lib/site-packages/keyring/tests/backends/test_SecretService.py new file mode 100644 index 0000000000..39d2e80fed --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_SecretService.py @@ -0,0 +1,22 @@ +from ..py30compat import unittest +from ..test_backend import BackendBasicTests +from keyring.backends import SecretService +from .. import util + +@unittest.skipUnless(SecretService.Keyring.viable, + "SecretStorage package is needed for SecretServiceKeyring") +class SecretServiceKeyringTestCase(BackendBasicTests, unittest.TestCase): + __test__ = True + + def init_keyring(self): + print ("Testing SecretServiceKeyring; the following " + "password prompts are for this keyring") + return SecretService.Keyring() + +class SecretServiceKeyringUnitTests(unittest.TestCase): + def test_supported_no_secretstorage(self): + """ + SecretService Keyring is not viable if secretstorage can't be imported. 
+ """ + with util.NoNoneDictMutator(SecretService.__dict__, secretstorage=None): + self.assertFalse(SecretService.Keyring.viable) diff --git a/awx/lib/site-packages/keyring/tests/backends/test_Windows.py b/awx/lib/site-packages/keyring/tests/backends/test_Windows.py new file mode 100644 index 0000000000..a8bb1bbed1 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_Windows.py @@ -0,0 +1,65 @@ +import sys + +from ..py30compat import unittest + +import keyring.backends.Windows +from ..test_backend import BackendBasicTests +from .test_file import FileKeyringTests + +def is_win32_crypto_supported(): + try: + __import__('keyring.backends._win_crypto') + except ImportError: + return False + return sys.platform in ['win32'] and sys.getwindowsversion()[-2] == 2 + +def is_winvault_supported(): + try: + __import__('win32cred') + has_pywin32 = True + except ImportError: + has_pywin32 = False + return ( + sys.platform in ['win32'] and sys.getwindowsversion().major >= 6 + and has_pywin32 + ) + + +@unittest.skipUnless(is_win32_crypto_supported(), + "Need Windows") +class Win32CryptoKeyringTestCase(FileKeyringTests, unittest.TestCase): + + def init_keyring(self): + return keyring.backends.Windows.EncryptedKeyring() + + +@unittest.skipUnless(is_winvault_supported(), + "Need WinVault") +class WinVaultKeyringTestCase(BackendBasicTests, unittest.TestCase): + def tearDown(self): + # clean up any credentials created + for cred in self.credentials_created: + try: + self.keyring.delete_password(*cred) + except (Exception,): + e = sys.exc_info()[1] + print >> sys.stderr, e + + def init_keyring(self): + return keyring.backends.Windows.WinVaultKeyring() + + +@unittest.skipUnless(keyring.backends.Windows.RegistryKeyring.viable + and sys.version_info > (3,), "RegistryKeyring not viable") +class RegistryKeyringTestCase(BackendBasicTests, unittest.TestCase): + def tearDown(self): + # clean up any credentials created + for cred in self.credentials_created: + try: + self.keyring.delete_password(*cred) + except (Exception,): + e = sys.exc_info()[1] + print >> sys.stderr, e + + def init_keyring(self): + return keyring.backends.Windows.RegistryKeyring() diff --git a/awx/lib/site-packages/keyring/tests/backends/test_crypto.py b/awx/lib/site-packages/keyring/tests/backends/test_crypto.py new file mode 100644 index 0000000000..ae7b581aca --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_crypto.py @@ -0,0 +1,32 @@ +import mock + +from ..py30compat import unittest +from .test_file import FileKeyringTests + +from keyring.backends import file + +def is_crypto_supported(): + try: + __import__('Crypto.Cipher.AES') + __import__('Crypto.Protocol.KDF') + __import__('Crypto.Random') + except ImportError: + return False + return True + + +@unittest.skipUnless(is_crypto_supported(), + "Need Crypto module") +class CryptedFileKeyringTestCase(FileKeyringTests, unittest.TestCase): + + def setUp(self): + super(self.__class__, self).setUp() + fake_getpass = mock.Mock(return_value='abcdef') + self.patcher = mock.patch('getpass.getpass', fake_getpass) + self.patcher.start() + + def tearDown(self): + self.patcher.stop() + + def init_keyring(self): + return file.EncryptedKeyring() diff --git a/awx/lib/site-packages/keyring/tests/backends/test_file.py b/awx/lib/site-packages/keyring/tests/backends/test_file.py new file mode 100644 index 0000000000..d2cd3f351e --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_file.py @@ -0,0 +1,52 @@ +import os +import tempfile +import sys + +from 
..py30compat import unittest + +from ..test_backend import BackendBasicTests +from ..util import random_string + +from keyring.backends import file + +class FileKeyringTests(BackendBasicTests): + + def setUp(self): + super(FileKeyringTests, self).setUp() + self.keyring = self.init_keyring() + self.keyring.file_path = self.tmp_keyring_file = tempfile.mktemp() + + def tearDown(self): + try: + os.unlink(self.tmp_keyring_file) + except (OSError,): + e = sys.exc_info()[1] + if e.errno != 2: # No such file or directory + raise + + def test_encrypt_decrypt(self): + password = random_string(20) + # keyring.encrypt expects bytes + password = password.encode('utf-8') + encrypted = self.keyring.encrypt(password) + + self.assertEqual(password, self.keyring.decrypt(encrypted)) + + +class UncryptedFileKeyringTestCase(FileKeyringTests, unittest.TestCase): + + def init_keyring(self): + return file.PlaintextKeyring() + + @unittest.skipIf(sys.platform == 'win32', + "Group/World permissions aren't meaningful on Windows") + def test_keyring_not_created_world_writable(self): + """ + Ensure that when keyring creates the file that it's not overly- + permissive. + """ + self.keyring.set_password('system', 'user', 'password') + + self.assertTrue(os.path.exists(self.keyring.file_path)) + group_other_perms = os.stat(self.keyring.file_path).st_mode & 0077 + self.assertEqual(group_other_perms, 0) diff --git a/awx/lib/site-packages/keyring/tests/backends/test_keyczar.py b/awx/lib/site-packages/keyring/tests/backends/test_keyczar.py new file mode 100644 index 0000000000..afcb94293f --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_keyczar.py @@ -0,0 +1,83 @@ +import os + +from ..py30compat import unittest + +from keyring.backends import keyczar +from .. import mocks + +def is_keyczar_supported(): + return hasattr(keyczar, 'keyczar') + +@unittest.skipUnless(is_keyczar_supported(), + "Need Keyczar") +class KeyczarCrypterTestCase(unittest.TestCase): + + """Test the keyczar crypter""" + + def setUp(self): + self._orig_keyczar = keyczar.keyczar + keyczar.keyczar = mocks.MockKeyczar() + + def tearDown(self): + keyczar.keyczar = self._orig_keyczar + if keyczar.EnvironCrypter.KEYSET_ENV_VAR in os.environ: + del os.environ[keyczar.EnvironCrypter.KEYSET_ENV_VAR] + if keyczar.EnvironCrypter.ENC_KEYSET_ENV_VAR in os.environ: + del os.environ[keyczar.EnvironCrypter.ENC_KEYSET_ENV_VAR] + + def testKeyczarCrypterWithUnencryptedReader(self): + """ + """ + location = 'bar://baz' + kz_crypter = keyczar.Crypter(location) + self.assertEquals(location, kz_crypter.keyset_location) + self.assertIsNone(kz_crypter.encrypting_keyset_location) + self.assertIsInstance(kz_crypter.crypter, mocks.MockKeyczarCrypter) + self.assertIsInstance(kz_crypter.crypter.reader, mocks.MockKeyczarReader) + self.assertEquals(location, kz_crypter.crypter.reader.location) + + def testKeyczarCrypterWithEncryptedReader(self): + """ + """ + location = 'foo://baz' + encrypting_location = 'castle://aaargh' + kz_crypter = keyczar.Crypter(location, encrypting_location) + self.assertEquals(location, kz_crypter.keyset_location) + self.assertEquals(encrypting_location, + kz_crypter.encrypting_keyset_location) + self.assertIsInstance(kz_crypter.crypter, mocks.MockKeyczarCrypter) + self.assertIsInstance(kz_crypter.crypter.reader, + mocks.MockKeyczarEncryptedReader) + self.assertEquals(location, kz_crypter.crypter.reader._reader.location) + self.assertEquals(encrypting_location, + kz_crypter.crypter.reader._crypter.reader.location) + + def 
testKeyczarCrypterEncryptDecryptHandlesEmptyNone(self): + location = 'castle://aargh' + kz_crypter = keyczar.Crypter(location) + self.assertEquals('', kz_crypter.encrypt('')) + self.assertEquals('', kz_crypter.encrypt(None)) + self.assertEquals('', kz_crypter.decrypt('')) + self.assertEquals('', kz_crypter.decrypt(None)) + + def testEnvironCrypterReadsCorrectValues(self): + location = 'foo://baz' + encrypting_location = 'castle://aaargh' + kz_crypter = keyczar.EnvironCrypter() + os.environ[kz_crypter.KEYSET_ENV_VAR] = location + self.assertEqual(location, kz_crypter.keyset_location) + self.assertIsNone(kz_crypter.encrypting_keyset_location) + os.environ[kz_crypter.ENC_KEYSET_ENV_VAR] = encrypting_location + self.assertEqual(encrypting_location, kz_crypter.encrypting_keyset_location) + + def testEnvironCrypterThrowsExceptionOnMissingValues(self): + location = 'foo://baz' + encrypting_location = 'castle://aaargh' + kz_crypter = keyczar.EnvironCrypter() + try: + locn = kz_crypter.keyset_location + self.assertTrue(False, msg="Should have thrown ValueError") + except ValueError: + # expected + pass + self.assertIsNone(kz_crypter.encrypting_keyset_location) diff --git a/awx/lib/site-packages/keyring/tests/backends/test_kwallet.py b/awx/lib/site-packages/keyring/tests/backends/test_kwallet.py new file mode 100644 index 0000000000..f9e60c157a --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_kwallet.py @@ -0,0 +1,83 @@ +from ..py30compat import unittest + +from keyring.backends import kwallet +from ..test_backend import BackendBasicTests + +def is_qt4_supported(): + try: + __import__('PyQt4.QtGui') + except ImportError: + return False + return True + +@unittest.skipUnless(kwallet.Keyring.viable, "Need KWallet") +class KDEKWalletTestCase(BackendBasicTests, unittest.TestCase): + + def init_keyring(self): + return kwallet.Keyring() + + +class UnOpenableKWallet(object): + """A module-like object used to test KDE wallet fall-back.""" + + Synchronous = None + + def openWallet(self, *args): + return None + + def NetworkWallet(self): + return None + + +class FauxQtGui(object): + """A fake module-like object used in testing the open_kwallet function.""" + + class qApp: + @staticmethod + def instance(): + pass + + class QApplication(object): + def __init__(self, *args): + pass + + def exit(self): + pass + + class QWidget(object): + def __init__(self, *args): + pass + + def winId(self): + pass + +class KDEWalletCanceledTestCase(unittest.TestCase): + + def test_user_canceled(self): + # If the user cancels either the "enter your password to unlock the + # keyring" dialog or clicks "deny" on the "can this application access + # the wallet" dialog then openWallet() will return None. The + # open_wallet() function should handle that eventuality by returning + # None to signify that the KWallet backend is not available. 
+ self.assertEqual( + kwallet.open_kwallet(UnOpenableKWallet(), FauxQtGui()), + None) + + +@unittest.skipUnless(kwallet.Keyring.viable and + is_qt4_supported(), + "Need KWallet and Qt4") +class KDEKWalletInQApplication(unittest.TestCase): + def test_QApplication(self): + try: + from PyKDE4.kdeui import KWallet + from PyQt4.QtGui import QApplication + except: + return + + app = QApplication([]) + wallet = kwallet.open_kwallet() + self.assertTrue(isinstance(wallet, KWallet.Wallet), + msg="The object wallet should be type " + "<KWallet.Wallet> but it is: %s" % repr(wallet)) + app.exit() diff --git a/awx/lib/site-packages/keyring/tests/backends/test_multi.py b/awx/lib/site-packages/keyring/tests/backends/test_multi.py new file mode 100644 index 0000000000..080a366b23 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_multi.py @@ -0,0 +1,58 @@ +from ..py30compat import unittest + +from keyring.backend import KeyringBackend +from keyring.backends import multi +import keyring.errors + +class MultipartKeyringWrapperTestCase(unittest.TestCase): + + """Test the wrapper that breaks passwords into smaller chunks""" + + class MockKeyring(KeyringBackend): + + priority = 1 + + def __init__(self): + self.passwords = {} + + def get_password(self, service, username): + return self.passwords.get(service+username) + + def set_password(self, service, username, password): + self.passwords[service+username] = password + + def delete_password(self, service, username): + try: + del self.passwords[service+username] + except KeyError: + raise keyring.errors.PasswordDeleteError('not found') + + def testViablePassThru(self): + kr = multi.MultipartKeyringWrapper(self.MockKeyring()) + self.assertTrue(kr.viable) + + def testMissingPassword(self): + wrapped_kr = self.MockKeyring() + kr = multi.MultipartKeyringWrapper(wrapped_kr) + self.assertIsNone(kr.get_password('s1', 'u1')) + + def testSmallPasswordSetInSinglePart(self): + wrapped_kr = self.MockKeyring() + kr = multi.MultipartKeyringWrapper(wrapped_kr) + kr.set_password('s1', 'u1', 'p1') + self.assertEquals(wrapped_kr.passwords, {'s1u1':'p1'}) + # should be able to read it back + self.assertEquals(kr.get_password('s1', 'u1'), 'p1') + + def testLargePasswordSetInMultipleParts(self): + wrapped_kr = self.MockKeyring() + kr = multi.MultipartKeyringWrapper(wrapped_kr, + max_password_size=2) + kr.set_password('s2', 'u2', '0123456') + self.assertEquals(wrapped_kr.passwords, {'s2u2':'01', + 's2u2{{part_1}}':'23', + 's2u2{{part_2}}':'45', + "s2u2{{part_3}}":'6'}) + + # should be able to read it back + self.assertEquals(kr.get_password('s2', 'u2'), '0123456') diff --git a/awx/lib/site-packages/keyring/tests/backends/test_pyfs.py b/awx/lib/site-packages/keyring/tests/backends/test_pyfs.py new file mode 100644 index 0000000000..7ed0eb473d --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/backends/test_pyfs.py @@ -0,0 +1,130 @@ +import os +import tempfile + +from ..py30compat import unittest + +import keyring.backend +from keyring.backends import pyfs +from ..test_backend import BackendBasicTests, random_string + + +class ReverseCrypter(keyring.backend.Crypter): + """Very silly crypter class""" + + def encrypt(self, value): + return value[::-1] + + def decrypt(self, value): + return value[::-1] + +class PyfilesystemKeyringTests(BackendBasicTests): + """Base class for Pyfilesystem tests""" + + def setUp(self): + super(PyfilesystemKeyringTests, self).setUp() + self.keyring = self.init_keyring() + + def tearDown(self): + del self.keyring + + def test_encrypt_decrypt(self): +
password = random_string(20) + encrypted = self.keyring.encrypt(password) + + self.assertEqual(password, self.keyring.decrypt(encrypted)) + +@unittest.skipUnless(pyfs.BasicKeyring.viable, "Need Pyfilesystem") +class UnencryptedMemoryPyfilesystemKeyringNoSubDirTestCase( + PyfilesystemKeyringTests, unittest.TestCase): + """Test in memory with no encryption""" + + keyring_filename = 'mem://unencrypted' + + def init_keyring(self): + return keyring.backends.pyfs.PlaintextKeyring( + filename=self.keyring_filename) + +@unittest.skipUnless(pyfs.BasicKeyring.viable, "Need Pyfilesystem") +class UnencryptedMemoryPyfilesystemKeyringSubDirTestCase( + PyfilesystemKeyringTests, unittest.TestCase): + """Test in memory with no encryption""" + + keyring_filename = 'mem://some/sub/dir/unencrypted' + + def init_keyring(self): + return keyring.backends.pyfs.PlaintextKeyring( + filename=self.keyring_filename) + +@unittest.skipUnless(pyfs.BasicKeyring.viable, "Need Pyfilesystem") +class UnencryptedLocalPyfilesystemKeyringNoSubDirTestCase( + PyfilesystemKeyringTests, unittest.TestCase): + """Test using local temp files with no encryption""" + + keyring_filename = '%s/keyring.cfg' %tempfile.mkdtemp() + + def init_keyring(self): + return keyring.backends.pyfs.PlaintextKeyring( + filename=self.keyring_filename) + + def test_handles_preexisting_keyring(self): + from fs.opener import opener + fs, path = opener.parse(self.keyring_filename, writeable=True) + keyring_file = fs.open(path, 'wb') + keyring_file.write( + """[svc1] +user1 = cHdkMQ== + """) + keyring_file.close() + pyf_keyring = keyring.backends.pyfs.PlaintextKeyring( + filename=self.keyring_filename) + self.assertEquals('pwd1', pyf_keyring.get_password('svc1', 'user1')) + + def tearDown(self): + del self.keyring + if os.path.exists(self.keyring_filename): + os.remove(self.keyring_filename) + +@unittest.skipUnless(pyfs.BasicKeyring.viable, "Need Pyfilesystem") +class UnencryptedLocalPyfilesystemKeyringSubDirTestCase( + PyfilesystemKeyringTests, unittest.TestCase): + """Test using local temp files with no encryption""" + + keyring_dir = os.path.join(tempfile.mkdtemp(), 'more', 'sub', 'dirs') + keyring_filename = os.path.join(keyring_dir, 'keyring.cfg') + + def init_keyring(self): + + if not os.path.exists(self.keyring_dir): + os.makedirs(self.keyring_dir) + return keyring.backends.pyfs.PlaintextKeyring( + filename=self.keyring_filename) + +@unittest.skipUnless(pyfs.BasicKeyring.viable, "Need Pyfilesystem") +class EncryptedMemoryPyfilesystemKeyringTestCase(PyfilesystemKeyringTests, + unittest.TestCase): + """Test in memory with encryption""" + + def init_keyring(self): + return keyring.backends.pyfs.EncryptedKeyring( + ReverseCrypter(), + filename='mem://encrypted/keyring.cfg') + +@unittest.skipUnless(pyfs.BasicKeyring.viable, "Need Pyfilesystem") +class EncryptedLocalPyfilesystemKeyringNoSubDirTestCase( + PyfilesystemKeyringTests, unittest.TestCase): + """Test using local temp files with encryption""" + + def init_keyring(self): + return keyring.backends.pyfs.EncryptedKeyring( + ReverseCrypter(), + filename='temp://keyring.cfg') + +@unittest.skipUnless(pyfs.BasicKeyring.viable, "Need Pyfilesystem") +class EncryptedLocalPyfilesystemKeyringSubDirTestCase( + PyfilesystemKeyringTests, unittest.TestCase): + """Test using local temp files with encryption""" + + def init_keyring(self): + return keyring.backends.pyfs.EncryptedKeyring( + ReverseCrypter(), + filename='temp://a/sub/dir/hierarchy/keyring.cfg') diff --git a/awx/lib/site-packages/keyring/tests/mocks.py 
b/awx/lib/site-packages/keyring/tests/mocks.py new file mode 100644 index 0000000000..b45d81b0b2 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/mocks.py @@ -0,0 +1,216 @@ +""" +mocks.py + +Various mock objects for testing +""" + +import cPickle +import base64 +import codecs +from StringIO import StringIO + +class MockAtom(object): + """ Mocks an atom in the GData service. """ + def __init__(self, value): + self.text = value + +class MockEntry(object): + """ Mocks an entry returned from the GData service. """ + def __init__(self, title, ID): + self.title = MockAtom(title) + self.id = MockAtom('http://mock.example.com/%s' % ID) + self.ID = ID # simpler lookup for key value + + def GetEditMediaLink(self): + return MockLink() + + +class MockHTTPClient(object): + """ Mocks the functionality of an http client. """ + def request(*args, **kwargs): + pass + +class MockGDataService(object): + """ Provides the common functionality of a Google Service. """ + http_client = MockHTTPClient() + def __init__(self, email=None, password=None, + account_type='HOSTED_OR_GOOGLE', service=None, + auth_service_url=None, source=None, server=None, + additional_headers=None, handler=None, tokens=None, + http_client=None, token_store=None): + """ Create the Service with the default parameters. """ + self.email = email + self.password = password + self.account_type = account_type + self.service = service + self.auth_service_url = auth_service_url + self.server = server + self.login_token = None + + def GetClientLoginToken(self): + return self.login_token + + def SetClientLoginToken(self, token): + self.login_token = token + + def ClientLogin(self, username, password, account_type=None, service=None, + auth_service_url=None, source=None, captcha_token=None, + captcha_response=None): + + """ Client side login to the service. """ + if hasattr(self, '_login_err'): + raise self._login_err() + +class MockDocumentService(MockGDataService): + """ + Implements the minimum functionality of the Google Document service. + """ + + def Upload(self, media_source, title, folder_or_uri=None, label=None): + """ + Upload a document.
+ """ + if hasattr(self, '_upload_err'): + raise self._upload_err() + if not hasattr(self, '_upload_count'): + self._upload_count = 0 + # save the data for asserting against + self._upload_data = dict(media_source=media_source, title=title, + folder_or_uri=folder_or_uri, label=label) + self._upload_count += 1 + return MockEntry(title, 'mockentry%3A' + title) + + def QueryDocumentListFeed(self, uri): + if hasattr(self, '_listfeed'): + return self._listfeed + return MockListFeed() + + def CreateFolder(self, title, folder_or_uri=None): + if hasattr(self, '_create_folder_err'): + raise self._create_folder_err() + if hasattr(self, '_create_folder'): + return self._create_folder + return MockListEntry() + + def Put(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=3, media_source=None, + converter=None): + import gdata + self._put_data = None + if not hasattr(self, '_put_count'): + self._put_count = 0 + if hasattr(self, '_put_err'): + # allow for a list of errors + if type(self._put_err) == list: + put_err = self._put_err.pop(0) + if not len(self._put_err): + delattr(self, '_put_err') + else: + put_err = self._put_err + if type(put_err) == tuple: + raise put_err[0], put_err[1] + else: + raise put_err() + # save the data for asserting against + assert isinstance(data, basestring), \ + 'Should be a string' + self._put_data = cPickle.loads(base64.urlsafe_b64decode(data)) + self._put_count += 1 + return MockEntry('', 'mockentry%3A' + '') + + def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None): + if hasattr(self, '_export_err'): + raise self._export_err() + if hasattr(self, '_export_data'): + export_file = open(file_path, 'wb') + export_file.write(self._export_data) + export_file.close() + + def request(self, data, uri): + if hasattr(self, '_request_err'): + if type(self._request_err) == tuple: + raise self._request_err[0], self._request_err[1] + else: + raise self._request_err() + if hasattr(self, '_request_response'): + return MockHttpResponse(self._request_response) + +class MockHttpResponse(StringIO, object): + + def __init__(self, response_dict): + super(MockHttpResponse, self).__init__(response_dict.get('data', '')) + self.status = response_dict.get('status', 200) + self.reason = response_dict.get('reason', '') + +class MockListFeed(object): + + @property + def entry(self): + if hasattr(self, '_entry'): + return self._entry + return [] + +class MockListEntry(object): + + pass + +class MockLink(object): + + @property + def href(self): + return '' + +class MockContent(object): + + @property + def src(self): + return 'src' + +class MockDocumentListEntry(object): + + @property + def content(self): + return MockContent() + + def GetEditMediaLink(self): + return MockLink() + +class MockKeyczarReader(object): + + def __init__(self, location): + self.location = location + +class MockKeyczarEncryptedReader(object): + + def __init__(self, reader, crypter): + self._reader = reader + self._crypter = crypter + +class MockKeyczarReaders(object): + + @staticmethod + def CreateReader(location): + return MockKeyczarReader(location) + + @staticmethod + def EncryptedReader(reader, crypter): + return MockKeyczarEncryptedReader(reader, crypter) + +class MockKeyczarCrypter(object): + + def __init__(self, reader): + self.reader = reader + + @staticmethod + def Read(location): + return MockKeyczarCrypter(MockKeyczarReader(location)) + +class MockKeyczar(object): + + @property + def readers(self): + return MockKeyczarReaders + + @property + def 
Crypter(self): + return MockKeyczarCrypter diff --git a/awx/lib/site-packages/keyring/tests/py30compat.py b/awx/lib/site-packages/keyring/tests/py30compat.py new file mode 100644 index 0000000000..4b24c7fe40 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/py30compat.py @@ -0,0 +1,10 @@ +""" +Compatibility support for Python 3.0. Remove when Python 3.0 support is +no longer required. +""" +import sys + +if sys.version_info < (2,7) or sys.version_info[:2] == (3,0): + import unittest2 as unittest +else: + import unittest diff --git a/awx/lib/site-packages/keyring/tests/test_backend.py b/awx/lib/site-packages/keyring/tests/test_backend.py new file mode 100644 index 0000000000..4aa3874926 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/test_backend.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +""" +test_backend.py + +Test case for keyring basic function + +created by Kang Zhang 2009-07-14 +""" +from __future__ import with_statement + +import string + +from keyring.util import escape +from .util import random_string +from keyring import errors + +DIFFICULT_CHARS = string.whitespace + string.punctuation +# unicode only characters +# Sourced from The Quick Brown Fox... Pangrams +# http://www.columbia.edu/~fdc/utf8/ +UNICODE_CHARS = escape.u( + """זהכיףסתםלשמועאיךתנצחקרפדעץטובבגן""" + """ξεσκεπάζωτηνψυχοφθόραβδελυγμία""" + """Съешьжеещёэтихмягкихфранцузскихбулокдавыпейчаю""" + """Жълтатадюлябешещастливачепухъткойтоцъфназамръзнакатогьон""" +) + +# ensure no ascii chars slip by - watch your editor! +assert min(ord(char) for char in UNICODE_CHARS) > 127 + +class BackendBasicTests(object): + """Test for the keyring's basic functions. password_set and password_get + """ + + def setUp(self): + self.keyring = self.init_keyring() + self.credentials_created = set() + + def tearDown(self): + for item in self.credentials_created: + self.keyring.delete_password(*item) + + def set_password(self, service, username, password): + # set the password and save the result so the test runner can clean + # up after if necessary.
+ self.keyring.set_password(service, username, password) + self.credentials_created.add((service, username)) + + def check_set_get(self, service, username, password): + keyring = self.keyring + + # for the non-existent password + self.assertEqual(keyring.get_password(service, username), None) + + # common usage + self.set_password(service, username, password) + self.assertEqual(keyring.get_password(service, username), password) + + # for the empty password + self.set_password(service, username, "") + self.assertEqual(keyring.get_password(service, username), "") + + def test_password_set_get(self): + password = random_string(20) + username = random_string(20) + service = random_string(20) + self.check_set_get(service, username, password) + + def test_difficult_chars(self): + password = random_string(20, DIFFICULT_CHARS) + username = random_string(20, DIFFICULT_CHARS) + service = random_string(20, DIFFICULT_CHARS) + self.check_set_get(service, username, password) + + def test_delete_present(self): + password = random_string(20, DIFFICULT_CHARS) + username = random_string(20, DIFFICULT_CHARS) + service = random_string(20, DIFFICULT_CHARS) + self.keyring.set_password(service, username, password) + self.keyring.delete_password(service, username) + self.assertTrue(self.keyring.get_password(service, username) is None) + + def test_delete_not_present(self): + username = random_string(20, DIFFICULT_CHARS) + service = random_string(20, DIFFICULT_CHARS) + self.assertRaises(errors.PasswordDeleteError, + self.keyring.delete_password, service, username) + + def test_unicode_chars(self): + password = random_string(20, UNICODE_CHARS) + username = random_string(20, UNICODE_CHARS) + service = random_string(20, UNICODE_CHARS) + self.check_set_get(service, username, password) + + def test_unicode_and_ascii_chars(self): + source = (random_string(10, UNICODE_CHARS) + random_string(10) + + random_string(10, DIFFICULT_CHARS)) + password = random_string(20, source) + username = random_string(20, source) + service = random_string(20, source) + self.check_set_get(service, username, password) + + def test_different_user(self): + """ + Issue #47 reports that WinVault isn't storing passwords for + multiple users. This test exercises that test for each of the + backends. 
+ """ + + keyring = self.keyring + self.set_password('service1', 'user1', 'password1') + self.set_password('service1', 'user2', 'password2') + self.assertEqual(keyring.get_password('service1', 'user1'), + 'password1') + self.assertEqual(keyring.get_password('service1', 'user2'), + 'password2') + self.set_password('service2', 'user3', 'password3') + self.assertEqual(keyring.get_password('service1', 'user1'), + 'password1') diff --git a/awx/lib/site-packages/keyring/tests/test_cli.py b/awx/lib/site-packages/keyring/tests/test_cli.py new file mode 100644 index 0000000000..c49a144b7b --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/test_cli.py @@ -0,0 +1,151 @@ +""" +Test case to access the keyring from the command line +""" + +import os.path + +from keyring.tests.py30compat import unittest + +import keyring.backend +from keyring import cli +from keyring import errors + + +class FakeKeyring(keyring.backend.KeyringBackend): + PASSWORD = "GABUZOMEUH" + + def supported(self): + return 1 + + def set_password(self, service, username, password): + pass + + def get_password(self, service, username): + return self.PASSWORD + + def delete_password(self, service, username): + pass + +class SimpleKeyring(keyring.backend.KeyringBackend): + """A very simple keyring""" + + def __init__(self): + self.pwd = {} + + def supported(self): + return 1 + + def set_password(self, service, username, password): + self.pwd[(service, username)] = password + + def get_password(self, service, username): + try: + return self.pwd[(service, username)] + except KeyError: + return None + + def delete_password(self, service, username): + try: + del self.pwd[(service, username)] + except KeyError: + raise errors.PasswordDeleteError("No key") + +class CommandLineTestCase(unittest.TestCase): + def setUp(self): + self.old_keyring = keyring.get_keyring() + + self.cli = cli.CommandLineTool() + self.cli.input_password = self.return_password + self.cli.output_password = self.save_password + self.cli.parser.error = self.mock_error + self.cli.parser.print_help = lambda: None + + keyring.set_keyring(SimpleKeyring()) + + self.password = "" + self.password_returned = None + self.last_error = None + + def tearDown(self): + keyring.set_keyring(self.old_keyring) + + def return_password(self, *args, **kwargs): + return self.password + + def save_password(self, password): + self.password_returned = password + + def mock_error(self, error): + self.last_error = error + raise SystemExit() + + def test_wrong_arguments(self): + self.assertEqual(1, self.cli.run([])) + + self.assertRaises(SystemExit, self.cli.run, ["get"]) + self.assertRaises(SystemExit, self.cli.run, ["get", "foo"]) + self.assertRaises(SystemExit, self.cli.run, + ["get", "foo", "bar", "baz"]) + + self.assertRaises(SystemExit, self.cli.run, ["set"]) + self.assertRaises(SystemExit, self.cli.run, ["set", "foo"]) + self.assertRaises(SystemExit, self.cli.run, + ["set", "foo", "bar", "baz"]) + + self.assertRaises(SystemExit, self.cli.run, ["foo", "bar", "baz"]) + + def test_get_unexistent_password(self): + self.assertEqual(1, self.cli.run(["get", "foo", "bar"])) + self.assertEqual(None, self.password_returned) + + def test_set_and_get_password(self): + self.password = "plop" + self.assertEqual(0, self.cli.run(["set", "foo", "bar"])) + self.assertEqual(0, self.cli.run(["get", "foo", "bar"])) + self.assertEqual("plop", self.password_returned) + + def test_load_builtin_backend(self): + self.assertEqual(1, self.cli.run([ + "get", + "-b", "keyring.backends.file.PlaintextKeyring", + "foo", 
"bar"])) + backend = keyring.get_keyring() + self.assertTrue(isinstance(backend, + keyring.backends.file.PlaintextKeyring)) + + def test_load_specific_backend_with_path(self): + keyring_path = os.path.join(os.path.dirname(keyring.__file__), 'tests') + self.assertEqual(0, self.cli.run(["get", + "-b", "test_cli.FakeKeyring", + "-p", keyring_path, + "foo", "bar"])) + + backend = keyring.get_keyring() + # Somehow, this doesn't work, because the full dotted name of the class + # is not the same as the one expected :( + #self.assertTrue(isinstance(backend, FakeKeyring)) + self.assertEqual(FakeKeyring.PASSWORD, self.password_returned) + + def test_load_wrong_keyrings(self): + self.assertRaises(SystemExit, self.cli.run, + ["get", "foo", "bar", + "-b", "blablabla" # ImportError + ]) + self.assertRaises(SystemExit, self.cli.run, + ["get", "foo", "bar", + "-b", "os.path.blabla" # AttributeError + ]) + self.assertRaises(SystemExit, self.cli.run, + ["get", "foo", "bar", + "-b", "__builtin__.str" # TypeError + ]) + + +def test_suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(CommandLineTestCase)) + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest="test_suite") diff --git a/awx/lib/site-packages/keyring/tests/test_core.py b/awx/lib/site-packages/keyring/tests/test_core.py new file mode 100644 index 0000000000..7842b04e4f --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/test_core.py @@ -0,0 +1,155 @@ +""" +test_core.py + +Created by Kang Zhang on 2009-08-09 +""" + +from __future__ import with_statement + +import os +import tempfile +import shutil + +from keyring.tests.py30compat import unittest + +import mock + +import keyring.backend +import keyring.core +from keyring import errors + +PASSWORD_TEXT = "This is password" +PASSWORD_TEXT_2 = "This is password2" +KEYRINGRC = "keyringrc.cfg" + + +class TestKeyring(keyring.backend.KeyringBackend): + """A faked keyring for test. + """ + def __init__(self): + self.passwords = {} + + def supported(self): + return 0 + + def get_password(self, service, username): + return PASSWORD_TEXT + + def set_password(self, service, username, password): + self.passwords[(service, username)] = password + return 0 + + def delete_password(self, service, username): + try: + del self.passwords[(service, username)] + except KeyError: + raise errors.PasswordDeleteError("not set") + + +class TestKeyring2(TestKeyring): + """Another faked keyring for test. + """ + def get_password(self, service, username): + return PASSWORD_TEXT_2 + + +class CoreTestCase(unittest.TestCase): + mock_global_backend = mock.patch('keyring.core._keyring_backend') + + @mock_global_backend + def test_set_password(self, backend): + """ + set_password on the default keyring is called. + """ + keyring.core.set_password("test", "user", "passtest") + backend.set_password.assert_called_once_with('test', 'user', + 'passtest') + + @mock_global_backend + def test_get_password(self, backend): + """ + set_password on the default keyring is called. + """ + result = keyring.core.get_password("test", "user") + backend.get_password.assert_called_once_with('test', 'user') + assert result is not None + + @mock_global_backend + def test_delete_password(self, backend): + keyring.core.delete_password("test", "user") + backend.delete_password.assert_called_once_with('test', 'user') + + def test_set_keyring_in_runtime(self): + """Test the function of set keyring in runtime. 
+ """ + keyring.core.set_keyring(TestKeyring()) + + keyring.core.set_password("test", "user", "password") + self.assertEqual(keyring.core.get_password("test", "user"), + PASSWORD_TEXT) + + def test_set_keyring_in_config(self): + """Test setting the keyring by config file. + """ + # create the config file + config_file = open(KEYRINGRC, 'w') + config_file.writelines([ + "[backend]\n", + # the path for the user created keyring + "keyring-path= %s\n" % os.path.dirname(os.path.abspath(__file__)), + # the name of the keyring class + "default-keyring=test_core.TestKeyring2\n", + ]) + config_file.close() + + # init the keyring lib, the lib will automaticlly load the + # config file and load the user defined module + keyring.core.init_backend() + + keyring.core.set_password("test", "user", "password") + self.assertEqual(keyring.core.get_password("test", "user"), + PASSWORD_TEXT_2) + + os.remove(KEYRINGRC) + + def test_load_config(self): + tempdir = tempfile.mkdtemp() + old_location = os.getcwd() + os.chdir(tempdir) + personal_cfg = os.path.join(os.path.expanduser("~"), "keyringrc.cfg") + if os.path.exists(personal_cfg): + os.rename(personal_cfg, personal_cfg + '.old') + personal_renamed = True + else: + personal_renamed = False + + # loading with an empty environment + keyring.core.load_config() + + # loading with a file that doesn't have a backend section + cfg = os.path.join(tempdir, "keyringrc.cfg") + f = open(cfg, 'w') + f.write('[keyring]') + f.close() + keyring.core.load_config() + + # loading with a file that doesn't have a default-keyring value + cfg = os.path.join(tempdir, "keyringrc.cfg") + f = open(cfg, 'w') + f.write('[backend]') + f.close() + keyring.core.load_config() + + os.chdir(old_location) + shutil.rmtree(tempdir) + if personal_renamed: + os.rename(personal_cfg + '.old', personal_cfg) + + +def test_suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(CoreTestCase)) + return suite + +if __name__ == "__main__": + unittest.main(defaultTest="test_suite") diff --git a/awx/lib/site-packages/keyring/tests/test_util.py b/awx/lib/site-packages/keyring/tests/test_util.py new file mode 100644 index 0000000000..339c242646 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/test_util.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +""" +Test for simple escape/unescape routine +""" + + +from .py30compat import unittest + +from keyring.util import escape + + +class EscapeTestCase(unittest.TestCase): + + def check_escape_unescape(self, initial): + escaped = escape.escape(initial) + self.assertTrue(all(c in (escape.LEGAL_CHARS + '_') for c in escaped)) + unescaped = escape.unescape(escaped) + self.assertEqual(initial, unescaped) + + def test_escape_unescape(self): + self.check_escape_unescape("aaaa") + self.check_escape_unescape("aaaa bbbb cccc") + self.check_escape_unescape(escape.u("Zażółć gęślÄ… jaźń")) + self.check_escape_unescape("(((P{{{{'''---; ;; '\"|%^") + + def test_low_byte(self): + """ + Ensure that encoding low bytes (ordinal less than hex F) encode as + as three bytes to avoid ambiguity. For example '\n' (hex A) should + encode as '_0A' and not '_A', the latter of which + isn't matched by the inverse operation. 
+ """ + self.check_escape_unescape('\n') + self.check_escape_unescape('\x000') + +def test_suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(EscapeTestCase)) + return suite + +if __name__ == "__main__": + unittest.main(defaultTest="test_suite") diff --git a/awx/lib/site-packages/keyring/tests/util.py b/awx/lib/site-packages/keyring/tests/util.py new file mode 100644 index 0000000000..1fcce03857 --- /dev/null +++ b/awx/lib/site-packages/keyring/tests/util.py @@ -0,0 +1,63 @@ +import contextlib +import os +import sys +import random +import string + +class ImportKiller(object): + "Context manager to make an import of a given name or names fail." + def __init__(self, *names): + self.names = names + def find_module(self, fullname, path=None): + if fullname in self.names: + return self + def load_module(self, fullname): + assert fullname in self.names + raise ImportError(fullname) + def __enter__(self): + self.original = {} + for name in self.names: + self.original[name] = sys.modules.pop(name, None) + sys.meta_path.insert(0, self) + def __exit__(self, *args): + sys.meta_path.remove(self) + for key, value in self.original.items(): + if value is not None: + sys.modules[key] = value + +@contextlib.contextmanager +def NoNoneDictMutator(destination, **changes): + """Helper context manager to make and unmake changes to a dict. + + A None is not a valid value for the destination, and so means that the + associated name should be removed.""" + original = {} + for key, value in changes.items(): + original[key] = destination.get(key) + if value is None: + if key in destination: + del destination[key] + else: + destination[key] = value + yield + for key, value in original.items(): + if value is None: + if key in destination: + del destination[key] + else: + destination[key] = value + + +def Environ(**changes): + """A context manager to temporarily change the os.environ""" + return NoNoneDictMutator(os.environ, **changes) + +ALPHABET = string.ascii_letters + string.digits + +def random_string(k, source = ALPHABET): + """Generate a random string with length k + """ + result = '' + for i in range(0, k): + result += random.choice(source) + return result diff --git a/awx/lib/site-packages/keyring/util/__init__.py b/awx/lib/site-packages/keyring/util/__init__.py new file mode 100644 index 0000000000..d67d8ab327 --- /dev/null +++ b/awx/lib/site-packages/keyring/util/__init__.py @@ -0,0 +1,39 @@ +try: + import functools +except ImportError: + # functools not available until Python 2.5 + pass + +def once(func): + """ + Decorate func so it's only ever called the first time. + + This decorator can ensure that an expensive or non-idempotent function + will not be expensive on subsequent calls and is idempotent. + + >>> func = once(lambda a: a+3) + >>> func(3) + 6 + >>> func(9) + 6 + >>> func('12') + 6 + """ + def wrapper(*args, **kwargs): + if not hasattr(func, 'always_returns'): + func.always_returns = func(*args, **kwargs) + return func.always_returns + if 'functools' in globals(): + wrapper = functools.wraps(func)(wrapper) + return wrapper + +def suppress_exceptions(callables, exceptions=Exception): + """ + yield the results of calling each element of callables, suppressing + any indicated exceptions. 
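+ + For example, list(suppress_exceptions([lambda: 1, lambda: 1 // 0])) + evaluates to [1]; the ZeroDivisionError raised by the second callable is + swallowed.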
+ """ + for callable in callables: + try: + yield callable() + except exceptions: + pass diff --git a/awx/lib/site-packages/keyring/util/escape.py b/awx/lib/site-packages/keyring/util/escape.py new file mode 100644 index 0000000000..41dd439597 --- /dev/null +++ b/awx/lib/site-packages/keyring/util/escape.py @@ -0,0 +1,60 @@ +""" +escape/unescape routines available for backends which need +alphanumeric usernames, services, or other values +""" + +import re +import string +import sys + +# True if we are running on Python 3. +# taken from six.py +PY3 = sys.version_info[0] == 3 + +# allow use of unicode literals +if PY3: + def u(s): + return s + def _unichr(c): + return chr(c) +else: + def u(s): + return unicode(s, "unicode_escape") + def _unichr(c): + return unichr(c) + +LEGAL_CHARS = ( + getattr(string, 'letters', None) # Python 2 + or getattr(string, 'ascii_letters') # Python 3 +) + string.digits + +ESCAPE_FMT = "_%02X" + +def _escape_char(c): + "Single char escape. Return the char, escaped if not already legal" + if isinstance(c, int): + c = _unichr(c) + return c if c in LEGAL_CHARS else ESCAPE_FMT % ord(c) + +def escape(value): + """ + Escapes given string so the result consists of alphanumeric chars and + underscore only. + """ + return "".join(_escape_char(c) for c in value.encode('utf-8')) + +def _unescape_code(regex_match): + ordinal = int(regex_match.group('code'), 16) + if sys.version_info >= (3,): + return bytes([ordinal]) + return chr(ordinal) + +def unescape(value): + """ + Inverse of escape. + """ + re_esc = re.compile( + # the pattern must be bytes to operate on bytes + ESCAPE_FMT.replace('%02X', '(?P[0-9A-F]{2})').encode('ascii') + ) + return re_esc.sub(_unescape_code, value.encode('ascii')).decode('utf-8') diff --git a/awx/lib/site-packages/keyring/util/platform_.py b/awx/lib/site-packages/keyring/util/platform_.py new file mode 100644 index 0000000000..eceb39c230 --- /dev/null +++ b/awx/lib/site-packages/keyring/util/platform_.py @@ -0,0 +1,24 @@ +from __future__ import absolute_import + +import os +import platform + +def _data_root_Windows(): + try: + root = os.environ['LOCALAPPDATA'] + except KeyError: + # Windows XP + root = os.path.join(os.environ['USERPROFILE'], 'Local Settings') + return os.path.join(root, 'Python Keyring') + +def _data_root_Linux(): + """ + Use freedesktop.org Base Dir Specfication to determine storage + location. + """ + fallback = os.path.expanduser('~/.local/share') + root = os.environ.get('XDG_DATA_HOME', None) or fallback + return os.path.join(root, 'python_keyring') + +# by default, use Unix convention +data_root = globals().get('_data_root_' + platform.system(), _data_root_Linux) diff --git a/awx/lib/site-packages/keyring/util/properties.py b/awx/lib/site-packages/keyring/util/properties.py new file mode 100644 index 0000000000..dee0a73bf0 --- /dev/null +++ b/awx/lib/site-packages/keyring/util/properties.py @@ -0,0 +1,50 @@ +class ClassProperty(property): + """ + An implementation of a property callable on a class. Used to decorate a + classmethod but to then treat it like a property. + + Example: + + >>> class MyClass: + ... @ClassProperty + ... @classmethod + ... def skillz(cls): + ... 
return cls.__name__.startswith('My') + >>> MyClass.skillz + True + >>> class YourClass(MyClass): pass + >>> YourClass.skillz + False + """ + def __get__(self, cls, owner): + return self.fget.__get__(None, owner)() + +# borrowed from jaraco.util.dictlib +class NonDataProperty(object): + """Much like the property builtin, but only implements __get__, + making it a non-data property, and can be subsequently reset. + + See http://users.rcn.com/python/download/Descriptor.htm for more + information. + + >>> class X(object): + ... @NonDataProperty + ... def foo(self): + ... return 3 + >>> x = X() + >>> x.foo + 3 + >>> x.foo = 4 + >>> x.foo + 4 + """ + + def __init__(self, fget): + assert fget is not None, "fget cannot be none" + assert callable(fget), "fget must be callable" + self.fget = fget + + def __get__(self, obj, objtype=None): + if obj is None: + return self + return self.fget(obj) diff --git a/awx/lib/site-packages/mock.py b/awx/lib/site-packages/mock.py new file mode 100644 index 0000000000..c8fc5d1d25 --- /dev/null +++ b/awx/lib/site-packages/mock.py @@ -0,0 +1,2367 @@ +# mock.py +# Test tools for mocking and patching. +# Copyright (C) 2007-2012 Michael Foord & the mock team +# E-mail: fuzzyman AT voidspace DOT org DOT uk + +# mock 1.0 +# http://www.voidspace.org.uk/python/mock/ + +# Released subject to the BSD License +# Please see http://www.voidspace.org.uk/python/license.shtml + +# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml +# Comments, suggestions and bug reports welcome. + + +__all__ = ( + 'Mock', + 'MagicMock', + 'patch', + 'sentinel', + 'DEFAULT', + 'ANY', + 'call', + 'create_autospec', + 'FILTER_DIR', + 'NonCallableMock', + 'NonCallableMagicMock', + 'mock_open', + 'PropertyMock', +) + + +__version__ = '1.0.1' + + +import pprint +import sys + +try: + import inspect +except ImportError: + # for alternative platforms that + # may not have inspect + inspect = None + +try: + from functools import wraps as original_wraps +except ImportError: + # Python 2.4 compatibility + def wraps(original): + def inner(f): + f.__name__ = original.__name__ + f.__doc__ = original.__doc__ + f.__module__ = original.__module__ + f.__wrapped__ = original + return f + return inner +else: + if sys.version_info[:2] >= (3, 3): + wraps = original_wraps + else: + def wraps(func): + def inner(f): + f = original_wraps(func)(f) + f.__wrapped__ = func + return f + return inner + +try: + unicode +except NameError: + # Python 3 + basestring = unicode = str + +try: + long +except NameError: + # Python 3 + long = int + +try: + BaseException +except NameError: + # Python 2.4 compatibility + BaseException = Exception + +try: + next +except NameError: + def next(obj): + return obj.next() + + +BaseExceptions = (BaseException,) +if 'java' in sys.platform: + # jython + import java + BaseExceptions = (BaseException, java.lang.Throwable) + +try: + _isidentifier = str.isidentifier +except AttributeError: + # Python 2.X + import keyword + import re + regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) + def _isidentifier(string): + if string in keyword.kwlist: + return False + return regex.match(string) + + +inPy3k = sys.version_info[0] == 3 + +# Needed to work around Python 3 bug where use of "super" interferes with +# defining __class__ as a descriptor +_super = super + +self = 'im_self' +builtin = '__builtin__' +if inPy3k: + self = '__self__' + builtin = 'builtins' + +FILTER_DIR = True + + +def _is_instance_mock(obj): + # can't use isinstance on Mock objects because they override __class__ + # The base 
class for all mocks is NonCallableMock + return issubclass(type(obj), NonCallableMock) + + +def _is_exception(obj): + return ( + isinstance(obj, BaseExceptions) or + isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions) + ) + + +class _slotted(object): + __slots__ = ['a'] + + +DescriptorTypes = ( + type(_slotted.a), + property, +) + + +def _getsignature(func, skipfirst, instance=False): + if inspect is None: + raise ImportError('inspect module not available') + + if isinstance(func, ClassTypes) and not instance: + try: + func = func.__init__ + except AttributeError: + return + skipfirst = True + elif not isinstance(func, FunctionTypes): + # for classes where instance is True we end up here too + try: + func = func.__call__ + except AttributeError: + return + + if inPy3k: + try: + argspec = inspect.getfullargspec(func) + except TypeError: + # C function / method, possibly inherited object().__init__ + return + regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec + else: + try: + regargs, varargs, varkwargs, defaults = inspect.getargspec(func) + except TypeError: + # C function / method, possibly inherited object().__init__ + return + + # instance methods and classmethods need to lose the self argument + if getattr(func, self, None) is not None: + regargs = regargs[1:] + if skipfirst: + # this condition and the above one are never both True - why? + regargs = regargs[1:] + + if inPy3k: + signature = inspect.formatargspec( + regargs, varargs, varkw, defaults, + kwonly, kwonlydef, ann, formatvalue=lambda value: "") + else: + signature = inspect.formatargspec( + regargs, varargs, varkwargs, defaults, + formatvalue=lambda value: "") + return signature[1:-1], func + + +def _check_signature(func, mock, skipfirst, instance=False): + if not _callable(func): + return + + result = _getsignature(func, skipfirst, instance) + if result is None: + return + signature, func = result + + # can't use self because "self" is common as an argument name + # unfortunately even not in the first place + src = "lambda _mock_self, %s: None" % signature + checksig = eval(src, {}) + _copy_func_details(func, checksig) + type(mock)._mock_check_sig = checksig + + +def _copy_func_details(func, funcopy): + funcopy.__name__ = func.__name__ + funcopy.__doc__ = func.__doc__ + #funcopy.__dict__.update(func.__dict__) + funcopy.__module__ = func.__module__ + if not inPy3k: + funcopy.func_defaults = func.func_defaults + return + funcopy.__defaults__ = func.__defaults__ + funcopy.__kwdefaults__ = func.__kwdefaults__ + + +def _callable(obj): + if isinstance(obj, ClassTypes): + return True + if getattr(obj, '__call__', None) is not None: + return True + return False + + +def _is_list(obj): + # checks for list or tuples + # XXXX badly named! + return type(obj) in (list, tuple) + + +def _instance_callable(obj): + """Given an object, return True if the object is callable. + For classes, return True if instances would be callable.""" + if not isinstance(obj, ClassTypes): + # already an instance + return getattr(obj, '__call__', None) is not None + + klass = obj + # uses __bases__ instead of __mro__ so that we work with old style classes + if klass.__dict__.get('__call__') is not None: + return True + + for base in klass.__bases__: + if _instance_callable(base): + return True + return False + + +def _set_signature(mock, original, instance=False): + # creates a function with signature (*args, **kwargs) that delegates to a + # mock. 
It still does signature checking by calling a lambda with the same + # signature as the original. + if not _callable(original): + return + + skipfirst = isinstance(original, ClassTypes) + result = _getsignature(original, skipfirst, instance) + if result is None: + # was a C function (e.g. object().__init__ ) that can't be mocked + return + + signature, func = result + + src = "lambda %s: None" % signature + checksig = eval(src, {}) + _copy_func_details(func, checksig) + + name = original.__name__ + if not _isidentifier(name): + name = 'funcopy' + context = {'_checksig_': checksig, 'mock': mock} + src = """def %s(*args, **kwargs): + _checksig_(*args, **kwargs) + return mock(*args, **kwargs)""" % name + exec (src, context) + funcopy = context[name] + _setup_func(funcopy, mock) + return funcopy + + +def _setup_func(funcopy, mock): + funcopy.mock = mock + + # can't use isinstance with mocks + if not _is_instance_mock(mock): + return + + def assert_called_with(*args, **kwargs): + return mock.assert_called_with(*args, **kwargs) + def assert_called_once_with(*args, **kwargs): + return mock.assert_called_once_with(*args, **kwargs) + def assert_has_calls(*args, **kwargs): + return mock.assert_has_calls(*args, **kwargs) + def assert_any_call(*args, **kwargs): + return mock.assert_any_call(*args, **kwargs) + def reset_mock(): + funcopy.method_calls = _CallList() + funcopy.mock_calls = _CallList() + mock.reset_mock() + ret = funcopy.return_value + if _is_instance_mock(ret) and not ret is mock: + ret.reset_mock() + + funcopy.called = False + funcopy.call_count = 0 + funcopy.call_args = None + funcopy.call_args_list = _CallList() + funcopy.method_calls = _CallList() + funcopy.mock_calls = _CallList() + + funcopy.return_value = mock.return_value + funcopy.side_effect = mock.side_effect + funcopy._mock_children = mock._mock_children + + funcopy.assert_called_with = assert_called_with + funcopy.assert_called_once_with = assert_called_once_with + funcopy.assert_has_calls = assert_has_calls + funcopy.assert_any_call = assert_any_call + funcopy.reset_mock = reset_mock + + mock._mock_delegate = funcopy + + +def _is_magic(name): + return '__%s__' % name[2:-2] == name + + +class _SentinelObject(object): + "A unique, named, sentinel object." 
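+ # Handed out via the module-level `sentinel` object below: the first + # access of a given attribute name creates the _SentinelObject, and every + # later access returns that same instance (see _Sentinel.__getattr__).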
+ def __init__(self, name): + self.name = name + + def __repr__(self): + return 'sentinel.%s' % self.name + + +class _Sentinel(object): + """Access attributes to return a named object, usable as a sentinel.""" + def __init__(self): + self._sentinels = {} + + def __getattr__(self, name): + if name == '__bases__': + # Without this help(mock) raises an exception + raise AttributeError + return self._sentinels.setdefault(name, _SentinelObject(name)) + + +sentinel = _Sentinel() + +DEFAULT = sentinel.DEFAULT +_missing = sentinel.MISSING +_deleted = sentinel.DELETED + + +class OldStyleClass: + pass +ClassType = type(OldStyleClass) + + +def _copy(value): + if type(value) in (dict, list, tuple, set): + return type(value)(value) + return value + + +ClassTypes = (type,) +if not inPy3k: + ClassTypes = (type, ClassType) + +_allowed_names = set( + [ + 'return_value', '_mock_return_value', 'side_effect', + '_mock_side_effect', '_mock_parent', '_mock_new_parent', + '_mock_name', '_mock_new_name' + ] +) + + +def _delegating_property(name): + _allowed_names.add(name) + _the_name = '_mock_' + name + def _get(self, name=name, _the_name=_the_name): + sig = self._mock_delegate + if sig is None: + return getattr(self, _the_name) + return getattr(sig, name) + def _set(self, value, name=name, _the_name=_the_name): + sig = self._mock_delegate + if sig is None: + self.__dict__[_the_name] = value + else: + setattr(sig, name, value) + + return property(_get, _set) + + + +class _CallList(list): + + def __contains__(self, value): + if not isinstance(value, list): + return list.__contains__(self, value) + len_value = len(value) + len_self = len(self) + if len_value > len_self: + return False + + for i in range(0, len_self - len_value + 1): + sub_list = self[i:i+len_value] + if sub_list == value: + return True + return False + + def __repr__(self): + return pprint.pformat(list(self)) + + +def _check_and_set_parent(parent, value, name, new_name): + if not _is_instance_mock(value): + return False + if ((value._mock_name or value._mock_new_name) or + (value._mock_parent is not None) or + (value._mock_new_parent is not None)): + return False + + _parent = parent + while _parent is not None: + # setting a mock (value) as a child or return value of itself + # should not modify the mock + if _parent is value: + return False + _parent = _parent._mock_new_parent + + if new_name: + value._mock_new_parent = parent + value._mock_new_name = new_name + if name: + value._mock_parent = parent + value._mock_name = name + return True + + + +class Base(object): + _mock_return_value = DEFAULT + _mock_side_effect = None + def __init__(self, *args, **kwargs): + pass + + + +class NonCallableMock(Base): + """A non-callable version of `Mock`""" + + def __new__(cls, *args, **kw): + # every instance has its own class + # so we can create magic methods on the + # class without stomping on other mocks + new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__}) + instance = object.__new__(new) + return instance + + + def __init__( + self, spec=None, wraps=None, name=None, spec_set=None, + parent=None, _spec_state=None, _new_name='', _new_parent=None, + **kwargs + ): + if _new_parent is None: + _new_parent = parent + + __dict__ = self.__dict__ + __dict__['_mock_parent'] = parent + __dict__['_mock_name'] = name + __dict__['_mock_new_name'] = _new_name + __dict__['_mock_new_parent'] = _new_parent + + if spec_set is not None: + spec = spec_set + spec_set = True + + self._mock_add_spec(spec, spec_set) + + __dict__['_mock_children'] = {} + 
__dict__['_mock_wraps'] = wraps + __dict__['_mock_delegate'] = None + + __dict__['_mock_called'] = False + __dict__['_mock_call_args'] = None + __dict__['_mock_call_count'] = 0 + __dict__['_mock_call_args_list'] = _CallList() + __dict__['_mock_mock_calls'] = _CallList() + + __dict__['method_calls'] = _CallList() + + if kwargs: + self.configure_mock(**kwargs) + + _super(NonCallableMock, self).__init__( + spec, wraps, name, spec_set, parent, + _spec_state + ) + + + def attach_mock(self, mock, attribute): + """ + Attach a mock as an attribute of this one, replacing its name and + parent. Calls to the attached mock will be recorded in the + `method_calls` and `mock_calls` attributes of this one.""" + mock._mock_parent = None + mock._mock_new_parent = None + mock._mock_name = '' + mock._mock_new_name = None + + setattr(self, attribute, mock) + + + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + + + def _mock_add_spec(self, spec, spec_set): + _spec_class = None + + if spec is not None and not _is_list(spec): + if isinstance(spec, ClassTypes): + _spec_class = spec + else: + _spec_class = _get_class(spec) + + spec = dir(spec) + + __dict__ = self.__dict__ + __dict__['_spec_class'] = _spec_class + __dict__['_spec_set'] = spec_set + __dict__['_mock_methods'] = spec + + + def __get_return_value(self): + ret = self._mock_return_value + if self._mock_delegate is not None: + ret = self._mock_delegate.return_value + + if ret is DEFAULT: + ret = self._get_child_mock( + _new_parent=self, _new_name='()' + ) + self.return_value = ret + return ret + + + def __set_return_value(self, value): + if self._mock_delegate is not None: + self._mock_delegate.return_value = value + else: + self._mock_return_value = value + _check_and_set_parent(self, value, None, '()') + + __return_value_doc = "The value to be returned when the mock is called." + return_value = property(__get_return_value, __set_return_value, + __return_value_doc) + + + @property + def __class__(self): + if self._spec_class is None: + return type(self) + return self._spec_class + + called = _delegating_property('called') + call_count = _delegating_property('call_count') + call_args = _delegating_property('call_args') + call_args_list = _delegating_property('call_args_list') + mock_calls = _delegating_property('mock_calls') + + + def __get_side_effect(self): + sig = self._mock_delegate + if sig is None: + return self._mock_side_effect + return sig.side_effect + + def __set_side_effect(self, value): + value = _try_iter(value) + sig = self._mock_delegate + if sig is None: + self._mock_side_effect = value + else: + sig.side_effect = value + + side_effect = property(__get_side_effect, __set_side_effect) + + + def reset_mock(self): + "Restore the mock object to its initial state." + self.called = False + self.call_args = None + self.call_count = 0 + self.mock_calls = _CallList() + self.call_args_list = _CallList() + self.method_calls = _CallList() + + for child in self._mock_children.values(): + if isinstance(child, _SpecState): + continue + child.reset_mock() + + ret = self._mock_return_value + if _is_instance_mock(ret) and ret is not self: + ret.reset_mock() + + + def configure_mock(self, **kwargs): + """Set attributes on the mock through keyword arguments. 
+ + Attributes plus return values and side effects can be set on child + mocks using standard dot notation and unpacking a dictionary in the + method call: + + >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} + >>> mock.configure_mock(**attrs)""" + for arg, val in sorted(kwargs.items(), + # we sort on the number of dots so that + # attributes are set before we set attributes on + # attributes + key=lambda entry: entry[0].count('.')): + args = arg.split('.') + final = args.pop() + obj = self + for entry in args: + obj = getattr(obj, entry) + setattr(obj, final, val) + + + def __getattr__(self, name): + if name == '_mock_methods': + raise AttributeError(name) + elif self._mock_methods is not None: + if name not in self._mock_methods or name in _all_magics: + raise AttributeError("Mock object has no attribute %r" % name) + elif _is_magic(name): + raise AttributeError(name) + + result = self._mock_children.get(name) + if result is _deleted: + raise AttributeError(name) + elif result is None: + wraps = None + if self._mock_wraps is not None: + # XXXX should we get the attribute without triggering code + # execution? + wraps = getattr(self._mock_wraps, name) + + result = self._get_child_mock( + parent=self, name=name, wraps=wraps, _new_name=name, + _new_parent=self + ) + self._mock_children[name] = result + + elif isinstance(result, _SpecState): + result = create_autospec( + result.spec, result.spec_set, result.instance, + result.parent, result.name + ) + self._mock_children[name] = result + + return result + + + def __repr__(self): + _name_list = [self._mock_new_name] + _parent = self._mock_new_parent + last = self + + dot = '.' + if _name_list == ['()']: + dot = '' + seen = set() + while _parent is not None: + last = _parent + + _name_list.append(_parent._mock_new_name + dot) + dot = '.' + if _parent._mock_new_name == '()': + dot = '' + + _parent = _parent._mock_new_parent + + # use ids here so as not to call __hash__ on the mocks + if id(_parent) in seen: + break + seen.add(id(_parent)) + + _name_list = list(reversed(_name_list)) + _first = last._mock_name or 'mock' + if len(_name_list) > 1: + if _name_list[1] not in ('()', '().'): + _first += '.' + _name_list[0] = _first + name = ''.join(_name_list) + + name_string = '' + if name not in ('mock', 'mock.'): + name_string = ' name=%r' % name + + spec_string = '' + if self._spec_class is not None: + spec_string = ' spec=%r' + if self._spec_set: + spec_string = ' spec_set=%r' + spec_string = spec_string % self._spec_class.__name__ + return "<%s%s%s id='%s'>" % ( + type(self).__name__, + name_string, + spec_string, + id(self) + ) + + + def __dir__(self): + """Filter the output of `dir(mock)` to only useful members. + XXXX + """ + extras = self._mock_methods or [] + from_type = dir(type(self)) + from_dict = list(self.__dict__) + + if FILTER_DIR: + from_type = [e for e in from_type if not e.startswith('_')] + from_dict = [e for e in from_dict if not e.startswith('_') or + _is_magic(e)] + return sorted(set(extras + from_type + from_dict + + list(self._mock_children))) + + + def __setattr__(self, name, value): + if name in _allowed_names: + # property setters go through here + return object.__setattr__(self, name, value) + elif (self._spec_set and self._mock_methods is not None and + name not in self._mock_methods and + name not in self.__dict__): + raise AttributeError("Mock object has no attribute '%s'" % name) + elif name in _unsupported_magics: + msg = 'Attempting to set unsupported magic method %r.' 
% name + raise AttributeError(msg) + elif name in _all_magics: + if self._mock_methods is not None and name not in self._mock_methods: + raise AttributeError("Mock object has no attribute '%s'" % name) + + if not _is_instance_mock(value): + setattr(type(self), name, _get_method(name, value)) + original = value + value = lambda *args, **kw: original(self, *args, **kw) + else: + # only set _new_name and not name so that mock_calls is tracked + # but not method calls + _check_and_set_parent(self, value, None, name) + setattr(type(self), name, value) + self._mock_children[name] = value + elif name == '__class__': + self._spec_class = value + return + else: + if _check_and_set_parent(self, value, name, name): + self._mock_children[name] = value + return object.__setattr__(self, name, value) + + + def __delattr__(self, name): + if name in _all_magics and name in type(self).__dict__: + delattr(type(self), name) + if name not in self.__dict__: + # for magic methods that are still MagicProxy objects and + # not set on the instance itself + return + + if name in self.__dict__: + object.__delattr__(self, name) + + obj = self._mock_children.get(name, _missing) + if obj is _deleted: + raise AttributeError(name) + if obj is not _missing: + del self._mock_children[name] + self._mock_children[name] = _deleted + + + + def _format_mock_call_signature(self, args, kwargs): + name = self._mock_name or 'mock' + return _format_call_signature(name, args, kwargs) + + + def _format_mock_failure_message(self, args, kwargs): + message = 'Expected call: %s\nActual call: %s' + expected_string = self._format_mock_call_signature(args, kwargs) + call_args = self.call_args + if len(call_args) == 3: + call_args = call_args[1:] + actual_string = self._format_mock_call_signature(*call_args) + return message % (expected_string, actual_string) + + + def assert_called_with(_mock_self, *args, **kwargs): + """assert that the mock was called with the specified arguments. + + Raises an AssertionError if the args and keyword args passed in are + different to the last call to the mock.""" + self = _mock_self + if self.call_args is None: + expected = self._format_mock_call_signature(args, kwargs) + raise AssertionError('Expected call: %s\nNot called' % (expected,)) + + if self.call_args != (args, kwargs): + msg = self._format_mock_failure_message(args, kwargs) + raise AssertionError(msg) + + + def assert_called_once_with(_mock_self, *args, **kwargs): + """assert that the mock was called exactly once and with the specified + arguments.""" + self = _mock_self + if not self.call_count == 1: + msg = ("Expected to be called once. Called %s times." % + self.call_count) + raise AssertionError(msg) + return self.assert_called_with(*args, **kwargs) + + + def assert_has_calls(self, calls, any_order=False): + """assert the mock has been called with the specified calls. + The `mock_calls` list is checked for the calls. + + If `any_order` is False (the default) then the calls must be + sequential. There can be extra calls before or after the + specified calls. 
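+ + For example, after mock(1); mock(2); mock(3) the assertion + mock.assert_has_calls([call(2), call(3)]) passes, because those calls + appear consecutively in `mock_calls`, while the reversed list + [call(3), call(2)] only passes with `any_order=True`.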
+ + If `any_order` is True then the calls can be in any order, but + they must all appear in `mock_calls`.""" + if not any_order: + if calls not in self.mock_calls: + raise AssertionError( + 'Calls not found.\nExpected: %r\n' + 'Actual: %r' % (calls, self.mock_calls) + ) + return + + all_calls = list(self.mock_calls) + + not_found = [] + for kall in calls: + try: + all_calls.remove(kall) + except ValueError: + not_found.append(kall) + if not_found: + raise AssertionError( + '%r not all found in call list' % (tuple(not_found),) + ) + + + def assert_any_call(self, *args, **kwargs): + """assert the mock has been called with the specified arguments. + + The assert passes if the mock has *ever* been called, unlike + `assert_called_with` and `assert_called_once_with` that only pass if + the call is the most recent one.""" + kall = call(*args, **kwargs) + if kall not in self.call_args_list: + expected_string = self._format_mock_call_signature(args, kwargs) + raise AssertionError( + '%s call not found' % expected_string + ) + + + def _get_child_mock(self, **kw): + """Create the child mocks for attributes and return value. + By default child mocks will be the same type as the parent. + Subclasses of Mock may want to override this to customize the way + child mocks are made. + + For non-callable mocks the callable variant will be used (rather than + any custom subclass).""" + _type = type(self) + if not issubclass(_type, CallableMixin): + if issubclass(_type, NonCallableMagicMock): + klass = MagicMock + elif issubclass(_type, NonCallableMock) : + klass = Mock + else: + klass = _type.__mro__[1] + return klass(**kw) + + + +def _try_iter(obj): + if obj is None: + return obj + if _is_exception(obj): + return obj + if _callable(obj): + return obj + try: + return iter(obj) + except TypeError: + # XXXX backwards compatibility + # but this will blow up on first call - so maybe we should fail early? + return obj + + + +class CallableMixin(Base): + + def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, + wraps=None, name=None, spec_set=None, parent=None, + _spec_state=None, _new_name='', _new_parent=None, **kwargs): + self.__dict__['_mock_return_value'] = return_value + + _super(CallableMixin, self).__init__( + spec, wraps, name, spec_set, parent, + _spec_state, _new_name, _new_parent, **kwargs + ) + + self.side_effect = side_effect + + + def _mock_check_sig(self, *args, **kwargs): + # stub method that can be replaced with one with a specific signature + pass + + + def __call__(_mock_self, *args, **kwargs): + # can't use self in-case a function / method we are mocking uses self + # in the signature + _mock_self._mock_check_sig(*args, **kwargs) + return _mock_self._mock_call(*args, **kwargs) + + + def _mock_call(_mock_self, *args, **kwargs): + self = _mock_self + self.called = True + self.call_count += 1 + self.call_args = _Call((args, kwargs), two=True) + self.call_args_list.append(_Call((args, kwargs), two=True)) + + _new_name = self._mock_new_name + _new_parent = self._mock_new_parent + self.mock_calls.append(_Call(('', args, kwargs))) + + seen = set() + skip_next_dot = _new_name == '()' + do_method_calls = self._mock_parent is not None + name = self._mock_name + while _new_parent is not None: + this_mock_call = _Call((_new_name, args, kwargs)) + if _new_parent._mock_new_name: + dot = '.' 
+ if skip_next_dot: + dot = '' + + skip_next_dot = False + if _new_parent._mock_new_name == '()': + skip_next_dot = True + + _new_name = _new_parent._mock_new_name + dot + _new_name + + if do_method_calls: + if _new_name == name: + this_method_call = this_mock_call + else: + this_method_call = _Call((name, args, kwargs)) + _new_parent.method_calls.append(this_method_call) + + do_method_calls = _new_parent._mock_parent is not None + if do_method_calls: + name = _new_parent._mock_name + '.' + name + + _new_parent.mock_calls.append(this_mock_call) + _new_parent = _new_parent._mock_new_parent + + # use ids here so as not to call __hash__ on the mocks + _new_parent_id = id(_new_parent) + if _new_parent_id in seen: + break + seen.add(_new_parent_id) + + ret_val = DEFAULT + effect = self.side_effect + if effect is not None: + if _is_exception(effect): + raise effect + + if not _callable(effect): + result = next(effect) + if _is_exception(result): + raise result + return result + + ret_val = effect(*args, **kwargs) + if ret_val is DEFAULT: + ret_val = self.return_value + + if (self._mock_wraps is not None and + self._mock_return_value is DEFAULT): + return self._mock_wraps(*args, **kwargs) + if ret_val is DEFAULT: + ret_val = self.return_value + return ret_val + + + +class Mock(CallableMixin, NonCallableMock): + """ + Create a new `Mock` object. `Mock` takes several optional arguments + that specify the behaviour of the Mock object: + + * `spec`: This can be either a list of strings or an existing object (a + class or instance) that acts as the specification for the mock object. If + you pass in an object then a list of strings is formed by calling dir on + the object (excluding unsupported magic attributes and methods). Accessing + any attribute not in this list will raise an `AttributeError`. + + If `spec` is an object (rather than a list of strings) then + `mock.__class__` returns the class of the spec object. This allows mocks + to pass `isinstance` tests. + + * `spec_set`: A stricter variant of `spec`. If used, attempting to *set* + or get an attribute on the mock that isn't on the object passed as + `spec_set` will raise an `AttributeError`. + + * `side_effect`: A function to be called whenever the Mock is called. See + the `side_effect` attribute. Useful for raising exceptions or + dynamically changing return values. The function is called with the same + arguments as the mock, and unless it returns `DEFAULT`, the return + value of this function is used as the return value. + + Alternatively `side_effect` can be an exception class or instance. In + this case the exception will be raised when the mock is called. + + If `side_effect` is an iterable then each call to the mock will return + the next value from the iterable. If any of the members of the iterable + are exceptions they will be raised instead of returned. + + * `return_value`: The value returned when the mock is called. By default + this is a new Mock (created on first access). See the + `return_value` attribute. + + * `wraps`: Item for the mock object to wrap. If `wraps` is not None then + calling the Mock will pass the call through to the wrapped object + (returning the real result). Attribute access on the mock will return a + Mock object that wraps the corresponding attribute of the wrapped object + (so attempting to access an attribute that doesn't exist will raise an + `AttributeError`). 
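+
+      A minimal sketch of `wraps`, using the builtin `len` purely for
+      illustration::
+
+          m = Mock(wraps=len)
+          m('abc')                          # returns 3, the real result
+          m.assert_called_once_with('abc')  # the call is still recorded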
+ + If the mock has an explicit `return_value` set then calls are not passed + to the wrapped object and the `return_value` is returned instead. + + * `name`: If the mock has a name then it will be used in the repr of the + mock. This can be useful for debugging. The name is propagated to child + mocks. + + Mocks can also be called with arbitrary keyword arguments. These will be + used to set attributes on the mock after it is created. + """ + + + +def _dot_lookup(thing, comp, import_path): + try: + return getattr(thing, comp) + except AttributeError: + __import__(import_path) + return getattr(thing, comp) + + +def _importer(target): + components = target.split('.') + import_path = components.pop(0) + thing = __import__(import_path) + + for comp in components: + import_path += ".%s" % comp + thing = _dot_lookup(thing, comp, import_path) + return thing + + +def _is_started(patcher): + # XXXX horrible + return hasattr(patcher, 'is_local') + + +class _patch(object): + + attribute_name = None + _active_patches = set() + + def __init__( + self, getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs + ): + if new_callable is not None: + if new is not DEFAULT: + raise ValueError( + "Cannot use 'new' and 'new_callable' together" + ) + if autospec is not None: + raise ValueError( + "Cannot use 'autospec' and 'new_callable' together" + ) + + self.getter = getter + self.attribute = attribute + self.new = new + self.new_callable = new_callable + self.spec = spec + self.create = create + self.has_local = False + self.spec_set = spec_set + self.autospec = autospec + self.kwargs = kwargs + self.additional_patchers = [] + + + def copy(self): + patcher = _patch( + self.getter, self.attribute, self.new, self.spec, + self.create, self.spec_set, + self.autospec, self.new_callable, self.kwargs + ) + patcher.attribute_name = self.attribute_name + patcher.additional_patchers = [ + p.copy() for p in self.additional_patchers + ] + return patcher + + + def __call__(self, func): + if isinstance(func, ClassTypes): + return self.decorate_class(func) + return self.decorate_callable(func) + + + def decorate_class(self, klass): + for attr in dir(klass): + if not attr.startswith(patch.TEST_PREFIX): + continue + + attr_value = getattr(klass, attr) + if not hasattr(attr_value, "__call__"): + continue + + patcher = self.copy() + setattr(klass, attr, patcher(attr_value)) + return klass + + + def decorate_callable(self, func): + if hasattr(func, 'patchings'): + func.patchings.append(self) + return func + + @wraps(func) + def patched(*args, **keywargs): + # don't use a with here (backwards compatability with Python 2.4) + extra_args = [] + entered_patchers = [] + + # can't use try...except...finally because of Python 2.4 + # compatibility + exc_info = tuple() + try: + try: + for patching in patched.patchings: + arg = patching.__enter__() + entered_patchers.append(patching) + if patching.attribute_name is not None: + keywargs.update(arg) + elif patching.new is DEFAULT: + extra_args.append(arg) + + args += tuple(extra_args) + return func(*args, **keywargs) + except: + if (patching not in entered_patchers and + _is_started(patching)): + # the patcher may have been started, but an exception + # raised whilst entering one of its additional_patchers + entered_patchers.append(patching) + # Pass the exception to __exit__ + exc_info = sys.exc_info() + # re-raise the exception + raise + finally: + for patching in reversed(entered_patchers): + patching.__exit__(*exc_info) + + patched.patchings = [self] + if 
hasattr(func, 'func_code'): + # not in Python 3 + patched.compat_co_firstlineno = getattr( + func, "compat_co_firstlineno", + func.func_code.co_firstlineno + ) + return patched + + + def get_original(self): + target = self.getter() + name = self.attribute + + original = DEFAULT + local = False + + try: + original = target.__dict__[name] + except (AttributeError, KeyError): + original = getattr(target, name, DEFAULT) + else: + local = True + + if not self.create and original is DEFAULT: + raise AttributeError( + "%s does not have the attribute %r" % (target, name) + ) + return original, local + + + def __enter__(self): + """Perform the patch.""" + new, spec, spec_set = self.new, self.spec, self.spec_set + autospec, kwargs = self.autospec, self.kwargs + new_callable = self.new_callable + self.target = self.getter() + + # normalise False to None + if spec is False: + spec = None + if spec_set is False: + spec_set = None + if autospec is False: + autospec = None + + if spec is not None and autospec is not None: + raise TypeError("Can't specify spec and autospec") + if ((spec is not None or autospec is not None) and + spec_set not in (True, None)): + raise TypeError("Can't provide explicit spec_set *and* spec or autospec") + + original, local = self.get_original() + + if new is DEFAULT and autospec is None: + inherit = False + if spec is True: + # set spec to the object we are replacing + spec = original + if spec_set is True: + spec_set = original + spec = None + elif spec is not None: + if spec_set is True: + spec_set = spec + spec = None + elif spec_set is True: + spec_set = original + + if spec is not None or spec_set is not None: + if original is DEFAULT: + raise TypeError("Can't use 'spec' with create=True") + if isinstance(original, ClassTypes): + # If we're patching out a class and there is a spec + inherit = True + + Klass = MagicMock + _kwargs = {} + if new_callable is not None: + Klass = new_callable + elif spec is not None or spec_set is not None: + this_spec = spec + if spec_set is not None: + this_spec = spec_set + if _is_list(this_spec): + not_callable = '__call__' not in this_spec + else: + not_callable = not _callable(this_spec) + if not_callable: + Klass = NonCallableMagicMock + + if spec is not None: + _kwargs['spec'] = spec + if spec_set is not None: + _kwargs['spec_set'] = spec_set + + # add a name to mocks + if (isinstance(Klass, type) and + issubclass(Klass, NonCallableMock) and self.attribute): + _kwargs['name'] = self.attribute + + _kwargs.update(kwargs) + new = Klass(**_kwargs) + + if inherit and _is_instance_mock(new): + # we can only tell if the instance should be callable if the + # spec is not a list + this_spec = spec + if spec_set is not None: + this_spec = spec_set + if (not _is_list(this_spec) and not + _instance_callable(this_spec)): + Klass = NonCallableMagicMock + + _kwargs.pop('name') + new.return_value = Klass(_new_parent=new, _new_name='()', + **_kwargs) + elif autospec is not None: + # spec is ignored, new *must* be default, spec_set is treated + # as a boolean. Should we check spec is not None and that spec_set + # is a bool? + if new is not DEFAULT: + raise TypeError( + "autospec creates the mock for you. Can't specify " + "autospec and new." 
+ ) + if original is DEFAULT: + raise TypeError("Can't use 'autospec' with create=True") + spec_set = bool(spec_set) + if autospec is True: + autospec = original + + new = create_autospec(autospec, spec_set=spec_set, + _name=self.attribute, **kwargs) + elif kwargs: + # can't set keyword args when we aren't creating the mock + # XXXX If new is a Mock we could call new.configure_mock(**kwargs) + raise TypeError("Can't pass kwargs to a mock we aren't creating") + + new_attr = new + + self.temp_original = original + self.is_local = local + setattr(self.target, self.attribute, new_attr) + if self.attribute_name is not None: + extra_args = {} + if self.new is DEFAULT: + extra_args[self.attribute_name] = new + for patching in self.additional_patchers: + arg = patching.__enter__() + if patching.new is DEFAULT: + extra_args.update(arg) + return extra_args + + return new + + + def __exit__(self, *exc_info): + """Undo the patch.""" + if not _is_started(self): + raise RuntimeError('stop called on unstarted patcher') + + if self.is_local and self.temp_original is not DEFAULT: + setattr(self.target, self.attribute, self.temp_original) + else: + delattr(self.target, self.attribute) + if not self.create and not hasattr(self.target, self.attribute): + # needed for proxy objects like django settings + setattr(self.target, self.attribute, self.temp_original) + + del self.temp_original + del self.is_local + del self.target + for patcher in reversed(self.additional_patchers): + if _is_started(patcher): + patcher.__exit__(*exc_info) + + + def start(self): + """Activate a patch, returning any created mock.""" + result = self.__enter__() + self._active_patches.add(self) + return result + + + def stop(self): + """Stop an active patch.""" + self._active_patches.discard(self) + return self.__exit__() + + + +def _get_target(target): + try: + target, attribute = target.rsplit('.', 1) + except (TypeError, ValueError): + raise TypeError("Need a valid target to patch. You supplied: %r" % + (target,)) + getter = lambda: _importer(target) + return getter, attribute + + +def _patch_object( + target, attribute, new=DEFAULT, spec=None, + create=False, spec_set=None, autospec=None, + new_callable=None, **kwargs + ): + """ + patch.object(target, attribute, new=DEFAULT, spec=None, create=False, + spec_set=None, autospec=None, new_callable=None, **kwargs) + + patch the named member (`attribute`) on an object (`target`) with a mock + object. + + `patch.object` can be used as a decorator, class decorator or a context + manager. Arguments `new`, `spec`, `create`, `spec_set`, + `autospec` and `new_callable` have the same meaning as for `patch`. Like + `patch`, `patch.object` takes arbitrary keyword arguments for configuring + the mock object it creates. + + When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` + for choosing which methods to wrap. + """ + getter = lambda: target + return _patch( + getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs + ) + + +def _patch_multiple(target, spec=None, create=False, spec_set=None, + autospec=None, new_callable=None, **kwargs): + """Perform multiple patches in a single call. It takes the object to be + patched (either as an object or a string to fetch the object by importing) + and keyword arguments for the patches:: + + with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): + ... + + Use `DEFAULT` as the value if you want `patch.multiple` to create + mocks for you. 
In this case the created mocks are passed into a decorated
+    function by keyword, and a dictionary is returned when `patch.multiple` is
+    used as a context manager.
+
+    `patch.multiple` can be used as a decorator, class decorator or a context
+    manager. The arguments `spec`, `spec_set`, `create`,
+    `autospec` and `new_callable` have the same meaning as for `patch`. These
+    arguments will be applied to *all* patches done by `patch.multiple`.
+
+    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
+    for choosing which methods to wrap.
+    """
+    if type(target) in (unicode, str):
+        getter = lambda: _importer(target)
+    else:
+        getter = lambda: target
+
+    if not kwargs:
+        raise ValueError(
+            'Must supply at least one keyword argument with patch.multiple'
+        )
+    # need to wrap in a list for python 3, where items is a view
+    items = list(kwargs.items())
+    attribute, new = items[0]
+    patcher = _patch(
+        getter, attribute, new, spec, create, spec_set,
+        autospec, new_callable, {}
+    )
+    patcher.attribute_name = attribute
+    for attribute, new in items[1:]:
+        this_patcher = _patch(
+            getter, attribute, new, spec, create, spec_set,
+            autospec, new_callable, {}
+        )
+        this_patcher.attribute_name = attribute
+        patcher.additional_patchers.append(this_patcher)
+    return patcher
+
+
+def patch(
+        target, new=DEFAULT, spec=None, create=False,
+        spec_set=None, autospec=None, new_callable=None, **kwargs
+    ):
+    """
+    `patch` acts as a function decorator, class decorator or a context
+    manager. Inside the body of the function or with statement, the `target`
+    is patched with a `new` object. When the function/with statement exits
+    the patch is undone.
+
+    If `new` is omitted, then the target is replaced with a
+    `MagicMock`. If `patch` is used as a decorator and `new` is
+    omitted, the created mock is passed in as an extra argument to the
+    decorated function. If `patch` is used as a context manager the created
+    mock is returned by the context manager.
+
+    `target` should be a string in the form `'package.module.ClassName'`. The
+    `target` is imported and the specified object replaced with the `new`
+    object, so the `target` must be importable from the environment you are
+    calling `patch` from. The target is imported when the decorated function
+    is executed, not at decoration time.
+
+    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
+    if patch is creating one for you.
+
+    In addition you can pass `spec=True` or `spec_set=True`, which causes
+    patch to pass in the object being mocked as the spec/spec_set object.
+
+    `new_callable` allows you to specify a different class, or callable object,
+    that will be called to create the `new` object. By default `MagicMock` is
+    used.
+
+    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
+    then the mock will be created with a spec from the object being replaced.
+    All attributes of the mock will also have the spec of the corresponding
+    attribute of the object being replaced. Methods and functions being
+    mocked will have their arguments checked and will raise a `TypeError` if
+    they are called with the wrong signature. For mocks replacing a class,
+    their return value (the 'instance') will have the same spec as the class.
+
+    Instead of `autospec=True` you can pass `autospec=some_object` to use an
+    arbitrary object as the spec instead of the one being replaced.
+
+    By default `patch` will fail to replace attributes that don't exist. If
+    you pass in `create=True`, and the attribute doesn't exist, patch will
+    create the attribute for you when the patched function is called, and
+    delete it again afterwards. This is useful for writing tests against
+    attributes that your production code creates at runtime. It is off by
+    default because it can be dangerous. With it switched on you can write
+    passing tests against APIs that don't actually exist!
+
+    Patch can be used as a `TestCase` class decorator. It works by
+    decorating each test method in the class. This reduces the boilerplate
+    code when your test methods share a common set of patchings. `patch` finds
+    tests by looking for method names that start with `patch.TEST_PREFIX`.
+    By default this is `test`, which matches the way `unittest` finds tests.
+    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
+
+    Patch can be used as a context manager, with the with statement. Here the
+    patching applies to the indented block after the with statement. If you
+    use "as" then the patched object will be bound to the name after the
+    "as"; very useful if `patch` is creating a mock object for you.
+
+    `patch` takes arbitrary keyword arguments. These will be passed to
+    the `Mock` (or `new_callable`) on construction.
+
+    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
+    available for alternate use-cases.
+    """
+    getter, attribute = _get_target(target)
+    return _patch(
+        getter, attribute, new, spec, create,
+        spec_set, autospec, new_callable, kwargs
+    )
+
+
+class _patch_dict(object):
+    """
+    Patch a dictionary, or dictionary like object, and restore the dictionary
+    to its original state after the test.
+
+    `in_dict` can be a dictionary or a mapping like container. If it is a
+    mapping then it must at least support getting, setting and deleting items
+    plus iterating over keys.
+
+    `in_dict` can also be a string specifying the name of the dictionary, which
+    will then be fetched by importing it.
+
+    `values` can be a dictionary of values to set in the dictionary. `values`
+    can also be an iterable of `(key, value)` pairs.
+
+    If `clear` is True then the dictionary will be cleared before the new
+    values are set.
+
+    `patch.dict` can also be called with arbitrary keyword arguments to set
+    values in the dictionary::
+
+        with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
+            ...
+
+    `patch.dict` can be used as a context manager, decorator or class
+    decorator. When used as a class decorator `patch.dict` honours
+    `patch.TEST_PREFIX` for choosing which methods to wrap.
+    """
+
+    def __init__(self, in_dict, values=(), clear=False, **kwargs):
+        if isinstance(in_dict, basestring):
+            in_dict = _importer(in_dict)
+        self.in_dict = in_dict
+        # support any argument supported by dict(...)
constructor + self.values = dict(values) + self.values.update(kwargs) + self.clear = clear + self._original = None + + + def __call__(self, f): + if isinstance(f, ClassTypes): + return self.decorate_class(f) + @wraps(f) + def _inner(*args, **kw): + self._patch_dict() + try: + return f(*args, **kw) + finally: + self._unpatch_dict() + + return _inner + + + def decorate_class(self, klass): + for attr in dir(klass): + attr_value = getattr(klass, attr) + if (attr.startswith(patch.TEST_PREFIX) and + hasattr(attr_value, "__call__")): + decorator = _patch_dict(self.in_dict, self.values, self.clear) + decorated = decorator(attr_value) + setattr(klass, attr, decorated) + return klass + + + def __enter__(self): + """Patch the dict.""" + self._patch_dict() + + + def _patch_dict(self): + values = self.values + in_dict = self.in_dict + clear = self.clear + + try: + original = in_dict.copy() + except AttributeError: + # dict like object with no copy method + # must support iteration over keys + original = {} + for key in in_dict: + original[key] = in_dict[key] + self._original = original + + if clear: + _clear_dict(in_dict) + + try: + in_dict.update(values) + except AttributeError: + # dict like object with no update method + for key in values: + in_dict[key] = values[key] + + + def _unpatch_dict(self): + in_dict = self.in_dict + original = self._original + + _clear_dict(in_dict) + + try: + in_dict.update(original) + except AttributeError: + for key in original: + in_dict[key] = original[key] + + + def __exit__(self, *args): + """Unpatch the dict.""" + self._unpatch_dict() + return False + + start = __enter__ + stop = __exit__ + + +def _clear_dict(in_dict): + try: + in_dict.clear() + except AttributeError: + keys = list(in_dict) + for key in keys: + del in_dict[key] + + +def _patch_stopall(): + """Stop all active patches.""" + for patch in list(_patch._active_patches): + patch.stop() + + +patch.object = _patch_object +patch.dict = _patch_dict +patch.multiple = _patch_multiple +patch.stopall = _patch_stopall +patch.TEST_PREFIX = 'test' + +magic_methods = ( + "lt le gt ge eq ne " + "getitem setitem delitem " + "len contains iter " + "hash str sizeof " + "enter exit " + "divmod neg pos abs invert " + "complex int float index " + "trunc floor ceil " +) + +numerics = "add sub mul div floordiv mod lshift rshift and xor or pow " +inplace = ' '.join('i%s' % n for n in numerics.split()) +right = ' '.join('r%s' % n for n in numerics.split()) +extra = '' +if inPy3k: + extra = 'bool next ' +else: + extra = 'unicode long nonzero oct hex truediv rtruediv ' + +# not including __prepare__, __instancecheck__, __subclasscheck__ +# (as they are metaclass methods) +# __del__ is not supported at all as it causes problems if it exists + +_non_defaults = set('__%s__' % method for method in [ + 'cmp', 'getslice', 'setslice', 'coerce', 'subclasses', + 'format', 'get', 'set', 'delete', 'reversed', + 'missing', 'reduce', 'reduce_ex', 'getinitargs', + 'getnewargs', 'getstate', 'setstate', 'getformat', + 'setformat', 'repr', 'dir' +]) + + +def _get_method(name, func): + "Turns a callable object (like a mock) into a real function" + def method(self, *args, **kw): + return func(self, *args, **kw) + method.__name__ = name + return method + + +_magics = set( + '__%s__' % method for method in + ' '.join([magic_methods, numerics, inplace, right, extra]).split() +) + +_all_magics = _magics | _non_defaults + +_unsupported_magics = set([ + '__getattr__', '__setattr__', + '__init__', '__new__', '__prepare__' + '__instancecheck__', 
'__subclasscheck__', + '__del__' +]) + +_calculate_return_value = { + '__hash__': lambda self: object.__hash__(self), + '__str__': lambda self: object.__str__(self), + '__sizeof__': lambda self: object.__sizeof__(self), + '__unicode__': lambda self: unicode(object.__str__(self)), +} + +_return_values = { + '__lt__': NotImplemented, + '__gt__': NotImplemented, + '__le__': NotImplemented, + '__ge__': NotImplemented, + '__int__': 1, + '__contains__': False, + '__len__': 0, + '__exit__': False, + '__complex__': 1j, + '__float__': 1.0, + '__bool__': True, + '__nonzero__': True, + '__oct__': '1', + '__hex__': '0x1', + '__long__': long(1), + '__index__': 1, +} + + +def _get_eq(self): + def __eq__(other): + ret_val = self.__eq__._mock_return_value + if ret_val is not DEFAULT: + return ret_val + return self is other + return __eq__ + +def _get_ne(self): + def __ne__(other): + if self.__ne__._mock_return_value is not DEFAULT: + return DEFAULT + return self is not other + return __ne__ + +def _get_iter(self): + def __iter__(): + ret_val = self.__iter__._mock_return_value + if ret_val is DEFAULT: + return iter([]) + # if ret_val was already an iterator, then calling iter on it should + # return the iterator unchanged + return iter(ret_val) + return __iter__ + +_side_effect_methods = { + '__eq__': _get_eq, + '__ne__': _get_ne, + '__iter__': _get_iter, +} + + + +def _set_return_value(mock, method, name): + fixed = _return_values.get(name, DEFAULT) + if fixed is not DEFAULT: + method.return_value = fixed + return + + return_calulator = _calculate_return_value.get(name) + if return_calulator is not None: + try: + return_value = return_calulator(mock) + except AttributeError: + # XXXX why do we return AttributeError here? + # set it as a side_effect instead? + return_value = AttributeError(name) + method.return_value = return_value + return + + side_effector = _side_effect_methods.get(name) + if side_effector is not None: + method.side_effect = side_effector(mock) + + + +class MagicMixin(object): + def __init__(self, *args, **kw): + _super(MagicMixin, self).__init__(*args, **kw) + self._mock_set_magics() + + + def _mock_set_magics(self): + these_magics = _magics + + if self._mock_methods is not None: + these_magics = _magics.intersection(self._mock_methods) + + remove_magics = set() + remove_magics = _magics - these_magics + + for entry in remove_magics: + if entry in type(self).__dict__: + # remove unneeded magic methods + delattr(self, entry) + + # don't overwrite existing attributes if called a second time + these_magics = these_magics - set(type(self).__dict__) + + _type = type(self) + for entry in these_magics: + setattr(_type, entry, MagicProxy(entry, self)) + + + +class NonCallableMagicMock(MagicMixin, NonCallableMock): + """A version of `MagicMock` that isn't callable.""" + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + self._mock_set_magics() + + + +class MagicMock(MagicMixin, Mock): + """ + MagicMock is a subclass of Mock with default implementations + of most of the magic methods. You can use MagicMock without having to + configure the magic methods yourself. + + If you use the `spec` or `spec_set` arguments then *only* magic + methods that exist in the spec will be created. 
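+
+    An illustrative sketch of the preconfigured magic methods::
+
+        m = MagicMock()
+        len(m)                      # 0, the default for __len__
+        m.__len__.return_value = 6
+        len(m)                      # now 6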
+ + Attributes and the return value of a `MagicMock` will also be `MagicMocks`. + """ + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + self._mock_set_magics() + + + +class MagicProxy(object): + def __init__(self, name, parent): + self.name = name + self.parent = parent + + def __call__(self, *args, **kwargs): + m = self.create_mock() + return m(*args, **kwargs) + + def create_mock(self): + entry = self.name + parent = self.parent + m = parent._get_child_mock(name=entry, _new_name=entry, + _new_parent=parent) + setattr(parent, entry, m) + _set_return_value(parent, m, entry) + return m + + def __get__(self, obj, _type=None): + return self.create_mock() + + + +class _ANY(object): + "A helper object that compares equal to everything." + + def __eq__(self, other): + return True + + def __ne__(self, other): + return False + + def __repr__(self): + return '' + +ANY = _ANY() + + + +def _format_call_signature(name, args, kwargs): + message = '%s(%%s)' % name + formatted_args = '' + args_string = ', '.join([repr(arg) for arg in args]) + kwargs_string = ', '.join([ + '%s=%r' % (key, value) for key, value in kwargs.items() + ]) + if args_string: + formatted_args = args_string + if kwargs_string: + if formatted_args: + formatted_args += ', ' + formatted_args += kwargs_string + + return message % formatted_args + + + +class _Call(tuple): + """ + A tuple for holding the results of a call to a mock, either in the form + `(args, kwargs)` or `(name, args, kwargs)`. + + If args or kwargs are empty then a call tuple will compare equal to + a tuple without those values. This makes comparisons less verbose:: + + _Call(('name', (), {})) == ('name',) + _Call(('name', (1,), {})) == ('name', (1,)) + _Call(((), {'a': 'b'})) == ({'a': 'b'},) + + The `_Call` object provides a useful shortcut for comparing with call:: + + _Call(((1, 2), {'a': 3})) == call(1, 2, a=3) + _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3) + + If the _Call has no name then it will match any name. 
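+
+    For example, a name-less call compares equal to a named one with the
+    same arguments (illustrative)::
+
+        _Call(((1, 2), {})) == call.method(1, 2)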
+ """ + def __new__(cls, value=(), name=None, parent=None, two=False, + from_kall=True): + name = '' + args = () + kwargs = {} + _len = len(value) + if _len == 3: + name, args, kwargs = value + elif _len == 2: + first, second = value + if isinstance(first, basestring): + name = first + if isinstance(second, tuple): + args = second + else: + kwargs = second + else: + args, kwargs = first, second + elif _len == 1: + value, = value + if isinstance(value, basestring): + name = value + elif isinstance(value, tuple): + args = value + else: + kwargs = value + + if two: + return tuple.__new__(cls, (args, kwargs)) + + return tuple.__new__(cls, (name, args, kwargs)) + + + def __init__(self, value=(), name=None, parent=None, two=False, + from_kall=True): + self.name = name + self.parent = parent + self.from_kall = from_kall + + + def __eq__(self, other): + if other is ANY: + return True + try: + len_other = len(other) + except TypeError: + return False + + self_name = '' + if len(self) == 2: + self_args, self_kwargs = self + else: + self_name, self_args, self_kwargs = self + + other_name = '' + if len_other == 0: + other_args, other_kwargs = (), {} + elif len_other == 3: + other_name, other_args, other_kwargs = other + elif len_other == 1: + value, = other + if isinstance(value, tuple): + other_args = value + other_kwargs = {} + elif isinstance(value, basestring): + other_name = value + other_args, other_kwargs = (), {} + else: + other_args = () + other_kwargs = value + else: + # len 2 + # could be (name, args) or (name, kwargs) or (args, kwargs) + first, second = other + if isinstance(first, basestring): + other_name = first + if isinstance(second, tuple): + other_args, other_kwargs = second, {} + else: + other_args, other_kwargs = (), second + else: + other_args, other_kwargs = first, second + + if self_name and other_name != self_name: + return False + + # this order is important for ANY to work! + return (other_args, other_kwargs) == (self_args, self_kwargs) + + + def __ne__(self, other): + return not self.__eq__(other) + + + def __call__(self, *args, **kwargs): + if self.name is None: + return _Call(('', args, kwargs), name='()') + + name = self.name + '()' + return _Call((self.name, args, kwargs), name=name, parent=self) + + + def __getattr__(self, attr): + if self.name is None: + return _Call(name=attr, from_kall=False) + name = '%s.%s' % (self.name, attr) + return _Call(name=name, parent=self, from_kall=False) + + + def __repr__(self): + if not self.from_kall: + name = self.name or 'call' + if name.startswith('()'): + name = 'call%s' % name + return name + + if len(self) == 2: + name = 'call' + args, kwargs = self + else: + name, args, kwargs = self + if not name: + name = 'call' + elif not name.startswith('()'): + name = 'call.%s' % name + else: + name = 'call%s' % name + return _format_call_signature(name, args, kwargs) + + + def call_list(self): + """For a call object that represents multiple calls, `call_list` + returns a list of all the intermediate calls as well as the + final call.""" + vals = [] + thing = self + while thing is not None: + if thing.from_kall: + vals.append(thing) + thing = thing.parent + return _CallList(reversed(vals)) + + +call = _Call(from_kall=False) + + + +def create_autospec(spec, spec_set=False, instance=False, _parent=None, + _name=None, **kwargs): + """Create a mock object using another object as a spec. Attributes on the + mock will use the corresponding attribute on the `spec` object as their + spec. 
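+
+    A minimal sketch (`process` is a hypothetical function)::
+
+        def process(a, b=1):
+            pass
+
+        m = create_autospec(process)
+        m(1, b=2)     # accepted: matches the real signature
+        m(1, 2, 3)    # raises TypeError: too many arguments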
+ + Functions or methods being mocked will have their arguments checked + to check that they are called with the correct signature. + + If `spec_set` is True then attempting to set attributes that don't exist + on the spec object will raise an `AttributeError`. + + If a class is used as a spec then the return value of the mock (the + instance of the class) will have the same spec. You can use a class as the + spec for an instance object by passing `instance=True`. The returned mock + will only be callable if instances of the mock are callable. + + `create_autospec` also takes arbitrary keyword arguments that are passed to + the constructor of the created mock.""" + if _is_list(spec): + # can't pass a list instance to the mock constructor as it will be + # interpreted as a list of strings + spec = type(spec) + + is_type = isinstance(spec, ClassTypes) + + _kwargs = {'spec': spec} + if spec_set: + _kwargs = {'spec_set': spec} + elif spec is None: + # None we mock with a normal mock without a spec + _kwargs = {} + + _kwargs.update(kwargs) + + Klass = MagicMock + if type(spec) in DescriptorTypes: + # descriptors don't have a spec + # because we don't know what type they return + _kwargs = {} + elif not _callable(spec): + Klass = NonCallableMagicMock + elif is_type and instance and not _instance_callable(spec): + Klass = NonCallableMagicMock + + _new_name = _name + if _parent is None: + # for a top level object no _new_name should be set + _new_name = '' + + mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, + name=_name, **_kwargs) + + if isinstance(spec, FunctionTypes): + # should only happen at the top level because we don't + # recurse for functions + mock = _set_signature(mock, spec) + else: + _check_signature(spec, mock, is_type, instance) + + if _parent is not None and not instance: + _parent._mock_children[_name] = mock + + if is_type and not instance and 'return_value' not in kwargs: + mock.return_value = create_autospec(spec, spec_set, instance=True, + _name='()', _parent=mock) + + for entry in dir(spec): + if _is_magic(entry): + # MagicMock already does the useful magic methods for us + continue + + if isinstance(spec, FunctionTypes) and entry in FunctionAttributes: + # allow a mock to actually be a function + continue + + # XXXX do we need a better way of getting attributes without + # triggering code execution (?) Probably not - we need the actual + # object to mock it so we would rather trigger a property than mock + # the property descriptor. Likewise we want to mock out dynamically + # provided attributes. + # XXXX what about attributes that raise exceptions other than + # AttributeError on being fetched? 
+ # we could be resilient against it, or catch and propagate the + # exception when the attribute is fetched from the mock + try: + original = getattr(spec, entry) + except AttributeError: + continue + + kwargs = {'spec': original} + if spec_set: + kwargs = {'spec_set': original} + + if not isinstance(original, FunctionTypes): + new = _SpecState(original, spec_set, mock, entry, instance) + mock._mock_children[entry] = new + else: + parent = mock + if isinstance(spec, FunctionTypes): + parent = mock.mock + + new = MagicMock(parent=parent, name=entry, _new_name=entry, + _new_parent=parent, **kwargs) + mock._mock_children[entry] = new + skipfirst = _must_skip(spec, entry, is_type) + _check_signature(original, new, skipfirst=skipfirst) + + # so functions created with _set_signature become instance attributes, + # *plus* their underlying mock exists in _mock_children of the parent + # mock. Adding to _mock_children may be unnecessary where we are also + # setting as an instance attribute? + if isinstance(new, FunctionTypes): + setattr(mock, entry, new) + + return mock + + +def _must_skip(spec, entry, is_type): + if not isinstance(spec, ClassTypes): + if entry in getattr(spec, '__dict__', {}): + # instance attribute - shouldn't skip + return False + spec = spec.__class__ + if not hasattr(spec, '__mro__'): + # old style class: can't have descriptors anyway + return is_type + + for klass in spec.__mro__: + result = klass.__dict__.get(entry, DEFAULT) + if result is DEFAULT: + continue + if isinstance(result, (staticmethod, classmethod)): + return False + return is_type + + # shouldn't get here unless function is a dynamically provided attribute + # XXXX untested behaviour + return is_type + + +def _get_class(obj): + try: + return obj.__class__ + except AttributeError: + # in Python 2, _sre.SRE_Pattern objects have no __class__ + return type(obj) + + +class _SpecState(object): + + def __init__(self, spec, spec_set=False, parent=None, + name=None, ids=None, instance=False): + self.spec = spec + self.ids = ids + self.spec_set = spec_set + self.parent = parent + self.instance = instance + self.name = name + + +FunctionTypes = ( + # python function + type(create_autospec), + # instance method + type(ANY.__eq__), + # unbound method + type(_ANY.__eq__), +) + +FunctionAttributes = set([ + 'func_closure', + 'func_code', + 'func_defaults', + 'func_dict', + 'func_doc', + 'func_globals', + 'func_name', +]) + + +file_spec = None + + +def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read` method of the file handle to return. + This is an empty string by default. + """ + global file_spec + if file_spec is None: + # set on first use + if inPy3k: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + else: + file_spec = file + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.write.return_value = None + handle.__enter__.return_value = handle + handle.read.return_value = read_data + + mock.return_value = handle + return mock + + +class PropertyMock(Mock): + """ + A mock intended to be used as a property, or other descriptor, on a class. 
+ `PropertyMock` provides `__get__` and `__set__` methods so you can specify + a return value when it is fetched. + + Fetching a `PropertyMock` instance from an object calls the mock, with + no args. Setting it calls the mock with the value being set. + """ + def _get_child_mock(self, **kwargs): + return MagicMock(**kwargs) + + def __get__(self, obj, obj_type): + return self() + def __set__(self, obj, val): + self(val) diff --git a/awx/lib/site-packages/novaclient/__init__.py b/awx/lib/site-packages/novaclient/__init__.py new file mode 100644 index 0000000000..bfa75532f4 --- /dev/null +++ b/awx/lib/site-packages/novaclient/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + + +__version__ = pbr.version.VersionInfo('python-novaclient').version_string() diff --git a/awx/lib/site-packages/novaclient/auth_plugin.py b/awx/lib/site-packages/novaclient/auth_plugin.py new file mode 100644 index 0000000000..8434897884 --- /dev/null +++ b/awx/lib/site-packages/novaclient/auth_plugin.py @@ -0,0 +1,143 @@ +# Copyright 2013 OpenStack Foundation +# Copyright 2013 Spanish National Research Council. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import pkg_resources + +import six + +from novaclient import exceptions +from novaclient import utils + + +logger = logging.getLogger(__name__) + + +_discovered_plugins = {} + + +def discover_auth_systems(): + """Discover the available auth-systems. + + This won't take into account the old style auth-systems. + """ + ep_name = 'openstack.client.auth_plugin' + for ep in pkg_resources.iter_entry_points(ep_name): + try: + auth_plugin = ep.load() + except (ImportError, pkg_resources.UnknownExtra, AttributeError) as e: + logger.debug("ERROR: Cannot load auth plugin %s" % ep.name) + logger.debug(e, exc_info=1) + else: + _discovered_plugins[ep.name] = auth_plugin + + +def load_auth_system_opts(parser): + """Load options needed by the available auth-systems into a parser. + + This function will try to populate the parser with options from the + available plugins. 
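+
+    A sketch of typical usage (assuming a standard `argparse` parser)::
+
+        import argparse
+
+        parser = argparse.ArgumentParser()
+        discover_auth_systems()
+        load_auth_system_opts(parser)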
+ """ + for name, auth_plugin in six.iteritems(_discovered_plugins): + add_opts_fn = getattr(auth_plugin, "add_opts", None) + if add_opts_fn: + group = parser.add_argument_group("Auth-system '%s' options" % + name) + add_opts_fn(group) + + +def load_plugin(auth_system): + if auth_system in _discovered_plugins: + return _discovered_plugins[auth_system]() + + # NOTE(aloga): If we arrive here, the plugin will be an old-style one, + # so we have to create a fake AuthPlugin for it. + return DeprecatedAuthPlugin(auth_system) + + +class BaseAuthPlugin(object): + """Base class for authentication plugins. + + An authentication plugin needs to override at least the authenticate + method to be a valid plugin. + """ + def __init__(self): + self.opts = {} + + def get_auth_url(self): + """Return the auth url for the plugin (if any).""" + return None + + @staticmethod + def add_opts(parser): + """Populate and return the parser with the options for this plugin. + + If the plugin does not need any options, it should return the same + parser untouched. + """ + return parser + + def parse_opts(self, args): + """Parse the actual auth-system options if any. + + This method is expected to populate the attribute self.opts with a + dict containing the options and values needed to make authentication. + If the dict is empty, the client should assume that it needs the same + options as the 'keystone' auth system (i.e. os_username and + os_password). + + Returns the self.opts dict. + """ + return self.opts + + def authenticate(self, cls, auth_url): + """Authenticate using plugin defined method.""" + raise exceptions.AuthSystemNotFound(self.auth_system) + + +class DeprecatedAuthPlugin(object): + """Class to mimic the AuthPlugin class for deprecated auth systems. + + Old auth systems only define two entry points: openstack.client.auth_url + and openstack.client.authenticate. This class will load those entry points + into a class similar to a valid AuthPlugin. + """ + def __init__(self, auth_system): + self.auth_system = auth_system + + def authenticate(cls, auth_url): + raise exceptions.AuthSystemNotFound(self.auth_system) + + self.opts = {} + + self.get_auth_url = lambda: None + self.authenticate = authenticate + + self._load_endpoints() + + def _load_endpoints(self): + ep_name = 'openstack.client.auth_url' + fn = utils._load_entry_point(ep_name, name=self.auth_system) + if fn: + self.get_auth_url = fn + + ep_name = 'openstack.client.authenticate' + fn = utils._load_entry_point(ep_name, name=self.auth_system) + if fn: + self.authenticate = fn + + def parse_opts(self, args): + return self.opts diff --git a/awx/lib/site-packages/novaclient/base.py b/awx/lib/site-packages/novaclient/base.py new file mode 100644 index 0000000000..120c698fcb --- /dev/null +++ b/awx/lib/site-packages/novaclient/base.py @@ -0,0 +1,489 @@ +# Copyright 2010 Jacob Kaplan-Moss + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Base utilities to build API operation managers and objects on top of. +""" + +import abc +import base64 +import contextlib +import hashlib +import inspect +import os + +import six + +from novaclient import exceptions +from novaclient.openstack.common import strutils +from novaclient import utils + + +def getid(obj): + """ + Abstracts the common pattern of allowing both an object or an object's ID + as a parameter when dealing with relationships. + """ + try: + return obj.id + except AttributeError: + return obj + + +class Manager(utils.HookableMixin): + """ + Managers interact with a particular type of API (servers, flavors, images, + etc.) and provide CRUD operations for them. + """ + resource_class = None + + def __init__(self, api): + self.api = api + + def _list(self, url, response_key, obj_class=None, body=None): + if body: + _resp, body = self.api.client.post(url, body=body) + else: + _resp, body = self.api.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + # NOTE(ja): keystone returns values as list as {'values': [ ... ]} + # unlike other services which just return the list... + if isinstance(data, dict): + try: + data = data['values'] + except KeyError: + pass + + with self.completion_cache('human_id', obj_class, mode="w"): + with self.completion_cache('uuid', obj_class, mode="w"): + return [obj_class(self, res, loaded=True) + for res in data if res] + + @contextlib.contextmanager + def completion_cache(self, cache_type, obj_class, mode): + """ + The completion cache store items that can be used for bash + autocompletion, like UUIDs or human-friendly IDs. + + A resource listing will clear and repopulate the cache. + + A resource create will append to the cache. + + Delete is not handled because listings are assumed to be performed + often enough to keep the cache reasonably up-to-date. + """ + base_dir = utils.env('NOVACLIENT_UUID_CACHE_DIR', + default="~/.novaclient") + + # NOTE(sirp): Keep separate UUID caches for each username + endpoint + # pair + username = utils.env('OS_USERNAME', 'NOVA_USERNAME') + url = utils.env('OS_URL', 'NOVA_URL') + uniqifier = hashlib.md5(username.encode('utf-8') + + url.encode('utf-8')).hexdigest() + + cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier)) + + try: + os.makedirs(cache_dir, 0o755) + except OSError: + # NOTE(kiall): This is typicaly either permission denied while + # attempting to create the directory, or the directory + # already exists. Either way, don't fail. + pass + + resource = obj_class.__name__.lower() + filename = "%s-%s-cache" % (resource, cache_type.replace('_', '-')) + path = os.path.join(cache_dir, filename) + + cache_attr = "_%s_cache" % cache_type + + try: + setattr(self, cache_attr, open(path, mode)) + except IOError: + # NOTE(kiall): This is typicaly a permission denied while + # attempting to write the cache file. 
+ pass + + try: + yield + finally: + cache = getattr(self, cache_attr, None) + if cache: + cache.close() + delattr(self, cache_attr) + + def write_to_completion_cache(self, cache_type, val): + cache = getattr(self, "_%s_cache" % cache_type, None) + if cache: + cache.write("%s\n" % val) + + def _get(self, url, response_key): + _resp, body = self.api.client.get(url) + return self.resource_class(self, body[response_key], loaded=True) + + def _create(self, url, body, response_key, return_raw=False, **kwargs): + self.run_hooks('modify_body_for_create', body, **kwargs) + _resp, body = self.api.client.post(url, body=body) + if return_raw: + return body[response_key] + + with self.completion_cache('human_id', self.resource_class, mode="a"): + with self.completion_cache('uuid', self.resource_class, mode="a"): + return self.resource_class(self, body[response_key]) + + def _delete(self, url): + _resp, _body = self.api.client.delete(url) + + def _update(self, url, body, response_key=None, **kwargs): + self.run_hooks('modify_body_for_update', body, **kwargs) + _resp, body = self.api.client.put(url, body=body) + if body: + if response_key: + return self.resource_class(self, body[response_key]) + else: + return self.resource_class(self, body) + + +class ManagerWithFind(Manager): + """ + Like a `Manager`, but with additional `find()`/`findall()` methods. + """ + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def list(self): + pass + + def find(self, **kwargs): + """ + Find a single item with attributes matching ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. + """ + matches = self.findall(**kwargs) + num_matches = len(matches) + if num_matches == 0: + msg = "No %s matching %s." % (self.resource_class.__name__, kwargs) + raise exceptions.NotFound(404, msg) + elif num_matches > 1: + raise exceptions.NoUniqueMatch + else: + return matches[0] + + def findall(self, **kwargs): + """ + Find all items with attributes matching ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. 
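+
+        An illustrative sketch (``status`` is a hypothetical resource
+        attribute)::
+
+            matching = manager.findall(status='ACTIVE')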
+ """ + found = [] + searches = kwargs.items() + + detailed = True + list_kwargs = {} + + list_argspec = inspect.getargspec(self.list) + if 'detailed' in list_argspec.args: + detailed = ("human_id" not in kwargs and + "name" not in kwargs and + "display_name" not in kwargs) + list_kwargs['detailed'] = detailed + + if 'is_public' in list_argspec.args and 'is_public' in kwargs: + is_public = kwargs['is_public'] + list_kwargs['is_public'] = is_public + if is_public is None: + tmp_kwargs = kwargs.copy() + del tmp_kwargs['is_public'] + searches = tmp_kwargs.items() + + listing = self.list(**list_kwargs) + + for obj in listing: + try: + if all(getattr(obj, attr) == value + for (attr, value) in searches): + if detailed: + found.append(obj) + else: + found.append(self.get(obj.id)) + except AttributeError: + continue + + return found + + +class BootingManagerWithFind(ManagerWithFind): + """Like a `ManagerWithFind`, but has the ability to boot servers.""" + + def _parse_block_device_mapping(self, block_device_mapping): + bdm = [] + + for device_name, mapping in six.iteritems(block_device_mapping): + # + # The mapping is in the format: + # :[]:[]:[] + # + bdm_dict = {'device_name': device_name} + + mapping_parts = mapping.split(':') + source_id = mapping_parts[0] + if len(mapping_parts) == 1: + bdm_dict['volume_id'] = source_id + + elif len(mapping_parts) > 1: + source_type = mapping_parts[1] + if source_type.startswith('snap'): + bdm_dict['snapshot_id'] = source_id + else: + bdm_dict['volume_id'] = source_id + + if len(mapping_parts) > 2 and mapping_parts[2]: + bdm_dict['volume_size'] = str(int(mapping_parts[2])) + + if len(mapping_parts) > 3: + bdm_dict['delete_on_termination'] = mapping_parts[3] + + bdm.append(bdm_dict) + return bdm + + def _boot(self, resource_url, response_key, name, image, flavor, + meta=None, files=None, userdata=None, + reservation_id=None, return_raw=False, min_count=None, + max_count=None, security_groups=None, key_name=None, + availability_zone=None, block_device_mapping=None, + block_device_mapping_v2=None, nics=None, scheduler_hints=None, + config_drive=None, admin_pass=None, disk_config=None, **kwargs): + """ + Create (boot) a new server. + + :param name: Something to name the server. + :param image: The :class:`Image` to boot with. + :param flavor: The :class:`Flavor` to boot onto. + :param meta: A dict of arbitrary key/value metadata to store for this + server. A maximum of five entries is allowed, and both + keys and values must be 255 characters or less. + :param files: A dict of files to overrwrite on the server upon boot. + Keys are file names (i.e. ``/etc/passwd``) and values + are the file contents (either as a string or as a + file-like object). A maximum of five entries is allowed, + and each file must be 10k or less. + :param reservation_id: a UUID for the set of servers being requested. + :param return_raw: If True, don't try to coearse the result into + a Resource object. + :param security_groups: list of security group names + :param key_name: (optional extension) name of keypair to inject into + the instance + :param availability_zone: Name of the availability zone for instance + placement. + :param block_device_mapping: A dict of block device mappings for this + server. + :param block_device_mapping_v2: A dict of block device mappings V2 for + this server. + :param nics: (optional extension) an ordered list of nics to be + added to this server, with information about + connected networks, fixed ips, etc. 
+    def _boot(self, resource_url, response_key, name, image, flavor,
+              meta=None, files=None, userdata=None,
+              reservation_id=None, return_raw=False, min_count=None,
+              max_count=None, security_groups=None, key_name=None,
+              availability_zone=None, block_device_mapping=None,
+              block_device_mapping_v2=None, nics=None, scheduler_hints=None,
+              config_drive=None, admin_pass=None, disk_config=None, **kwargs):
+        """
+        Create (boot) a new server.
+
+        :param name: Something to name the server.
+        :param image: The :class:`Image` to boot with.
+        :param flavor: The :class:`Flavor` to boot onto.
+        :param meta: A dict of arbitrary key/value metadata to store for this
+                     server. A maximum of five entries is allowed, and both
+                     keys and values must be 255 characters or less.
+        :param files: A dict of files to overwrite on the server upon boot.
+                      Keys are file names (i.e. ``/etc/passwd``) and values
+                      are the file contents (either as a string or as a
+                      file-like object). A maximum of five entries is allowed,
+                      and each file must be 10k or less.
+        :param reservation_id: a UUID for the set of servers being requested.
+        :param return_raw: If True, don't try to coerce the result into
+                           a Resource object.
+        :param security_groups: list of security group names
+        :param key_name: (optional extension) name of keypair to inject into
+                         the instance
+        :param availability_zone: Name of the availability zone for instance
+                                  placement.
+        :param block_device_mapping: A dict of block device mappings for this
+                                     server.
+        :param block_device_mapping_v2: A dict of block device mappings V2 for
+                                        this server.
+        :param nics: (optional extension) an ordered list of nics to be
+                     added to this server, with information about
+                     connected networks, fixed ips, etc.
+        :param scheduler_hints: (optional extension) arbitrary key-value pairs
+                                specified by the client to help boot an
+                                instance.
+        :param config_drive: (optional extension) value for config drive;
+                             either a boolean, or a volume id
+        :param admin_pass: admin password for the server.
+        :param disk_config: (optional extension) control how the disk is
+                            partitioned when the server is created.
+        """
+        body = {"server": {
+            "name": name,
+            "imageRef": str(getid(image)) if image else '',
+            "flavorRef": str(getid(flavor)),
+        }}
+        if userdata:
+            if hasattr(userdata, 'read'):
+                userdata = userdata.read()
+
+            userdata = strutils.safe_encode(userdata)
+            body["server"]["user_data"] = base64.b64encode(userdata)
+        if meta:
+            body["server"]["metadata"] = meta
+        if reservation_id:
+            body["server"]["reservation_id"] = reservation_id
+        if key_name:
+            body["server"]["key_name"] = key_name
+        if scheduler_hints:
+            body['os:scheduler_hints'] = scheduler_hints
+        if config_drive:
+            body["server"]["config_drive"] = config_drive
+        if admin_pass:
+            body["server"]["adminPass"] = admin_pass
+        if not min_count:
+            min_count = 1
+        if not max_count:
+            max_count = min_count
+        body["server"]["min_count"] = min_count
+        body["server"]["max_count"] = max_count
+
+        if security_groups:
+            body["server"]["security_groups"] = \
+                [{'name': sg} for sg in security_groups]
+
+        # Files are slightly tricky. They're passed in a "personality"
+        # list to the POST. Each item is a dict giving a file name and the
+        # base64-encoded contents of the file. We want to allow passing
+        # either an open file *or* some contents as files here.
+        if files:
+            personality = body['server']['personality'] = []
+            for filepath, file_or_string in files.items():
+                if hasattr(file_or_string, 'read'):
+                    data = file_or_string.read()
+                else:
+                    data = file_or_string
+                personality.append({
+                    'path': filepath,
+                    'contents': data.encode('base64'),
+                })
+
+        if availability_zone:
+            body["server"]["availability_zone"] = availability_zone
+
+        # Block device mappings are passed as a list of dictionaries
+        if block_device_mapping:
+            body['server']['block_device_mapping'] = \
+                self._parse_block_device_mapping(block_device_mapping)
+        elif block_device_mapping_v2:
+            # Append the image to the list only if we have new style BDMs
+            if image:
+                bdm_dict = {'uuid': image.id, 'source_type': 'image',
+                            'destination_type': 'local', 'boot_index': 0,
+                            'delete_on_termination': True}
+                block_device_mapping_v2.insert(0, bdm_dict)
+
+            body['server']['block_device_mapping_v2'] = block_device_mapping_v2
+
+        if nics is not None:
+            # NOTE(tr3buchet): nics can be an empty list
+            all_net_data = []
+            for nic_info in nics:
+                net_data = {}
+                # if value is empty string, do not send value in body
+                if nic_info.get('net-id'):
+                    net_data['uuid'] = nic_info['net-id']
+                if nic_info.get('v4-fixed-ip'):
+                    net_data['fixed_ip'] = nic_info['v4-fixed-ip']
+                if nic_info.get('port-id'):
+                    net_data['port'] = nic_info['port-id']
+                all_net_data.append(net_data)
+            body['server']['networks'] = all_net_data
+
+        if disk_config is not None:
+            body['server']['OS-DCF:diskConfig'] = disk_config
+
+        return self._create(resource_url, body, response_key,
+                            return_raw=return_raw, **kwargs)
+
+
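To make the body assembly above concrete, a minimal boot request ends up POSTing JSON of roughly this shape (the ids and names here are invented for illustration):

    body = {"server": {
        "name": "test-server",
        "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b",  # placeholder
        "flavorRef": "1",
        "min_count": 1,
        "max_count": 1,
        "networks": [{"uuid": "11111111-2222-3333-4444-555555555555"}],
    }}
    # _create() POSTs this to resource_url and wraps the response value
    # under response_key in the manager's resource_class.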
+class Resource(object):
+    """
+    A resource represents a particular instance of an object (server, flavor,
+    etc). This is pretty much just a bag for attributes.
+
+    :param manager: Manager object
+    :param info: dictionary representing resource attributes
+    :param loaded: prevent lazy-loading if set to True
+    """
+    HUMAN_ID = False
+    NAME_ATTR = 'name'
+
+    def __init__(self, manager, info, loaded=False):
+        self.manager = manager
+        self._info = info
+        self._add_details(info)
+        self._loaded = loaded
+
+        # NOTE(sirp): ensure `id` is already present because if it isn't we'll
+        # enter an infinite loop of __getattr__ -> get -> __init__ ->
+        # __getattr__ -> ...
+        if 'id' in self.__dict__ and len(str(self.id)) == 36:
+            self.manager.write_to_completion_cache('uuid', self.id)
+
+        human_id = self.human_id
+        if human_id:
+            self.manager.write_to_completion_cache('human_id', human_id)
+
+    @property
+    def human_id(self):
+        """Subclasses may override this to provide a pretty ID which can be
+        used for bash completion.
+        """
+        if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID:
+            return utils.slugify(getattr(self, self.NAME_ATTR))
+        return None
+
+    def _add_details(self, info):
+        for (k, v) in six.iteritems(info):
+            try:
+                setattr(self, k, v)
+                self._info[k] = v
+            except AttributeError:
+                # In this case we already defined the attribute on the class
+                pass
+
+    def __getattr__(self, k):
+        if k not in self.__dict__:
+            # NOTE(bcwaldon): disallow lazy-loading if already loaded once
+            if not self.is_loaded():
+                self.get()
+                return self.__getattr__(k)
+
+            raise AttributeError(k)
+        else:
+            return self.__dict__[k]
+
+    def __repr__(self):
+        reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
+                          k != 'manager')
+        info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
+        return "<%s %s>" % (self.__class__.__name__, info)
+
+    def get(self):
+        # set_loaded() first ... so if we have to bail, we know we tried.
+        self.set_loaded(True)
+        if not hasattr(self.manager, 'get'):
+            return
+
+        new = self.manager.get(self.id)
+        if new:
+            self._add_details(new._info)
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return False
+        if hasattr(self, 'id') and hasattr(other, 'id'):
+            return self.id == other.id
+        return self._info == other._info
+
+    def is_loaded(self):
+        return self._loaded
+
+    def set_loaded(self, val):
+        self._loaded = val
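A minimal sketch of the lazy-load round trip above, assuming `Resource` is importable from `novaclient.base` and using a toy manager (all names here are invented):

    from novaclient.base import Resource

    class FakeManager(object):
        def get(self, id):
            # pretend this hit the API and fetched the full record
            return Resource(self, {'id': id, 'status': 'ACTIVE'}, loaded=True)

    server = Resource(FakeManager(), {'id': 'abc'})  # partially loaded
    print(server.status)  # __getattr__ triggers get() once, then 'ACTIVE'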
+""" + +import logging +import os +import time + +import requests + +try: + import json +except ImportError: + import simplejson as json + +from novaclient import exceptions +from novaclient import service_catalog +from novaclient import utils +from novaclient.openstack.common.py3kcompat import urlutils + + +class HTTPClient(object): + + USER_AGENT = 'python-novaclient' + + def __init__(self, user, password, projectid=None, auth_url=None, + insecure=False, timeout=None, proxy_tenant_id=None, + proxy_token=None, region_name=None, + endpoint_type='publicURL', service_type=None, + service_name=None, volume_service_name=None, + timings=False, bypass_url=None, + os_cache=False, no_cache=True, + http_log_debug=False, auth_system='keystone', + auth_plugin=None, + cacert=None, tenant_id=None): + self.user = user + self.password = password + self.projectid = projectid + self.tenant_id = tenant_id + + if auth_system and auth_system != 'keystone' and not auth_plugin: + raise exceptions.AuthSystemNotFound(auth_system) + + if not auth_url and auth_system and auth_system != 'keystone': + auth_url = auth_plugin.get_auth_url() + if not auth_url: + raise exceptions.EndpointNotFound() + self.auth_url = auth_url.rstrip('/') + self.version = 'v1.1' + self.region_name = region_name + self.endpoint_type = endpoint_type + self.service_type = service_type + self.service_name = service_name + self.volume_service_name = volume_service_name + self.timings = timings + self.bypass_url = bypass_url + self.os_cache = os_cache or not no_cache + self.http_log_debug = http_log_debug + if timeout is not None: + self.timeout = float(timeout) + else: + self.timeout = None + + self.times = [] # [("item", starttime, endtime), ...] + + self.management_url = None + self.auth_token = None + self.proxy_token = proxy_token + self.proxy_tenant_id = proxy_tenant_id + self.keyring_saver = None + self.keyring_saved = False + + if insecure: + self.verify_cert = False + else: + if cacert: + self.verify_cert = cacert + else: + self.verify_cert = True + + self.auth_system = auth_system + self.auth_plugin = auth_plugin + + self._logger = logging.getLogger(__name__) + if self.http_log_debug and not self._logger.handlers: + # Logging level is already set on the root logger + ch = logging.StreamHandler() + self._logger.addHandler(ch) + self._logger.propagate = False + if hasattr(requests, 'logging'): + rql = requests.logging.getLogger(requests.__name__) + rql.addHandler(ch) + # Since we have already setup the root logger on debug, we + # have to set it up here on WARNING (its original level) + # otherwise we will get all the requests logging messanges + rql.setLevel(logging.WARNING) + # requests within the same session can reuse TCP connections from pool + self.http = requests.Session() + + def use_token_cache(self, use_it): + self.os_cache = use_it + + def unauthenticate(self): + """Forget all of our authentication information.""" + self.management_url = None + self.auth_token = None + + def set_management_url(self, url): + self.management_url = url + + def get_timings(self): + return self.times + + def reset_timings(self): + self.times = [] + + def http_log_req(self, args, kwargs): + if not self.http_log_debug: + return + + string_parts = ['curl -i'] + for element in args: + if element in ('GET', 'POST', 'DELETE', 'PUT'): + string_parts.append(' -X %s' % element) + else: + string_parts.append(' %s' % element) + + for element in kwargs['headers']: + header = ' -H "%s: %s"' % (element, kwargs['headers'][element]) + string_parts.append(header) + 
+    def http_log_req(self, args, kwargs):
+        if not self.http_log_debug:
+            return
+
+        string_parts = ['curl -i']
+        for element in args:
+            if element in ('GET', 'POST', 'DELETE', 'PUT'):
+                string_parts.append(' -X %s' % element)
+            else:
+                string_parts.append(' %s' % element)
+
+        for element in kwargs['headers']:
+            header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
+            string_parts.append(header)
+
+        if 'data' in kwargs:
+            string_parts.append(" -d '%s'" % (kwargs['data']))
+        self._logger.debug("\nREQ: %s\n" % "".join(string_parts))
+
+    def http_log_resp(self, resp):
+        if not self.http_log_debug:
+            return
+        self._logger.debug(
+            "RESP: [%s] %s\nRESP BODY: %s\n",
+            resp.status_code,
+            resp.headers,
+            resp.text)
+
+    def request(self, url, method, **kwargs):
+        kwargs.setdefault('headers', kwargs.get('headers', {}))
+        kwargs['headers']['User-Agent'] = self.USER_AGENT
+        kwargs['headers']['Accept'] = 'application/json'
+        if 'body' in kwargs:
+            kwargs['headers']['Content-Type'] = 'application/json'
+            kwargs['data'] = json.dumps(kwargs['body'])
+            del kwargs['body']
+        if self.timeout is not None:
+            kwargs.setdefault('timeout', self.timeout)
+
+        self.http_log_req((url, method,), kwargs)
+        resp = self.http.request(
+            method,
+            url,
+            verify=self.verify_cert,
+            **kwargs)
+        self.http_log_resp(resp)
+
+        if resp.text:
+            # TODO(dtroyer): verify the note below in a requests context
+            # NOTE(alaski): Because force_exceptions_to_status_code=True
+            # httplib2 returns a connection refused event as a 400 response.
+            # To determine if it is a bad request or refused connection we
+            # need to check the body. httplib2 tests check for 'Connection
+            # refused' or 'actively refused' in the body, so that's what
+            # we'll do.
+            if resp.status_code == 400:
+                if ('Connection refused' in resp.text or
+                        'actively refused' in resp.text):
+                    raise exceptions.ConnectionRefused(resp.text)
+            try:
+                body = json.loads(resp.text)
+            except ValueError:
+                body = None
+        else:
+            body = None
+
+        if resp.status_code >= 400:
+            raise exceptions.from_response(resp, body, url, method)
+
+        return resp, body
+
+    def _time_request(self, url, method, **kwargs):
+        start_time = time.time()
+        resp, body = self.request(url, method, **kwargs)
+        self.times.append(("%s %s" % (method, url),
+                           start_time, time.time()))
+        return resp, body
+
+    def _cs_request(self, url, method, **kwargs):
+        if not self.management_url:
+            self.authenticate()
+
+        # Perform the request once. If we get a 401 back then it
+        # might be because the auth token expired, so try to
+        # re-authenticate and try again. If it still fails, bail.
+        try:
+            kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
+            if self.projectid:
+                kwargs['headers']['X-Auth-Project-Id'] = self.projectid
+
+            resp, body = self._time_request(self.management_url + url, method,
+                                            **kwargs)
+            return resp, body
+        except exceptions.Unauthorized as e:
+            try:
+                # first discard the auth token, to avoid the possibly expired
+                # token being re-used in the re-authentication attempt
+                self.unauthenticate()
+                self.authenticate()
+                kwargs['headers']['X-Auth-Token'] = self.auth_token
+                resp, body = self._time_request(self.management_url + url,
+                                                method, **kwargs)
+                return resp, body
+            except exceptions.Unauthorized:
+                raise e
+
+    def get(self, url, **kwargs):
+        return self._cs_request(url, 'GET', **kwargs)
+
+    def post(self, url, **kwargs):
+        return self._cs_request(url, 'POST', **kwargs)
+
+    def put(self, url, **kwargs):
+        return self._cs_request(url, 'PUT', **kwargs)
+
+    def delete(self, url, **kwargs):
+        return self._cs_request(url, 'DELETE', **kwargs)
+
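Typical use of the verb helpers above (illustrative; `cs` is assumed to be an authenticated `HTTPClient`): paths are relative to `management_url`, and the body comes back already JSON-decoded:

    resp, body = cs.get('/servers/detail')
    for server in body['servers']:
        print(server['id'], server['name'])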
+    def _extract_service_catalog(self, url, resp, body, extract_token=True):
+        """See what the auth service told us and process the response.
+        We may get redirected to another site, fail or actually get
+        back a service catalog with a token and our endpoints.
+        """
+
+        # content must always be present
+        if resp.status_code == 200 or resp.status_code == 201:
+            try:
+                self.auth_url = url
+                self.service_catalog = \
+                    service_catalog.ServiceCatalog(body)
+                if extract_token:
+                    self.auth_token = self.service_catalog.get_token()
+                    self.tenant_id = self.service_catalog.get_tenant_id()
+
+                management_url = self.service_catalog.url_for(
+                    attr='region',
+                    filter_value=self.region_name,
+                    endpoint_type=self.endpoint_type,
+                    service_type=self.service_type,
+                    service_name=self.service_name,
+                    volume_service_name=self.volume_service_name,)
+                self.management_url = management_url.rstrip('/')
+                return None
+            except exceptions.AmbiguousEndpoints:
+                print("Found more than one valid endpoint. Use a more "
+                      "restrictive filter")
+                raise
+            except KeyError:
+                raise exceptions.AuthorizationFailure()
+            except exceptions.EndpointNotFound:
+                print("Could not find any suitable endpoint. Correct region?")
+                raise
+
+        elif resp.status_code == 305:
+            return resp.headers['location']
+        else:
+            raise exceptions.from_response(resp, body, url)
+
+    def _fetch_endpoints_from_auth(self, url):
+        """We have a token, but don't know the final endpoint for
+        the region. We have to go back to the auth service and
+        ask again. This request requires an admin-level token
+        to work. The proxy token supplied could be from a low-level end user.
+
+        We can't get this from the keystone service endpoint, we have to use
+        the admin endpoint.
+
+        This will overwrite our admin token with the user token.
+        """
+
+        # GET ...:5001/v2.0/tokens/#####/endpoints
+        url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
+                        % (self.proxy_token, self.proxy_tenant_id)])
+        self._logger.debug("Using Endpoint URL: %s" % url)
+        resp, body = self._time_request(
+            url, "GET", headers={'X-Auth-Token': self.auth_token})
+        return self._extract_service_catalog(url, resp, body,
+                                             extract_token=False)
+
+    def authenticate(self):
+        magic_tuple = urlutils.urlsplit(self.auth_url)
+        scheme, netloc, path, query, frag = magic_tuple
+        port = magic_tuple.port
+        if port is None:
+            port = 80
+        path_parts = path.split('/')
+        for part in path_parts:
+            if len(part) > 0 and part[0] == 'v':
+                self.version = part
+                break
+
+        # TODO(sandy): Assume admin endpoint is 35357 for now.
+        # Ideally this is going to have to be provided by the service catalog.
+        new_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
+        admin_url = urlutils.urlunsplit(
+            (scheme, new_netloc, path, query, frag))
+
+        # FIXME(chmouel): This is to handle backward compatibility when
+        # we didn't have a plugin mechanism for the auth_system. This
+        # should be removed in the future and have people move to
+        # OS_AUTH_SYSTEM=rackspace instead.
+        if "NOVA_RAX_AUTH" in os.environ:
+            self.auth_system = "rackspace"
+
+        auth_url = self.auth_url
+        if self.version == "v2.0":  # FIXME(chris): This should be better.
+            while auth_url:
+                if not self.auth_system or self.auth_system == 'keystone':
+                    auth_url = self._v2_auth(auth_url)
+                else:
+                    auth_url = self._plugin_auth(auth_url)
+
+            # Are we acting on behalf of another user via an
+            # existing token? If so, our actual endpoints may
+            # be different from those of the admin token.
+ if self.proxy_token: + if self.bypass_url: + self.set_management_url(self.bypass_url) + else: + self._fetch_endpoints_from_auth(admin_url) + # Since keystone no longer returns the user token + # with the endpoints any more, we need to replace + # our service account token with the user token. + self.auth_token = self.proxy_token + else: + try: + while auth_url: + auth_url = self._v1_auth(auth_url) + # In some configurations nova makes redirection to + # v2.0 keystone endpoint. Also, new location does not contain + # real endpoint, only hostname and port. + except exceptions.AuthorizationFailure: + if auth_url.find('v2.0') < 0: + auth_url = auth_url + '/v2.0' + self._v2_auth(auth_url) + + if self.bypass_url: + self.set_management_url(self.bypass_url) + elif not self.management_url: + raise exceptions.Unauthorized('Nova Client') + + # Store the token/mgmt url in the keyring for later requests. + if self.keyring_saver and self.os_cache and not self.keyring_saved: + self.keyring_saver.save(self.auth_token, + self.management_url, + self.tenant_id) + # Don't save it again + self.keyring_saved = True + + def _v1_auth(self, url): + if self.proxy_token: + raise exceptions.NoTokenLookupException() + + headers = {'X-Auth-User': self.user, + 'X-Auth-Key': self.password} + if self.projectid: + headers['X-Auth-Project-Id'] = self.projectid + + resp, body = self._time_request(url, 'GET', headers=headers) + if resp.status_code in (200, 204): # in some cases we get No Content + try: + mgmt_header = 'x-server-management-url' + self.management_url = resp.headers[mgmt_header].rstrip('/') + self.auth_token = resp.headers['x-auth-token'] + self.auth_url = url + except (KeyError, TypeError): + raise exceptions.AuthorizationFailure() + elif resp.status_code == 305: + return resp.headers['location'] + else: + raise exceptions.from_response(resp, body, url) + + def _plugin_auth(self, auth_url): + return self.auth_plugin.authenticate(self, auth_url) + + def _v2_auth(self, url): + """Authenticate against a v2.0 auth service.""" + if self.auth_token: + body = {"auth": { + "token": {"id": self.auth_token}}} + else: + body = {"auth": { + "passwordCredentials": {"username": self.user, + "password": self.password}}} + + if self.tenant_id: + body['auth']['tenantId'] = self.tenant_id + elif self.projectid: + body['auth']['tenantName'] = self.projectid + + return self._authenticate(url, body) + + def _authenticate(self, url, body, **kwargs): + """Authenticate and extract the service catalog.""" + token_url = url + "/tokens" + + # Make sure we follow redirects when trying to reach Keystone + resp, body = self._time_request( + token_url, + "POST", + body=body, + allow_redirects=True, + **kwargs) + + return self._extract_service_catalog(url, resp, body) + + +def get_client_class(version): + version_map = { + '1.1': 'novaclient.v1_1.client.Client', + '2': 'novaclient.v1_1.client.Client', + '3': 'novaclient.v3.client.Client', + } + try: + client_path = version_map[str(version)] + except (KeyError, ValueError): + msg = "Invalid client version '%s'. 
must be one of: %s" % (
+            version, ', '.join(version_map.keys()))
+        raise exceptions.UnsupportedVersion(msg)
+
+    return utils.import_class(client_path)
+
+
+def Client(version, *args, **kwargs):
+    client_class = get_client_class(version)
+    return client_class(*args, **kwargs)
diff --git a/awx/lib/site-packages/novaclient/crypto.py b/awx/lib/site-packages/novaclient/crypto.py
new file mode 100644
index 0000000000..a264823ad2
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/crypto.py
@@ -0,0 +1,37 @@
+# Copyright 2013 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.

+import base64
+import subprocess
+
+
+class DecryptionFailure(Exception):
+    pass
+
+
+def decrypt_password(private_key, password):
+    """Base64-decodes the password and decrypts it with the private key.
+
+    Requires the openssl binary to be available on the path."""
+    unencoded = base64.b64decode(password)
+    cmd = ['openssl', 'rsautl', '-decrypt', '-inkey', private_key]
+    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    out, err = proc.communicate(unencoded)
+    proc.stdin.close()
+    if proc.returncode:
+        raise DecryptionFailure(err)
+    return out
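A hedged usage sketch for `decrypt_password` above; the key path and ciphertext are placeholders for the base64-encoded, RSA-encrypted password a compute API can hand back:

    from novaclient import crypto

    encrypted = 'base64-blob-from-the-api'  # placeholder, not real data
    try:
        plain = crypto.decrypt_password('/home/user/.ssh/id_rsa', encrypted)
    except crypto.DecryptionFailure as e:
        print('openssl failed: %s' % e)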
+ """ + def __init__(self, code, message=None, details=None, request_id=None, + url=None, method=None): + self.code = code + self.message = message or self.__class__.message + self.details = details + self.request_id = request_id + self.url = url + self.method = method + + def __str__(self): + formatted_string = "%s (HTTP %s)" % (self.message, self.code) + if self.request_id: + formatted_string += " (Request-ID: %s)" % self.request_id + + return formatted_string + + +class BadRequest(ClientException): + """ + HTTP 400 - Bad request: you sent some malformed data. + """ + http_status = 400 + message = "Bad request" + + +class Unauthorized(ClientException): + """ + HTTP 401 - Unauthorized: bad credentials. + """ + http_status = 401 + message = "Unauthorized" + + +class Forbidden(ClientException): + """ + HTTP 403 - Forbidden: your credentials don't give you access to this + resource. + """ + http_status = 403 + message = "Forbidden" + + +class NotFound(ClientException): + """ + HTTP 404 - Not found + """ + http_status = 404 + message = "Not found" + + +class MethodNotAllowed(ClientException): + """ + HTTP 405 - Method Not Allowed + """ + http_status = 405 + message = "Method Not Allowed" + + +class Conflict(ClientException): + """ + HTTP 409 - Conflict + """ + http_status = 409 + message = "Conflict" + + +class OverLimit(ClientException): + """ + HTTP 413 - Over limit: you're over the API limits for this time period. + """ + http_status = 413 + message = "Over limit" + + def __init__(self, *args, **kwargs): + try: + self.retry_after = int(kwargs.pop('retry_after')) + except (KeyError, ValueError): + self.retry_after = 0 + + super(OverLimit, self).__init__(*args, **kwargs) + + +class RateLimit(OverLimit): + """ + HTTP 429 - Rate limit: you've sent too many requests for this time period. + """ + http_status = 429 + message = "Rate limit" + + +# NotImplemented is a python keyword. +class HTTPNotImplemented(ClientException): + """ + HTTP 501 - Not Implemented: the server does not support this operation. + """ + http_status = 501 + message = "Not Implemented" + + +# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__() +# so we can do this: +# _code_map = dict((c.http_status, c) +# for c in ClientException.__subclasses__()) +# +# Instead, we have to hardcode it: +_error_classes = [BadRequest, Unauthorized, Forbidden, NotFound, + MethodNotAllowed, Conflict, OverLimit, RateLimit, + HTTPNotImplemented] +_code_map = dict((c.http_status, c) for c in _error_classes) + + +def from_response(response, body, url, method=None): + """ + Return an instance of an ClientException or subclass + based on an requests response. + + Usage:: + + resp, body = requests.request(...) 
+def from_response(response, body, url, method=None):
+    """
+    Return an instance of a ClientException subclass based on a
+    requests response.
+
+    Usage::
+
+        resp, body = requests.request(...)
+        if resp.status_code != 200:
+            raise from_response(resp, body, url, method)
+    """
+    kwargs = {
+        'code': response.status_code,
+        'method': method,
+        'url': url,
+        'request_id': None,
+    }
+
+    if response.headers:
+        kwargs['request_id'] = response.headers.get('x-compute-request-id')
+
+    if 'retry-after' in response.headers:
+        kwargs['retry_after'] = response.headers.get('retry-after')
+
+    if body:
+        message = "n/a"
+        details = "n/a"
+
+        if hasattr(body, 'keys'):
+            error = body[body.keys()[0]]
+            message = error.get('message', None)
+            details = error.get('details', None)
+
+        kwargs['message'] = message
+        kwargs['details'] = details
+
+    cls = _code_map.get(response.status_code, ClientException)
+    return cls(**kwargs)
diff --git a/awx/lib/site-packages/novaclient/extension.py b/awx/lib/site-packages/novaclient/extension.py
new file mode 100644
index 0000000000..ac105070a6
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/extension.py
@@ -0,0 +1,39 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.

+from novaclient import base
+from novaclient import utils
+
+
+class Extension(utils.HookableMixin):
+    """Extension descriptor."""
+
+    SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
+
+    def __init__(self, name, module):
+        self.name = name
+        self.module = module
+        self._parse_extension_module()
+
+    def _parse_extension_module(self):
+        self.manager_class = None
+        for attr_name, attr_value in self.module.__dict__.items():
+            if attr_name in self.SUPPORTED_HOOKS:
+                self.add_hook(attr_name, attr_value)
+            elif utils.safe_issubclass(attr_value, base.Manager):
+                self.manager_class = attr_value
+
+    def __repr__(self):
+        return "<Extension '%s'>" % self.name
diff --git a/awx/lib/site-packages/novaclient/openstack/__init__.py b/awx/lib/site-packages/novaclient/openstack/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/novaclient/openstack/common/__init__.py b/awx/lib/site-packages/novaclient/openstack/common/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/novaclient/openstack/common/gettextutils.py b/awx/lib/site-packages/novaclient/openstack/common/gettextutils.py
new file mode 100644
index 0000000000..01a4ce59ab
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/openstack/common/gettextutils.py
@@ -0,0 +1,325 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4

+# Copyright 2012 Red Hat, Inc.
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. + +Usual usage in an openstack.common module: + + from novaclient.openstack.common.gettextutils import _ +""" + +import copy +import gettext +import logging +import os +import re +try: + import UserString as _userString +except ImportError: + import collections as _userString + +from babel import localedata +import six + +_localedir = os.environ.get('novaclient'.upper() + '_LOCALEDIR') +_t = gettext.translation('novaclient', localedir=_localedir, fallback=True) + +_AVAILABLE_LANGUAGES = {} +USE_LAZY = False + + +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext + + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + global USE_LAZY + USE_LAZY = True + + +def _(msg): + if USE_LAZY: + return Message(msg, 'novaclient') + else: + return _t.ugettext(msg) + + +def install(domain, lazy=False): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + :param domain: the translation domain + :param lazy: indicates whether or not to install the lazy _() function. + The lazy _() introduces a way to do deferred translation + of messages by installing a _ that builds Message objects, + instead of strings, which can then be lazily translated into + any available locale. + """ + if lazy: + # NOTE(mrodden): Lazy gettext functionality. + # + # The following introduces a deferred way to do translations on + # messages in OpenStack. We override the standard _() function + # and % (format string) operation to build Message objects that can + # later be translated when we have more information. + # + # Also included below is an example LocaleHandler that translates + # Messages to an associated locale, effectively allowing many logs, + # each with their own locale. + + def _lazy_gettext(msg): + """Create and return a Message object. + + Lazy gettext function for a given domain, it is a factory method + for a project/module to get a lazy gettext function for its own + translation domain (i.e. nova, glance, cinder, etc.) + + Message encapsulates a string so that we can translate + it later when needed. 
+ """ + return Message(msg, domain) + + import __builtin__ + __builtin__.__dict__['_'] = _lazy_gettext + else: + localedir = '%s_LOCALEDIR' % domain.upper() + gettext.install(domain, + localedir=os.environ.get(localedir), + unicode=True) + + +class Message(_userString.UserString, object): + """Class used to encapsulate translatable messages.""" + def __init__(self, msg, domain): + # _msg is the gettext msgid and should never change + self._msg = msg + self._left_extra_msg = '' + self._right_extra_msg = '' + self.params = None + self.locale = None + self.domain = domain + + @property + def data(self): + # NOTE(mrodden): this should always resolve to a unicode string + # that best represents the state of the message currently + + localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR') + if self.locale: + lang = gettext.translation(self.domain, + localedir=localedir, + languages=[self.locale], + fallback=True) + else: + # use system locale for translations + lang = gettext.translation(self.domain, + localedir=localedir, + fallback=True) + + full_msg = (self._left_extra_msg + + lang.ugettext(self._msg) + + self._right_extra_msg) + + if self.params is not None: + full_msg = full_msg % self.params + + return six.text_type(full_msg) + + def _save_dictionary_parameter(self, dict_param): + full_msg = self.data + # look for %(blah) fields in string; + # ignore %% and deal with the + # case where % is first character on the line + keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg) + + # if we don't find any %(blah) blocks but have a %s + if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg): + # apparently the full dictionary is the parameter + params = copy.deepcopy(dict_param) + else: + params = {} + for key in keys: + try: + params[key] = copy.deepcopy(dict_param[key]) + except TypeError: + # cast uncopyable thing to unicode string + params[key] = unicode(dict_param[key]) + + return params + + def _save_parameters(self, other): + # we check for None later to see if + # we actually have parameters to inject, + # so encapsulate if our parameter is actually None + if other is None: + self.params = (other, ) + elif isinstance(other, dict): + self.params = self._save_dictionary_parameter(other) + else: + # fallback to casting to unicode, + # this will handle the problematic python code-like + # objects that cannot be deep-copied + try: + self.params = copy.deepcopy(other) + except TypeError: + self.params = unicode(other) + + return self + + # overrides to be more string-like + def __unicode__(self): + return self.data + + def __str__(self): + return self.data.encode('utf-8') + + def __getstate__(self): + to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg', + 'domain', 'params', 'locale'] + new_dict = self.__dict__.fromkeys(to_copy) + for attr in to_copy: + new_dict[attr] = copy.deepcopy(self.__dict__[attr]) + + return new_dict + + def __setstate__(self, state): + for (k, v) in state.items(): + setattr(self, k, v) + + # operator overloads + def __add__(self, other): + copied = copy.deepcopy(self) + copied._right_extra_msg += other.__str__() + return copied + + def __radd__(self, other): + copied = copy.deepcopy(self) + copied._left_extra_msg += other.__str__() + return copied + + def __mod__(self, other): + # do a format string to catch and raise + # any possible KeyErrors from missing parameters + self.data % other + copied = copy.deepcopy(self) + return copied._save_parameters(other) + + def __mul__(self, other): + return self.data * other + + def __rmul__(self, other): + return 
other * self.data
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    def __getslice__(self, start, end):
+        return self.data.__getslice__(start, end)
+
+    def __getattribute__(self, name):
+        # NOTE(mrodden): handle lossy operations that we can't deal with yet
+        # These override the UserString implementation, since UserString
+        # uses our __class__ attribute to try and build a new message
+        # after running the inner data string through the operation.
+        # At that point, we have lost the gettext message id and can just
+        # safely resolve to a string instead.
+        ops = ['capitalize', 'center', 'decode', 'encode',
+               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
+               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
+        if name in ops:
+            return getattr(self.data, name)
+        else:
+            return _userString.UserString.__getattribute__(self, name)
+
+
+def get_available_languages(domain):
+    """Lists the available languages for the given translation domain.
+
+    :param domain: the domain to get languages for
+    """
+    if domain in _AVAILABLE_LANGUAGES:
+        return copy.copy(_AVAILABLE_LANGUAGES[domain])
+
+    localedir = '%s_LOCALEDIR' % domain.upper()
+    find = lambda x: gettext.find(domain,
+                                  localedir=os.environ.get(localedir),
+                                  languages=[x])
+
+    # NOTE(mrodden): en_US should always be available (and first in case
+    # order matters) since our in-line message strings are en_US
+    language_list = ['en_US']
+    # NOTE(luisg): Babel <1.0 used a function called list(), which was
+    # renamed to locale_identifiers() in >=1.0; the requirements master list
+    # requires >=0.9.6, uncapped, so defensively work with both. We can
+    # remove this check when the master list updates to >=1.0, and all
+    # projects update.
+    list_identifiers = (getattr(localedata, 'list', None) or
+                        getattr(localedata, 'locale_identifiers'))
+    locale_identifiers = list_identifiers()
+    for i in locale_identifiers:
+        if find(i) is not None:
+            language_list.append(i)
+    _AVAILABLE_LANGUAGES[domain] = language_list
+    return copy.copy(language_list)
+
+
+def get_localized_message(message, user_locale):
+    """Gets a localized version of the given message in the given locale."""
+    if isinstance(message, Message):
+        if user_locale:
+            message.locale = user_locale
+        return unicode(message)
+    else:
+        return message
+
+
+class LocaleHandler(logging.Handler):
+    """Handler that can have a locale associated with it to translate
+    Messages.
+
+    A quick example of how to utilize the Message class above.
+    LocaleHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating the internal Message.
+    """
+
+    def __init__(self, locale, target):
+        """Initialize a LocaleHandler
+
+        :param locale: locale to use for translating messages
+        :param target: logging.Handler object to forward
+                       LogRecord objects to after translation
+        """
+        logging.Handler.__init__(self)
+        self.locale = locale
+        self.target = target
+
+    def emit(self, record):
+        if isinstance(record.msg, Message):
+            # set the locale and resolve to a string
+            record.msg.locale = self.locale
+
+        self.target.emit(record)
diff --git a/awx/lib/site-packages/novaclient/openstack/common/py3kcompat/__init__.py b/awx/lib/site-packages/novaclient/openstack/common/py3kcompat/__init__.py
new file mode 100644
index 0000000000..be894cf506
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/openstack/common/py3kcompat/__init__.py
@@ -0,0 +1,17 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 Canonical Ltd.
+# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# diff --git a/awx/lib/site-packages/novaclient/openstack/common/py3kcompat/urlutils.py b/awx/lib/site-packages/novaclient/openstack/common/py3kcompat/urlutils.py new file mode 100644 index 0000000000..04b3418dae --- /dev/null +++ b/awx/lib/site-packages/novaclient/openstack/common/py3kcompat/urlutils.py @@ -0,0 +1,49 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +""" +Python2/Python3 compatibility layer for OpenStack +""" + +import six + +if six.PY3: + # python3 + import urllib.parse + + urlencode = urllib.parse.urlencode + urljoin = urllib.parse.urljoin + quote = urllib.parse.quote + parse_qsl = urllib.parse.parse_qsl + urlparse = urllib.parse.urlparse + urlsplit = urllib.parse.urlsplit + urlunsplit = urllib.parse.urlunsplit +else: + # python2 + import urllib + import urlparse + + urlencode = urllib.urlencode + quote = urllib.quote + + parse = urlparse + parse_qsl = parse.parse_qsl + urljoin = parse.urljoin + urlparse = parse.urlparse + urlsplit = parse.urlsplit + urlunsplit = parse.urlunsplit diff --git a/awx/lib/site-packages/novaclient/openstack/common/strutils.py b/awx/lib/site-packages/novaclient/openstack/common/strutils.py new file mode 100644 index 0000000000..ef139aaade --- /dev/null +++ b/awx/lib/site-packages/novaclient/openstack/common/strutils.py @@ -0,0 +1,218 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. 
+""" + +import re +import sys +import unicodedata + +import six + +from novaclient.openstack.common.gettextutils import _ # noqa + + +# Used for looking up extensions of text +# to their 'multiplied' byte amount +BYTE_MULTIPLIERS = { + '': 1, + 't': 1024 ** 4, + 'g': 1024 ** 3, + 'm': 1024 ** 2, + 'k': 1024, +} +BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)') + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + + +def int_from_bool_as_string(subject): + """Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else is considered False. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, six.string_types): + subject = str(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return False + + +def safe_decode(text, incoming=None, errors='strict'): + """Decodes incoming str using `incoming` if they're not already unicode. + + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. + :raises TypeError: If text is not an isntance of str + """ + if not isinstance(text, six.string_types): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, six.text_type): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. + return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """Encodes incoming str/unicode using `encoding`. + + If incoming is not specified, text is expected to be encoded with + current python's default encoding. 
+def safe_encode(text, incoming=None,
+                encoding='utf-8', errors='strict'):
+    """Encodes incoming str/unicode using `encoding`.
+
+    If incoming is not specified, text is expected to be encoded with
+    current python's default encoding (`sys.getdefaultencoding`).
+
+    :param incoming: Text's current encoding
+    :param encoding: Expected encoding for text (Default UTF-8)
+    :param errors: Errors handling policy. See here for valid
+        values http://docs.python.org/2/library/codecs.html
+    :returns: text or a bytestring `encoding` encoded
+        representation of it.
+    :raises TypeError: If text is not an instance of str
+    """
+    if not isinstance(text, six.string_types):
+        raise TypeError("%s can't be encoded" % type(text))
+
+    if not incoming:
+        incoming = (sys.stdin.encoding or
+                    sys.getdefaultencoding())
+
+    if isinstance(text, six.text_type):
+        return text.encode(encoding, errors)
+    elif text and encoding != incoming:
+        # Decode text before encoding it with `encoding`
+        text = safe_decode(text, incoming, errors)
+        return text.encode(encoding, errors)
+
+    return text
+
+
+def to_bytes(text, default=0):
+    """Converts a string into an integer of bytes.
+
+    Looks at the last characters of the text to determine
+    what conversion is needed to turn the input text into a byte number.
+    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
+
+    :param text: String input for bytes size conversion.
+    :param default: Default return value when text is blank.
+
+    """
+    match = BYTE_REGEX.search(text)
+    if match:
+        magnitude = int(match.group(1))
+        mult_key_org = match.group(2)
+        if not mult_key_org:
+            return magnitude
+    elif text:
+        msg = _('Invalid string format: %s') % text
+        raise TypeError(msg)
+    else:
+        return default
+    mult_key = mult_key_org.lower().replace('b', '', 1)
+    multiplier = BYTE_MULTIPLIERS.get(mult_key)
+    if multiplier is None:
+        msg = _('Unknown byte multiplier: %s') % mult_key_org
+        raise TypeError(msg)
+    return magnitude * multiplier
+
+
+def to_slug(value, incoming=None, errors="strict"):
+    """Normalize string.
+
+    Convert to lowercase, remove non-word characters, and convert spaces
+    to hyphens.
+
+    Inspired by Django's `slugify` filter.
+
+    :param value: Text to slugify
+    :param incoming: Text's current encoding
+    :param errors: Errors handling policy. See here for valid
+        values http://docs.python.org/2/library/codecs.html
+    :returns: slugified unicode representation of `value`
+    :raises TypeError: If text is not an instance of str
+    """
+    value = safe_decode(value, incoming, errors)
+    # NOTE(aababilov): no need to use safe_(encode|decode) here:
+    # encodings are always "ascii", error handling is always "ignore"
+    # and types are always known (first: unicode; second: str)
+    value = unicodedata.normalize("NFKD", value).encode(
+        "ascii", "ignore").decode("ascii")
+    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
+    return SLUGIFY_HYPHENATE_RE.sub("-", value)
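And the size/slug helpers, with a few sample values:

    from novaclient.openstack.common.strutils import to_bytes, to_slug

    print(to_bytes('10K'))   # 10240
    print(to_bytes('2G'))    # 2147483648
    print(to_bytes(''))      # 0 (the default)
    print(to_slug(u'Hello,  World!'))  # hello-world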
diff --git a/awx/lib/site-packages/novaclient/openstack/common/timeutils.py b/awx/lib/site-packages/novaclient/openstack/common/timeutils.py
new file mode 100644
index 0000000000..60f02bcb97
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/openstack/common/timeutils.py
@@ -0,0 +1,194 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4

+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.

+"""
+Time related utilities and helper functions.
+"""
+
+import calendar
+import datetime
+import time
+
+import iso8601
+import six
+
+
+# ISO 8601 extended time format with microseconds
+_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
+_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
+PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
+
+
+def isotime(at=None, subsecond=False):
+    """Stringify time in ISO 8601 format."""
+    if not at:
+        at = utcnow()
+    st = at.strftime(_ISO8601_TIME_FORMAT
+                     if not subsecond
+                     else _ISO8601_TIME_FORMAT_SUBSECOND)
+    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
+    st += ('Z' if tz == 'UTC' else tz)
+    return st
+
+
+def parse_isotime(timestr):
+    """Parse time from ISO 8601 format."""
+    try:
+        return iso8601.parse_date(timestr)
+    except iso8601.ParseError as e:
+        raise ValueError(unicode(e))
+    except TypeError as e:
+        raise ValueError(unicode(e))
+
+
+def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
+    """Returns formatted utcnow."""
+    if not at:
+        at = utcnow()
+    return at.strftime(fmt)
+
+
+def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
+    """Turn a formatted time back into a datetime."""
+    return datetime.datetime.strptime(timestr, fmt)
+
+
+def normalize_time(timestamp):
+    """Normalize time in arbitrary timezone to UTC naive object."""
+    offset = timestamp.utcoffset()
+    if offset is None:
+        return timestamp
+    return timestamp.replace(tzinfo=None) - offset
+
+
+def is_older_than(before, seconds):
+    """Return True if before is older than seconds."""
+    if isinstance(before, six.string_types):
+        before = parse_strtime(before).replace(tzinfo=None)
+    return utcnow() - before > datetime.timedelta(seconds=seconds)
+
+
+def is_newer_than(after, seconds):
+    """Return True if after is newer than seconds."""
+    if isinstance(after, six.string_types):
+        after = parse_strtime(after).replace(tzinfo=None)
+    return after - utcnow() > datetime.timedelta(seconds=seconds)
+
+
+def utcnow_ts():
+    """Timestamp version of our utcnow function."""
+    if utcnow.override_time is None:
+        # NOTE(kgriffs): This is several times faster
+        # than going through calendar.timegm(...)
+        return int(time.time())
+
+    return calendar.timegm(utcnow().timetuple())
+
+
+def utcnow():
+    """Overridable version of utils.utcnow."""
+    if utcnow.override_time:
+        try:
+            return utcnow.override_time.pop(0)
+        except AttributeError:
+            return utcnow.override_time
+    return datetime.datetime.utcnow()
+
+
+def iso8601_from_timestamp(timestamp):
+    """Returns an ISO 8601 formatted date from a timestamp."""
+    return isotime(datetime.datetime.utcfromtimestamp(timestamp))
+
+
+utcnow.override_time = None
+
+
+def set_time_override(override_time=datetime.datetime.utcnow()):
+    """Overrides utils.utcnow.
+
+    Make it return a constant time or a list thereof, one at a time.
+    """
+    utcnow.override_time = override_time
+
+
+def advance_time_delta(timedelta):
+    """Advance overridden time using a datetime.timedelta."""
+    assert utcnow.override_time is not None
+    try:
+        for dt in utcnow.override_time:
+            dt += timedelta
+    except TypeError:
+        utcnow.override_time += timedelta
+
+
+def advance_time_seconds(seconds):
+    """Advance overridden time by seconds."""
+    advance_time_delta(datetime.timedelta(0, seconds))
+
+
+def clear_time_override():
+    """Remove the overridden time."""
+    utcnow.override_time = None
+
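The override hooks above exist mainly so tests can freeze the clock; an illustrative sketch:

    from novaclient.openstack.common import timeutils
    import datetime

    timeutils.set_time_override(datetime.datetime(2013, 9, 1, 12, 0, 0))
    print(timeutils.isotime())    # 2013-09-01T12:00:00Z
    timeutils.advance_time_seconds(90)
    print(timeutils.utcnow())     # 2013-09-01 12:01:30
    timeutils.clear_time_override()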
+def marshall_now(now=None):
+    """Make an rpc-safe datetime with microseconds.
+
+    Note: tzinfo is stripped, but not required for relative times.
+    """
+    if not now:
+        now = utcnow()
+    return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
+                minute=now.minute, second=now.second,
+                microsecond=now.microsecond)
+
+
+def unmarshall_time(tyme):
+    """Unmarshall a datetime dict."""
+    return datetime.datetime(day=tyme['day'],
+                             month=tyme['month'],
+                             year=tyme['year'],
+                             hour=tyme['hour'],
+                             minute=tyme['minute'],
+                             second=tyme['second'],
+                             microsecond=tyme['microsecond'])
+
+
+def delta_seconds(before, after):
+    """Return the difference between two timing objects.
+
+    Compute the difference in seconds between two date, time, or
+    datetime objects (as a float, to microsecond resolution).
+    """
+    delta = after - before
+    try:
+        return delta.total_seconds()
+    except AttributeError:
+        return ((delta.days * 24 * 3600) + delta.seconds +
+                float(delta.microseconds) / (10 ** 6))
+
+
+def is_soon(dt, window):
+    """Determine whether a time will occur within the next `window` seconds.
+
+    :param dt: the time
+    :param window: minimum seconds to remain to consider the time not soon
+
+    :return: True if expiration is within the given duration
+    """
+    soon = (utcnow() + datetime.timedelta(seconds=window))
+    return normalize_time(dt) <= soon
diff --git a/awx/lib/site-packages/novaclient/openstack/common/uuidutils.py b/awx/lib/site-packages/novaclient/openstack/common/uuidutils.py
new file mode 100644
index 0000000000..7608acb942
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/openstack/common/uuidutils.py
@@ -0,0 +1,39 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4

+# Copyright (c) 2012 Intel Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.

+"""
+UUID related utilities and helper functions.
+"""
+
+import uuid
+
+
+def generate_uuid():
+    return str(uuid.uuid4())
+
+
+def is_uuid_like(val):
+    """Returns validation of a value as a UUID.
+
+    For our purposes, a UUID is a canonical form string:
+    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+
+    """
+    try:
+        return str(uuid.UUID(val)) == val
+    except (TypeError, ValueError, AttributeError):
+        return False
diff --git a/awx/lib/site-packages/novaclient/service_catalog.py b/awx/lib/site-packages/novaclient/service_catalog.py
new file mode 100644
index 0000000000..2c4c997167
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/service_catalog.py
@@ -0,0 +1,88 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2011, Piston Cloud Computing, Inc.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import novaclient.exceptions + + +class ServiceCatalog(object): + """Helper methods for dealing with a Keystone Service Catalog.""" + + def __init__(self, resource_dict): + self.catalog = resource_dict + + def get_token(self): + return self.catalog['access']['token']['id'] + + def get_tenant_id(self): + return self.catalog['access']['token']['tenant']['id'] + + def url_for(self, attr=None, filter_value=None, + service_type=None, endpoint_type='publicURL', + service_name=None, volume_service_name=None): + """Fetch the public URL from the Compute service for + a particular endpoint attribute. If none given, return + the first. See tests for sample service catalog.""" + matching_endpoints = [] + if 'endpoints' in self.catalog: + # We have a bastardized service catalog. Treat it special. :/ + for endpoint in self.catalog['endpoints']: + if not filter_value or endpoint[attr] == filter_value: + # Ignore 1.0 compute endpoints + if endpoint.get("type") == 'compute' and \ + endpoint.get('versionId') in (None, '1.1', '2'): + matching_endpoints.append(endpoint) + if not matching_endpoints: + raise novaclient.exceptions.EndpointNotFound() + + # We don't always get a service catalog back ... + if 'serviceCatalog' not in self.catalog['access']: + return None + + # Full catalog ... + catalog = self.catalog['access']['serviceCatalog'] + + for service in catalog: + if service.get("type") != service_type: + continue + + if (service_name and service_type == 'compute' and + service.get('name') != service_name): + continue + + if (volume_service_name and service_type == 'volume' and + service.get('name') != volume_service_name): + continue + + endpoints = service['endpoints'] + for endpoint in endpoints: + # Ignore 1.0 compute endpoints + if service.get("type") == 'compute' and \ + endpoint.get('versionId', '2') not in ('1.1', '2'): + continue + if not filter_value or \ + endpoint.get(attr).lower() == filter_value.lower(): + endpoint["serviceName"] = service.get("name") + matching_endpoints.append(endpoint) + + if not matching_endpoints: + raise novaclient.exceptions.EndpointNotFound() + elif len(matching_endpoints) > 1: + raise novaclient.exceptions.AmbiguousEndpoints( + endpoints=matching_endpoints) + else: + return matching_endpoints[0][endpoint_type] diff --git a/awx/lib/site-packages/novaclient/shell.py b/awx/lib/site-packages/novaclient/shell.py new file mode 100644 index 0000000000..8e78608792 --- /dev/null +++ b/awx/lib/site-packages/novaclient/shell.py @@ -0,0 +1,747 @@ +# Copyright 2010 Jacob Kaplan-Moss +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Command-line interface to the OpenStack Nova API. 
+""" + +from __future__ import print_function +import argparse +import getpass +import glob +import imp +import itertools +import logging +import os +import pkgutil +import sys + +import pkg_resources +import six + +HAS_KEYRING = False +all_errors = ValueError +try: + import keyring + HAS_KEYRING = True + try: + if isinstance(keyring.get_keyring(), keyring.backend.GnomeKeyring): + import gnomekeyring + all_errors = (ValueError, + gnomekeyring.IOError, + gnomekeyring.NoKeyringDaemonError) + except Exception: + pass +except ImportError: + pass + +import novaclient +import novaclient.auth_plugin +from novaclient import client +from novaclient import exceptions as exc +import novaclient.extension +from novaclient.openstack.common import strutils +from novaclient import utils +from novaclient.v1_1 import shell as shell_v1_1 +from novaclient.v3 import shell as shell_v3 + +DEFAULT_OS_COMPUTE_API_VERSION = "1.1" +DEFAULT_NOVA_ENDPOINT_TYPE = 'publicURL' +DEFAULT_NOVA_SERVICE_TYPE = 'compute' + +logger = logging.getLogger(__name__) + + +def positive_non_zero_float(text): + if text is None: + return None + try: + value = float(text) + except ValueError: + msg = "%s must be a float" % text + raise argparse.ArgumentTypeError(msg) + if value <= 0: + msg = "%s must be greater than 0" % text + raise argparse.ArgumentTypeError(msg) + return value + + +class SecretsHelper(object): + def __init__(self, args, client): + self.args = args + self.client = client + self.key = None + + def _validate_string(self, text): + if text is None or len(text) == 0: + return False + return True + + def _make_key(self): + if self.key is not None: + return self.key + keys = [ + self.client.auth_url, + self.client.projectid, + self.client.user, + self.client.region_name, + self.client.endpoint_type, + self.client.service_type, + self.client.service_name, + self.client.volume_service_name, + ] + for (index, key) in enumerate(keys): + if key is None: + keys[index] = '?' + else: + keys[index] = str(keys[index]) + self.key = "/".join(keys) + return self.key + + def _prompt_password(self, verify=True): + pw = None + if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty(): + # Check for Ctl-D + try: + while True: + pw1 = getpass.getpass('OS Password: ') + if verify: + pw2 = getpass.getpass('Please verify: ') + else: + pw2 = pw1 + if pw1 == pw2 and self._validate_string(pw1): + pw = pw1 + break + except EOFError: + pass + return pw + + def save(self, auth_token, management_url, tenant_id): + if not HAS_KEYRING or not self.args.os_cache: + return + if (auth_token == self.auth_token and + management_url == self.management_url): + # Nothing changed.... 
+ return + if not all([management_url, auth_token, tenant_id]): + raise ValueError("Unable to save empty management url/auth token") + value = "|".join([str(auth_token), + str(management_url), + str(tenant_id)]) + keyring.set_password("novaclient_auth", self._make_key(), value) + + @property + def password(self): + if self._validate_string(self.args.os_password): + return self.args.os_password + verify_pass = utils.bool_from_str(utils.env("OS_VERIFY_PASSWORD")) + return self._prompt_password(verify_pass) + + @property + def management_url(self): + if not HAS_KEYRING or not self.args.os_cache: + return None + management_url = None + try: + block = keyring.get_password('novaclient_auth', self._make_key()) + if block: + _token, management_url, _tenant_id = block.split('|', 2) + except all_errors: + pass + return management_url + + @property + def auth_token(self): + # Now is where it gets complicated since we + # want to look into the keyring module, if it + # exists and see if anything was provided in that + # file that we can use. + if not HAS_KEYRING or not self.args.os_cache: + return None + token = None + try: + block = keyring.get_password('novaclient_auth', self._make_key()) + if block: + token, _management_url, _tenant_id = block.split('|', 2) + except all_errors: + pass + return token + + @property + def tenant_id(self): + if not HAS_KEYRING or not self.args.os_cache: + return None + tenant_id = None + try: + block = keyring.get_password('novaclient_auth', self._make_key()) + if block: + _token, _management_url, tenant_id = block.split('|', 2) + except all_errors: + pass + return tenant_id + + +class NovaClientArgumentParser(argparse.ArgumentParser): + + def __init__(self, *args, **kwargs): + super(NovaClientArgumentParser, self).__init__(*args, **kwargs) + + def error(self, message): + """error(message: string) + + Prints a usage message incorporating the message to stderr and + exits. 
+ """ + self.print_usage(sys.stderr) + #FIXME(lzyeval): if changes occur in argparse.ArgParser._check_value + choose_from = ' (choose from' + progparts = self.prog.partition(' ') + self.exit(2, "error: %(errmsg)s\nTry '%(mainp)s help %(subp)s'" + " for more information.\n" % + {'errmsg': message.split(choose_from)[0], + 'mainp': progparts[0], + 'subp': progparts[2]}) + + +class OpenStackComputeShell(object): + + def get_base_parser(self): + parser = NovaClientArgumentParser( + prog='nova', + description=__doc__.strip(), + epilog='See "nova help COMMAND" ' + 'for help on a specific command.', + add_help=False, + formatter_class=OpenStackHelpFormatter, + ) + + # Global arguments + parser.add_argument('-h', '--help', + action='store_true', + help=argparse.SUPPRESS, + ) + + parser.add_argument('--version', + action='version', + version=novaclient.__version__) + + parser.add_argument('--debug', + default=False, + action='store_true', + help="Print debugging output") + + parser.add_argument('--no-cache', + default=not utils.bool_from_str( + utils.env('OS_NO_CACHE', default='true')), + action='store_false', + dest='os_cache', + help=argparse.SUPPRESS) + parser.add_argument('--no_cache', + action='store_false', + dest='os_cache', + help=argparse.SUPPRESS) + + parser.add_argument('--os-cache', + default=utils.env('OS_CACHE', default=False), + action='store_true', + help="Use the auth token cache.") + + parser.add_argument('--timings', + default=False, + action='store_true', + help="Print call timing info") + + parser.add_argument('--timeout', + default=600, + metavar='', + type=positive_non_zero_float, + help="Set HTTP call timeout (in seconds)") + + parser.add_argument('--os-username', + metavar='', + default=utils.env('OS_USERNAME', 'NOVA_USERNAME'), + help='Defaults to env[OS_USERNAME].') + parser.add_argument('--os_username', + help=argparse.SUPPRESS) + + parser.add_argument('--os-password', + metavar='', + default=utils.env('OS_PASSWORD', 'NOVA_PASSWORD'), + help='Defaults to env[OS_PASSWORD].') + parser.add_argument('--os_password', + help=argparse.SUPPRESS) + + parser.add_argument('--os-tenant-name', + metavar='', + default=utils.env('OS_TENANT_NAME', 'NOVA_PROJECT_ID'), + help='Defaults to env[OS_TENANT_NAME].') + parser.add_argument('--os_tenant_name', + help=argparse.SUPPRESS) + + parser.add_argument('--os-tenant-id', + metavar='', + default=utils.env('OS_TENANT_ID'), + help='Defaults to env[OS_TENANT_ID].') + + parser.add_argument('--os-auth-url', + metavar='', + default=utils.env('OS_AUTH_URL', 'NOVA_URL'), + help='Defaults to env[OS_AUTH_URL].') + parser.add_argument('--os_auth_url', + help=argparse.SUPPRESS) + + parser.add_argument('--os-region-name', + metavar='', + default=utils.env('OS_REGION_NAME', 'NOVA_REGION_NAME'), + help='Defaults to env[OS_REGION_NAME].') + parser.add_argument('--os_region_name', + help=argparse.SUPPRESS) + + parser.add_argument('--os-auth-system', + metavar='', + default=utils.env('OS_AUTH_SYSTEM'), + help='Defaults to env[OS_AUTH_SYSTEM].') + parser.add_argument('--os_auth_system', + help=argparse.SUPPRESS) + + parser.add_argument('--service-type', + metavar='', + help='Defaults to compute for most actions') + parser.add_argument('--service_type', + help=argparse.SUPPRESS) + + parser.add_argument('--service-name', + metavar='', + default=utils.env('NOVA_SERVICE_NAME'), + help='Defaults to env[NOVA_SERVICE_NAME]') + parser.add_argument('--service_name', + help=argparse.SUPPRESS) + + parser.add_argument('--volume-service-name', + metavar='', + 
default=utils.env('NOVA_VOLUME_SERVICE_NAME'), + help='Defaults to env[NOVA_VOLUME_SERVICE_NAME]') + parser.add_argument('--volume_service_name', + help=argparse.SUPPRESS) + + parser.add_argument('--endpoint-type', + metavar='', + default=utils.env('NOVA_ENDPOINT_TYPE', + default=DEFAULT_NOVA_ENDPOINT_TYPE), + help='Defaults to env[NOVA_ENDPOINT_TYPE] or ' + + DEFAULT_NOVA_ENDPOINT_TYPE + '.') + # NOTE(dtroyer): We can't add --endpoint_type here due to argparse + # thinking usage-list --end is ambiguous; but it + # works fine with only --endpoint-type present + # Go figure. I'm leaving this here for doc purposes. + #parser.add_argument('--endpoint_type', + # help=argparse.SUPPRESS) + + parser.add_argument('--os-compute-api-version', + metavar='', + default=utils.env('OS_COMPUTE_API_VERSION', + default=DEFAULT_OS_COMPUTE_API_VERSION), + help='Accepts 1.1 or 3, ' + 'defaults to env[OS_COMPUTE_API_VERSION].') + parser.add_argument('--os_compute_api_version', + help=argparse.SUPPRESS) + + parser.add_argument('--os-cacert', + metavar='', + default=utils.env('OS_CACERT', default=None), + help='Specify a CA bundle file to use in ' + 'verifying a TLS (https) server certificate. ' + 'Defaults to env[OS_CACERT]') + + parser.add_argument('--insecure', + default=utils.env('NOVACLIENT_INSECURE', default=False), + action='store_true', + help="Explicitly allow novaclient to perform \"insecure\" " + "SSL (https) requests. The server's certificate will " + "not be verified against any certificate authorities. " + "This option should be used with caution.") + + parser.add_argument('--bypass-url', + metavar='', + dest='bypass_url', + help="Use this API endpoint instead of the Service Catalog") + parser.add_argument('--bypass_url', + help=argparse.SUPPRESS) + + # The auth-system-plugins might require some extra options + novaclient.auth_plugin.load_auth_system_opts(parser) + + return parser + + def get_subcommand_parser(self, version): + parser = self.get_base_parser() + + self.subcommands = {} + subparsers = parser.add_subparsers(metavar='') + + try: + actions_module = { + '1.1': shell_v1_1, + '2': shell_v1_1, + '3': shell_v3, + }[version] + except KeyError: + actions_module = shell_v1_1 + + self._find_actions(subparsers, actions_module) + self._find_actions(subparsers, self) + + for extension in self.extensions: + self._find_actions(subparsers, extension.module) + + self._add_bash_completion_subparser(subparsers) + + return parser + + def _discover_extensions(self, version): + extensions = [] + for name, module in itertools.chain( + self._discover_via_python_path(), + self._discover_via_contrib_path(version), + self._discover_via_entry_points()): + + extension = novaclient.extension.Extension(name, module) + extensions.append(extension) + + return extensions + + def _discover_via_python_path(self): + for (module_loader, name, _ispkg) in pkgutil.iter_modules(): + if name.endswith('_python_novaclient_ext'): + if not hasattr(module_loader, 'load_module'): + # Python 2.6 compat: actually get an ImpImporter obj + module_loader = module_loader.find_module(name) + + module = module_loader.load_module(name) + if hasattr(module, 'extension_name'): + name = module.extension_name + + yield name, module + + def _discover_via_contrib_path(self, version): + module_path = os.path.dirname(os.path.abspath(__file__)) + version_str = "v%s" % version.replace('.', '_') + ext_path = os.path.join(module_path, version_str, 'contrib') + ext_glob = os.path.join(ext_path, "*.py") + + for ext_path in glob.iglob(ext_glob): + name = 
os.path.basename(ext_path)[:-3] + + if name == "__init__": + continue + + module = imp.load_source(name, ext_path) + yield name, module + + def _discover_via_entry_points(self): + for ep in pkg_resources.iter_entry_points('novaclient.extension'): + name = ep.name + module = ep.load() + + yield name, module + + def _add_bash_completion_subparser(self, subparsers): + subparser = subparsers.add_parser('bash_completion', + add_help=False, + formatter_class=OpenStackHelpFormatter + ) + self.subcommands['bash_completion'] = subparser + subparser.set_defaults(func=self.do_bash_completion) + + def _find_actions(self, subparsers, actions_module): + for attr in (a for a in dir(actions_module) if a.startswith('do_')): + # I prefer to be hypen-separated instead of underscores. + command = attr[3:].replace('_', '-') + callback = getattr(actions_module, attr) + desc = callback.__doc__ or '' + action_help = desc.strip() + arguments = getattr(callback, 'arguments', []) + + subparser = subparsers.add_parser(command, + help=action_help, + description=desc, + add_help=False, + formatter_class=OpenStackHelpFormatter + ) + subparser.add_argument('-h', '--help', + action='help', + help=argparse.SUPPRESS, + ) + self.subcommands[command] = subparser + for (args, kwargs) in arguments: + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=callback) + + def setup_debugging(self, debug): + if not debug: + return + + streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s" + # Set up the root logger to debug so that the submodules can + # print debug messages + logging.basicConfig(level=logging.DEBUG, + format=streamformat) + + def main(self, argv): + + # Parse args once to find version and debug settings + parser = self.get_base_parser() + (options, args) = parser.parse_known_args(argv) + self.setup_debugging(options.debug) + + # Discover available auth plugins + novaclient.auth_plugin.discover_auth_systems() + + # build available subcommands based on version + self.extensions = self._discover_extensions( + options.os_compute_api_version) + self._run_extension_hooks('__pre_parse_args__') + + # NOTE(dtroyer): Hackery to handle --endpoint_type due to argparse + # thinking usage-list --end is ambiguous; but it + # works fine with only --endpoint-type present + # Go figure. + if '--endpoint_type' in argv: + spot = argv.index('--endpoint_type') + argv[spot] = '--endpoint-type' + + subcommand_parser = self.get_subcommand_parser( + options.os_compute_api_version) + self.parser = subcommand_parser + + if options.help or not argv: + subcommand_parser.print_help() + return 0 + + args = subcommand_parser.parse_args(argv) + self._run_extension_hooks('__post_parse_args__', args) + + # Short-circuit and deal with help right away. 
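+        # help and bash_completion can run without credentials, so they are
+        # dispatched here, before any authentication setup below.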
+ if args.func == self.do_help: + self.do_help(args) + return 0 + elif args.func == self.do_bash_completion: + self.do_bash_completion(args) + return 0 + + (os_username, os_tenant_name, os_tenant_id, os_auth_url, + os_region_name, os_auth_system, endpoint_type, insecure, + service_type, service_name, volume_service_name, + bypass_url, os_cache, cacert, timeout) = ( + args.os_username, + args.os_tenant_name, args.os_tenant_id, + args.os_auth_url, + args.os_region_name, args.os_auth_system, + args.endpoint_type, args.insecure, args.service_type, + args.service_name, args.volume_service_name, + args.bypass_url, args.os_cache, + args.os_cacert, args.timeout) + + if os_auth_system and os_auth_system != "keystone": + auth_plugin = novaclient.auth_plugin.load_plugin(os_auth_system) + else: + auth_plugin = None + + # Fetched and set later as needed + os_password = None + + if not endpoint_type: + endpoint_type = DEFAULT_NOVA_ENDPOINT_TYPE + + if not service_type: + service_type = DEFAULT_NOVA_SERVICE_TYPE + service_type = utils.get_service_type(args.func) or service_type + + #FIXME(usrleon): Here should be restrict for project id same as + # for os_username or os_password but for compatibility it is not. + if not utils.isunauthenticated(args.func): + if auth_plugin: + auth_plugin.parse_opts(args) + + if not auth_plugin or not auth_plugin.opts: + if not os_username: + raise exc.CommandError("You must provide a username " + "via either --os-username or env[OS_USERNAME]") + + if not os_tenant_name and not os_tenant_id: + raise exc.CommandError("You must provide a tenant name " + "or tenant id via --os-tenant-name, " + "--os-tenant-id, env[OS_TENANT_NAME] " + "or env[OS_TENANT_ID]") + + if not os_auth_url: + if os_auth_system and os_auth_system != 'keystone': + os_auth_url = auth_plugin.get_auth_url() + + if not os_auth_url: + raise exc.CommandError("You must provide an auth url " + "via either --os-auth-url or env[OS_AUTH_URL] " + "or specify an auth_system which defines a " + "default url with --os-auth-system " + "or env[OS_AUTH_SYSTEM]") + + if (options.os_compute_api_version and + options.os_compute_api_version != '1.0'): + if not os_tenant_name and not os_tenant_id: + raise exc.CommandError("You must provide a tenant name " + "or tenant id via --os-tenant-name, " + "--os-tenant-id, env[OS_TENANT_NAME] " + "or env[OS_TENANT_ID]") + + if not os_auth_url: + raise exc.CommandError("You must provide an auth url " + "via either --os-auth-url or env[OS_AUTH_URL]") + + self.cs = client.Client(options.os_compute_api_version, os_username, + os_password, os_tenant_name, tenant_id=os_tenant_id, + auth_url=os_auth_url, insecure=insecure, + region_name=os_region_name, endpoint_type=endpoint_type, + extensions=self.extensions, service_type=service_type, + service_name=service_name, auth_system=os_auth_system, + auth_plugin=auth_plugin, + volume_service_name=volume_service_name, + timings=args.timings, bypass_url=bypass_url, + os_cache=os_cache, http_log_debug=options.debug, + cacert=cacert, timeout=timeout) + + # Now check for the password/token of which pieces of the + # identifying keyring key can come from the underlying client + if not utils.isunauthenticated(args.func): + helper = SecretsHelper(args, self.cs.client) + if (auth_plugin and auth_plugin.opts and + "os_password" not in auth_plugin.opts): + use_pw = False + else: + use_pw = True + + tenant_id, auth_token, management_url = (helper.tenant_id, + helper.auth_token, + helper.management_url) + if tenant_id and auth_token and management_url: + 
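+                # The keyring returned a complete cached triple; install it on
+                # the client and try it before falling back to password auth.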
self.cs.client.tenant_id = tenant_id
+                self.cs.client.auth_token = auth_token
+                self.cs.client.management_url = management_url
+                # Try to auth with the given info, if it fails
+                # go into password mode...
+                try:
+                    self.cs.authenticate()
+                    use_pw = False
+                except (exc.Unauthorized, exc.AuthorizationFailure):
+                    # Likely it expired or just didn't work...
+                    self.cs.client.auth_token = None
+                    self.cs.client.management_url = None
+            if use_pw:
+                # Auth using token must have failed or not happened
+                # at all, so now switch to password mode and save
+                # the token when it's gotten... using our keyring
+                # saver
+                os_password = helper.password
+                if not os_password:
+                    raise exc.CommandError(
+                        'Expecting a password provided via either '
+                        '--os-password, env[OS_PASSWORD], or '
+                        'prompted response')
+                self.cs.client.password = os_password
+                self.cs.client.keyring_saver = helper
+
+        try:
+            if not utils.isunauthenticated(args.func):
+                self.cs.authenticate()
+        except exc.Unauthorized:
+            raise exc.CommandError("Invalid OpenStack Nova credentials.")
+        except exc.AuthorizationFailure:
+            raise exc.CommandError("Unable to authorize user")
+
+        args.func(self.cs, args)
+
+        if args.timings:
+            self._dump_timings(self.cs.get_timings())
+
+    def _dump_timings(self, timings):
+        class Tyme(object):
+            def __init__(self, url, seconds):
+                self.url = url
+                self.seconds = seconds
+        results = [Tyme(url, end - start) for url, start, end in timings]
+        total = 0.0
+        for tyme in results:
+            total += tyme.seconds
+        results.append(Tyme("Total", total))
+        utils.print_list(results, ["url", "seconds"], sortby_index=None)
+
+    def _run_extension_hooks(self, hook_type, *args, **kwargs):
+        """Run hooks for all registered extensions."""
+        for extension in self.extensions:
+            extension.run_hooks(hook_type, *args, **kwargs)
+
+    def do_bash_completion(self, _args):
+        """
+        Prints all of the commands and options to stdout so that the
+        nova.bash_completion script doesn't have to hard code them.
+        """
+        commands = set()
+        options = set()
+        for sc_str, sc in self.subcommands.items():
+            commands.add(sc_str)
+            for option in sc._optionals._option_string_actions.keys():
+                options.add(option)
+
+        commands.remove('bash-completion')
+        commands.remove('bash_completion')
+        print(' '.join(commands | options))
+
+    @utils.arg('command', metavar='<subcommand>', nargs='?',
+               help='Display help for <subcommand>')
+    def do_help(self, args):
+        """
+        Display help about this program or one of its subcommands.
+        """
+        if args.command:
+            if args.command in self.subcommands:
+                self.subcommands[args.command].print_help()
+            else:
+                raise exc.CommandError("'%s' is not a valid subcommand" %
+                                       args.command)
+        else:
+            self.parser.print_help()
+
+
+# I'm picky about my shell help.
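+# Used as the formatter_class for the base parser and all subparsers above;
+# its only customization is capitalizing section headings in help output.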
+class OpenStackHelpFormatter(argparse.HelpFormatter):
+    def start_section(self, heading):
+        # Title-case the headings
+        heading = '%s%s' % (heading[0].upper(), heading[1:])
+        super(OpenStackHelpFormatter, self).start_section(heading)
+
+
+def main():
+    try:
+        OpenStackComputeShell().main(map(strutils.safe_decode, sys.argv[1:]))
+
+    except Exception as e:
+        logger.debug(e, exc_info=1)
+        print("ERROR: %s" % strutils.safe_encode(six.text_type(e)),
+              file=sys.stderr)
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/awx/lib/site-packages/novaclient/tests/__init__.py b/awx/lib/site-packages/novaclient/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/novaclient/tests/fakes.py b/awx/lib/site-packages/novaclient/tests/fakes.py
new file mode 100644
index 0000000000..bc3683a4ab
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/tests/fakes.py
@@ -0,0 +1,80 @@
+"""
+A fake server that "responds" to API methods with pre-canned responses.
+
+All of these responses come from the spec, so if for some reason the spec's
+wrong the tests might raise AssertionError. I've indicated in comments the
+places where actual behavior differs from the spec.
+"""
+
+from novaclient import base
+
+
+def assert_has_keys(dict, required=[], optional=[]):
+    keys = dict.keys()
+    for k in required:
+        try:
+            assert k in keys
+        except AssertionError:
+            extra_keys = set(keys).difference(set(required + optional))
+            raise AssertionError("found unexpected keys: %s" %
+                                 list(extra_keys))
+
+
+class FakeClient(object):
+
+    def assert_called(self, method, url, body=None, pos=-1):
+        """
+        Assert that an API method was just called.
+        """
+        expected = (method, url)
+        called = self.client.callstack[pos][0:2]
+
+        assert self.client.callstack, \
+            "Expected %s %s but no calls were made." % expected
+
+        assert expected == called, 'Expected %s %s; got %s %s' % \
+            (expected + called)
+
+        if body is not None:
+            if self.client.callstack[pos][2] != body:
+                raise AssertionError('%r != %r' %
+                                     (self.client.callstack[pos][2], body))
+
+    def assert_called_anytime(self, method, url, body=None):
+        """
+        Assert that an API method was called anytime in the test.
+        """
+        expected = (method, url)
+
+        assert self.client.callstack, \
+            "Expected %s %s but no calls were made." % expected
+
+        found = False
+        for entry in self.client.callstack:
+            if expected == entry[0:2]:
+                found = True
+                break
+
+        assert found, 'Expected %s; got %s' % \
+            (expected, self.client.callstack)
+        if body is not None:
+            try:
+                assert entry[2] == body
+            except AssertionError:
+                print(entry[2])
+                print("!=")
+                print(body)
+                raise
+
+        self.client.callstack = []
+
+    def clear_callstack(self):
+        self.client.callstack = []
+
+    def authenticate(self):
+        pass
+
+
+# Fake class that will be used as an extension
+class FakeManager(base.Manager):
+    pass
diff --git a/awx/lib/site-packages/novaclient/tests/test_auth_plugins.py b/awx/lib/site-packages/novaclient/tests/test_auth_plugins.py
new file mode 100644
index 0000000000..96217991aa
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/tests/test_auth_plugins.py
@@ -0,0 +1,346 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import argparse +import mock +import pkg_resources +import requests + +try: + import json +except ImportError: + import simplejson as json + +from novaclient import auth_plugin +from novaclient import exceptions +from novaclient.v1_1 import client +from novaclient.tests import utils + + +def mock_http_request(resp=None): + """Mock an HTTP Request.""" + if not resp: + resp = { + "access": { + "token": { + "expires": "12345", + "id": "FAKE_ID", + "tenant": { + "id": "FAKE_TENANT_ID", + } + }, + "serviceCatalog": [ + { + "type": "compute", + "endpoints": [ + { + "region": "RegionOne", + "adminURL": "http://localhost:8774/v1.1", + "internalURL": "http://localhost:8774/v1.1", + "publicURL": "http://localhost:8774/v1.1/", + }, + ], + }, + ], + }, + } + + auth_response = utils.TestResponse({ + "status_code": 200, + "text": json.dumps(resp), + }) + return mock.Mock(return_value=(auth_response)) + + +def requested_headers(cs): + """Return requested passed headers.""" + return { + 'User-Agent': cs.client.USER_AGENT, + 'Content-Type': 'application/json', + 'Accept': 'application/json', + } + + +class DeprecatedAuthPluginTest(utils.TestCase): + def test_auth_system_success(self): + class MockEntrypoint(pkg_resources.EntryPoint): + def load(self): + return self.authenticate + + def authenticate(self, cls, auth_url): + cls._authenticate(auth_url, {"fake": "me"}) + + def mock_iter_entry_points(_type, name): + if _type == 'openstack.client.authenticate': + return [MockEntrypoint("fake", "fake", ["fake"])] + else: + return [] + + mock_request = mock_http_request() + + @mock.patch.object(pkg_resources, "iter_entry_points", + mock_iter_entry_points) + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + plugin = auth_plugin.DeprecatedAuthPlugin("fake") + cs = client.Client("username", "password", "project_id", + "auth_url/v2.0", auth_system="fake", + auth_plugin=plugin) + cs.client.authenticate() + + headers = requested_headers(cs) + token_url = cs.client.auth_url + "/tokens" + + mock_request.assert_called_with( + "POST", + token_url, + headers=headers, + data='{"fake": "me"}', + allow_redirects=True, + **self.TEST_REQUEST_BASE) + + test_auth_call() + + def test_auth_system_not_exists(self): + def mock_iter_entry_points(_t, name=None): + return [pkg_resources.EntryPoint("fake", "fake", ["fake"])] + + mock_request = mock_http_request() + + @mock.patch.object(pkg_resources, "iter_entry_points", + mock_iter_entry_points) + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + auth_plugin.discover_auth_systems() + plugin = auth_plugin.DeprecatedAuthPlugin("notexists") + cs = client.Client("username", "password", "project_id", + "auth_url/v2.0", auth_system="notexists", + auth_plugin=plugin) + self.assertRaises(exceptions.AuthSystemNotFound, + cs.client.authenticate) + + test_auth_call() + + def test_auth_system_defining_auth_url(self): + class MockAuthUrlEntrypoint(pkg_resources.EntryPoint): + def load(self): + return self.auth_url + + def auth_url(self): + return "http://faked/v2.0" + + class 
MockAuthenticateEntrypoint(pkg_resources.EntryPoint): + def load(self): + return self.authenticate + + def authenticate(self, cls, auth_url): + cls._authenticate(auth_url, {"fake": "me"}) + + def mock_iter_entry_points(_type, name): + if _type == 'openstack.client.auth_url': + return [MockAuthUrlEntrypoint("fakewithauthurl", + "fakewithauthurl", + ["auth_url"])] + elif _type == 'openstack.client.authenticate': + return [MockAuthenticateEntrypoint("fakewithauthurl", + "fakewithauthurl", + ["authenticate"])] + else: + return [] + + mock_request = mock_http_request() + + @mock.patch.object(pkg_resources, "iter_entry_points", + mock_iter_entry_points) + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + plugin = auth_plugin.DeprecatedAuthPlugin("fakewithauthurl") + cs = client.Client("username", "password", "project_id", + auth_system="fakewithauthurl", + auth_plugin=plugin) + cs.client.authenticate() + self.assertEqual(cs.client.auth_url, "http://faked/v2.0") + + test_auth_call() + + @mock.patch.object(pkg_resources, "iter_entry_points") + def test_client_raises_exc_without_auth_url(self, mock_iter_entry_points): + class MockAuthUrlEntrypoint(pkg_resources.EntryPoint): + def load(self): + return self.auth_url + + def auth_url(self): + return None + + mock_iter_entry_points.side_effect = lambda _t, name: [ + MockAuthUrlEntrypoint("fakewithauthurl", + "fakewithauthurl", + ["auth_url"])] + + plugin = auth_plugin.DeprecatedAuthPlugin("fakewithauthurl") + self.assertRaises( + exceptions.EndpointNotFound, + client.Client, "username", "password", "project_id", + auth_system="fakewithauthurl", auth_plugin=plugin) + + +class AuthPluginTest(utils.TestCase): + @mock.patch.object(requests.Session, "request") + @mock.patch.object(pkg_resources, "iter_entry_points") + def test_auth_system_success(self, mock_iter_entry_points, mock_request): + """Test that we can authenticate using the auth system.""" + class MockEntrypoint(pkg_resources.EntryPoint): + def load(self): + return FakePlugin + + class FakePlugin(auth_plugin.BaseAuthPlugin): + def authenticate(self, cls, auth_url): + cls._authenticate(auth_url, {"fake": "me"}) + + mock_iter_entry_points.side_effect = lambda _t: [ + MockEntrypoint("fake", "fake", ["FakePlugin"])] + + mock_request.side_effect = mock_http_request() + + auth_plugin.discover_auth_systems() + plugin = auth_plugin.load_plugin("fake") + cs = client.Client("username", "password", "project_id", + "auth_url/v2.0", auth_system="fake", + auth_plugin=plugin) + cs.client.authenticate() + + headers = requested_headers(cs) + token_url = cs.client.auth_url + "/tokens" + + mock_request.assert_called_with( + "POST", + token_url, + headers=headers, + data='{"fake": "me"}', + allow_redirects=True, + **self.TEST_REQUEST_BASE) + + @mock.patch.object(pkg_resources, "iter_entry_points") + def test_discover_auth_system_options(self, mock_iter_entry_points): + """Test that we can load the auth system options.""" + class FakePlugin(auth_plugin.BaseAuthPlugin): + @staticmethod + def add_opts(parser): + parser.add_argument('--auth_system_opt', + default=False, + action='store_true', + help="Fake option") + return parser + + class MockEntrypoint(pkg_resources.EntryPoint): + def load(self): + return FakePlugin + + mock_iter_entry_points.side_effect = lambda _t: [ + MockEntrypoint("fake", "fake", ["FakePlugin"])] + + parser = argparse.ArgumentParser() + auth_plugin.discover_auth_systems() + auth_plugin.load_auth_system_opts(parser) + opts, args = 
parser.parse_known_args(['--auth_system_opt'])
+
+        self.assertTrue(opts.auth_system_opt)
+
+    @mock.patch.object(pkg_resources, "iter_entry_points")
+    def test_parse_auth_system_options(self, mock_iter_entry_points):
+        """Test that we can parse the auth system options."""
+        class MockEntrypoint(pkg_resources.EntryPoint):
+            def load(self):
+                return FakePlugin
+
+        class FakePlugin(auth_plugin.BaseAuthPlugin):
+            def __init__(self):
+                self.opts = {"fake_argument": True}
+
+            def parse_opts(self, args):
+                return self.opts
+
+        mock_iter_entry_points.side_effect = lambda _t: [
+            MockEntrypoint("fake", "fake", ["FakePlugin"])]
+
+        auth_plugin.discover_auth_systems()
+        plugin = auth_plugin.load_plugin("fake")
+
+        plugin.parse_opts([])
+        self.assertIn("fake_argument", plugin.opts)
+
+    @mock.patch.object(pkg_resources, "iter_entry_points")
+    def test_auth_system_defining_url(self, mock_iter_entry_points):
+        """Test the auth_system defining a url."""
+        class MockEntrypoint(pkg_resources.EntryPoint):
+            def load(self):
+                return FakePlugin
+
+        class FakePlugin(auth_plugin.BaseAuthPlugin):
+            def get_auth_url(self):
+                return "http://faked/v2.0"
+
+        mock_iter_entry_points.side_effect = lambda _t: [
+            MockEntrypoint("fake", "fake", ["FakePlugin"])]
+
+        auth_plugin.discover_auth_systems()
+        plugin = auth_plugin.load_plugin("fake")
+
+        cs = client.Client("username", "password", "project_id",
+                           auth_system="fakewithauthurl",
+                           auth_plugin=plugin)
+        self.assertEqual(cs.client.auth_url, "http://faked/v2.0")
+
+    @mock.patch.object(pkg_resources, "iter_entry_points")
+    def test_exception_if_no_authenticate(self, mock_iter_entry_points):
+        """Test that no authenticate raises a proper exception."""
+        class MockEntrypoint(pkg_resources.EntryPoint):
+            def load(self):
+                return FakePlugin
+
+        class FakePlugin(auth_plugin.BaseAuthPlugin):
+            pass
+
+        mock_iter_entry_points.side_effect = lambda _t: [
+            MockEntrypoint("fake", "fake", ["FakePlugin"])]
+
+        auth_plugin.discover_auth_systems()
+        plugin = auth_plugin.load_plugin("fake")
+
+        self.assertRaises(
+            exceptions.EndpointNotFound,
+            client.Client, "username", "password", "project_id",
+            auth_system="fake", auth_plugin=plugin)
+
+    @mock.patch.object(pkg_resources, "iter_entry_points")
+    def test_exception_if_no_url(self, mock_iter_entry_points):
+        """Test that no auth_url at all raises exception."""
+        class MockEntrypoint(pkg_resources.EntryPoint):
+            def load(self):
+                return FakePlugin
+
+        class FakePlugin(auth_plugin.BaseAuthPlugin):
+            pass
+
+        mock_iter_entry_points.side_effect = lambda _t: [
+            MockEntrypoint("fake", "fake", ["FakePlugin"])]
+
+        auth_plugin.discover_auth_systems()
+        plugin = auth_plugin.load_plugin("fake")
+
+        self.assertRaises(
+            exceptions.EndpointNotFound,
+            client.Client, "username", "password", "project_id",
+            auth_system="fake", auth_plugin=plugin)
diff --git a/awx/lib/site-packages/novaclient/tests/test_base.py b/awx/lib/site-packages/novaclient/tests/test_base.py
new file mode 100644
index 0000000000..58aeae962c
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/tests/test_base.py
@@ -0,0 +1,56 @@
+from novaclient import base
+from novaclient import exceptions
+from novaclient.v1_1 import flavors
+from novaclient.tests import utils
+from novaclient.tests.v1_1 import fakes
+
+
+cs = fakes.FakeClient()
+
+
+class BaseTest(utils.TestCase):
+
+    def test_resource_repr(self):
+        r = base.Resource(None, dict(foo="bar", baz="spam"))
+        self.assertEqual(repr(r), "<Resource baz=spam, foo=bar>")
+
+    def test_getid(self):
+        self.assertEqual(base.getid(4), 4)
+
+        class TmpObject(object):
+            id = 4
+        self.assertEqual(base.getid(TmpObject), 4)
+
+    def test_resource_lazy_getattr(self):
+        f = flavors.Flavor(cs.flavors, {'id': 1})
+        self.assertEqual(f.name, '256 MB Server')
+        cs.assert_called('GET', '/flavors/1')
+
+        # Missing stuff still fails after a second get
+        self.assertRaises(AttributeError, getattr, f, 'blahblah')
+
+    def test_eq(self):
+        # Two resources of the same type with the same id: equal
+        r1 = base.Resource(None, {'id': 1, 'name': 'hi'})
+        r2 = base.Resource(None, {'id': 1, 'name': 'hello'})
+        self.assertEqual(r1, r2)
+
+        # Two resources of different types: never equal
+        r1 = base.Resource(None, {'id': 1})
+        r2 = flavors.Flavor(None, {'id': 1})
+        self.assertNotEqual(r1, r2)
+
+        # Two resources with no ID: equal if their info is equal
+        r1 = base.Resource(None, {'name': 'joe', 'age': 12})
+        r2 = base.Resource(None, {'name': 'joe', 'age': 12})
+        self.assertEqual(r1, r2)
+
+    def test_findall_invalid_attribute(self):
+        # Make sure findall with an invalid attribute doesn't cause errors.
+        # The following should not raise an exception.
+        cs.flavors.findall(vegetable='carrot')
+
+        # However, find() should raise an error
+        self.assertRaises(exceptions.NotFound,
+                          cs.flavors.find,
+                          vegetable='carrot')
diff --git a/awx/lib/site-packages/novaclient/tests/test_client.py b/awx/lib/site-packages/novaclient/tests/test_client.py
new file mode 100644
index 0000000000..3d7743166d
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/tests/test_client.py
@@ -0,0 +1,188 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
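+# These tests exercise HTTPClient timeout handling, re-authentication on 401,
+# API-version-to-class selection and the os_cache/no_cache client flags.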
+ + +import mock +import requests + +import novaclient.client +import novaclient.extension +import novaclient.tests.fakes as fakes +import novaclient.v1_1.client +import novaclient.v3.client +from novaclient.tests import utils + + +class ClientTest(utils.TestCase): + + def test_client_with_timeout(self): + instance = novaclient.client.HTTPClient(user='user', + password='password', + projectid='project', + timeout=2, + auth_url="http://www.blah.com") + self.assertEqual(instance.timeout, 2) + mock_request = mock.Mock() + mock_request.return_value = requests.Response() + mock_request.return_value.status_code = 200 + mock_request.return_value.headers = { + 'x-server-management-url': 'blah.com', + 'x-auth-token': 'blah', + } + with mock.patch('requests.Session.request', mock_request): + instance.authenticate() + requests.Session.request.assert_called_with(mock.ANY, mock.ANY, + timeout=2, + headers=mock.ANY, + verify=mock.ANY) + + def test_client_reauth(self): + instance = novaclient.client.HTTPClient(user='user', + password='password', + projectid='project', + timeout=2, + auth_url="http://www.blah.com") + instance.auth_token = 'foobar' + instance.management_url = 'http://example.com' + instance.version = 'v2.0' + mock_request = mock.Mock() + mock_request.side_effect = novaclient.exceptions.Unauthorized(401) + with mock.patch('requests.Session.request', mock_request): + try: + instance.get('/servers/detail') + except Exception: + pass + get_headers = {'X-Auth-Project-Id': 'project', + 'X-Auth-Token': 'foobar', + 'User-Agent': 'python-novaclient', + 'Accept': 'application/json'} + reauth_headers = {'Content-Type': 'application/json', + 'Accept': 'application/json', + 'User-Agent': 'python-novaclient'} + data = ('{"auth": {"tenantName": "project", "passwordCredentials":' + ' {"username": "user", "password": "password"}}}') + expected = [mock.call('GET', + 'http://example.com/servers/detail', + timeout=mock.ANY, + headers=get_headers, + verify=mock.ANY), + mock.call('POST', 'http://www.blah.com/tokens', + timeout=mock.ANY, + headers=reauth_headers, + allow_redirects=mock.ANY, + data=data, + verify=mock.ANY)] + self.assertEqual(mock_request.call_args_list, expected) + + def test_get_client_class_v3(self): + output = novaclient.client.get_client_class('3') + self.assertEqual(output, novaclient.v3.client.Client) + + def test_get_client_class_v2(self): + output = novaclient.client.get_client_class('2') + self.assertEqual(output, novaclient.v1_1.client.Client) + + def test_get_client_class_v2_int(self): + output = novaclient.client.get_client_class(2) + self.assertEqual(output, novaclient.v1_1.client.Client) + + def test_get_client_class_v1_1(self): + output = novaclient.client.get_client_class('1.1') + self.assertEqual(output, novaclient.v1_1.client.Client) + + def test_get_client_class_unknown(self): + self.assertRaises(novaclient.exceptions.UnsupportedVersion, + novaclient.client.get_client_class, '0') + + def test_client_with_os_cache_enabled(self): + cs = novaclient.v1_1.client.Client("user", "password", "project_id", + auth_url="foo/v2", os_cache=True) + self.assertEqual(True, cs.os_cache) + self.assertEqual(True, cs.client.os_cache) + + def test_client_with_os_cache_disabled(self): + cs = novaclient.v1_1.client.Client("user", "password", "project_id", + auth_url="foo/v2", os_cache=False) + self.assertEqual(False, cs.os_cache) + self.assertEqual(False, cs.client.os_cache) + + def test_client_with_no_cache_enabled(self): + cs = novaclient.v1_1.client.Client("user", "password", "project_id", + 
auth_url="foo/v2", no_cache=True) + self.assertEqual(False, cs.os_cache) + self.assertEqual(False, cs.client.os_cache) + + def test_client_with_no_cache_disabled(self): + cs = novaclient.v1_1.client.Client("user", "password", "project_id", + auth_url="foo/v2", no_cache=False) + self.assertEqual(True, cs.os_cache) + self.assertEqual(True, cs.client.os_cache) + + def test_client_set_management_url_v1_1(self): + cs = novaclient.v1_1.client.Client("user", "password", "project_id", + auth_url="foo/v2") + cs.set_management_url("blabla") + self.assertEqual("blabla", cs.client.management_url) + + def test_client_get_reset_timings_v1_1(self): + cs = novaclient.v1_1.client.Client("user", "password", "project_id", + auth_url="foo/v2") + self.assertEqual(0, len(cs.get_timings())) + cs.client.times.append("somevalue") + self.assertEqual(1, len(cs.get_timings())) + self.assertEqual("somevalue", cs.get_timings()[0]) + + cs.reset_timings() + self.assertEqual(0, len(cs.get_timings())) + + def test_client_set_management_url_v3(self): + cs = novaclient.v3.client.Client("user", "password", "project_id", + auth_url="foo/v2") + cs.set_management_url("blabla") + self.assertEqual("blabla", cs.client.management_url) + + def test_client_get_reset_timings_v3(self): + cs = novaclient.v3.client.Client("user", "password", "project_id", + auth_url="foo/v2") + self.assertEqual(0, len(cs.get_timings())) + cs.client.times.append("somevalue") + self.assertEqual(["somevalue"], cs.get_timings()) + + cs.reset_timings() + self.assertEquals(0, len(cs.get_timings())) + + def test_clent_extensions_v3(self): + fake_attribute_name1 = "FakeAttribute1" + fake_attribute_name2 = "FakeAttribute2" + extensions = [ + novaclient.extension.Extension(fake_attribute_name1, + fakes), + novaclient.extension.Extension(fake_attribute_name2, + utils), + ] + + cs = novaclient.v3.client.Client("user", "password", "project_id", + auth_url="foo/v2", + extensions=extensions) + self.assertTrue(isinstance(getattr(cs, fake_attribute_name1, None), + fakes.FakeManager)) + self.assertFalse(hasattr(cs, fake_attribute_name2)) + + @mock.patch.object(novaclient.client.HTTPClient, 'authenticate') + def test_authenticate_call_v3(self, mock_authenticate): + cs = novaclient.v3.client.Client("user", "password", "project_id", + auth_url="foo/v2") + cs.authenticate() + self.assertTrue(mock_authenticate.called) diff --git a/awx/lib/site-packages/novaclient/tests/test_discover.py b/awx/lib/site-packages/novaclient/tests/test_discover.py new file mode 100644 index 0000000000..29c9e4756d --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/test_discover.py @@ -0,0 +1,79 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +import imp +import inspect +import pkg_resources + +import novaclient.shell +from novaclient.tests import utils + + +class DiscoverTest(utils.TestCase): + + def test_discover_via_entry_points(self): + + def mock_iter_entry_points(group): + if group == 'novaclient.extension': + fake_ep = mock.Mock() + fake_ep.name = 'foo' + fake_ep.module = imp.new_module('foo') + fake_ep.load.return_value = fake_ep.module + return [fake_ep] + + @mock.patch.object(pkg_resources, 'iter_entry_points', + mock_iter_entry_points) + def test(): + shell = novaclient.shell.OpenStackComputeShell() + for name, module in shell._discover_via_entry_points(): + self.assertEqual(name, 'foo') + self.assertTrue(inspect.ismodule(module)) + + test() + + def test_discover_extensions(self): + + def mock_discover_via_python_path(self): + yield 'foo', imp.new_module('foo') + + def mock_discover_via_contrib_path(self, version): + yield 'bar', imp.new_module('bar') + + def mock_discover_via_entry_points(self): + yield 'baz', imp.new_module('baz') + + @mock.patch.object(novaclient.shell.OpenStackComputeShell, + '_discover_via_python_path', + mock_discover_via_python_path) + @mock.patch.object(novaclient.shell.OpenStackComputeShell, + '_discover_via_contrib_path', + mock_discover_via_contrib_path) + @mock.patch.object(novaclient.shell.OpenStackComputeShell, + '_discover_via_entry_points', + mock_discover_via_entry_points) + def test(): + shell = novaclient.shell.OpenStackComputeShell() + extensions = shell._discover_extensions('1.1') + self.assertEqual(len(extensions), 3) + names = sorted(['foo', 'bar', 'baz']) + sorted_extensions = sorted(extensions, key=lambda ext: ext.name) + for i in range(len(names)): + ext = sorted_extensions[i] + name = names[i] + self.assertEqual(ext.name, name) + self.assertTrue(inspect.ismodule(ext.module)) + + test() diff --git a/awx/lib/site-packages/novaclient/tests/test_http.py b/awx/lib/site-packages/novaclient/tests/test_http.py new file mode 100644 index 0000000000..f6ebb7a7ec --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/test_http.py @@ -0,0 +1,122 @@ +import mock +import requests + +from novaclient import client +from novaclient import exceptions +from novaclient.tests import utils + + +fake_response = utils.TestResponse({ + "status_code": 200, + "text": '{"hi": "there"}', +}) +mock_request = mock.Mock(return_value=(fake_response)) + +refused_response = utils.TestResponse({ + "status_code": 400, + "text": '[Errno 111] Connection refused', +}) +refused_mock_request = mock.Mock(return_value=(refused_response)) + +bad_req_response = utils.TestResponse({ + "status_code": 400, + "text": '', +}) +bad_req_mock_request = mock.Mock(return_value=(bad_req_response)) + + +def get_client(): + cl = client.HTTPClient("username", "password", + "project_id", "auth_test") + return cl + + +def get_authed_client(): + cl = get_client() + cl.management_url = "http://example.com" + cl.auth_token = "token" + return cl + + +class ClientTest(utils.TestCase): + + def test_get(self): + cl = get_authed_client() + + @mock.patch.object(requests.Session, "request", mock_request) + @mock.patch('time.time', mock.Mock(return_value=1234)) + def test_get_call(): + resp, body = cl.get("/hi") + headers = {"X-Auth-Token": "token", + "X-Auth-Project-Id": "project_id", + "User-Agent": cl.USER_AGENT, + 'Accept': 'application/json', + } + mock_request.assert_called_with( + "GET", + "http://example.com/hi", + headers=headers, + **self.TEST_REQUEST_BASE) + # Automatic JSON parsing + self.assertEqual(body, {"hi": 
"there"}) + + test_get_call() + + def test_post(self): + cl = get_authed_client() + + @mock.patch.object(requests.Session, "request", mock_request) + def test_post_call(): + cl.post("/hi", body=[1, 2, 3]) + headers = { + "X-Auth-Token": "token", + "X-Auth-Project-Id": "project_id", + "Content-Type": "application/json", + 'Accept': 'application/json', + "User-Agent": cl.USER_AGENT + } + mock_request.assert_called_with( + "POST", + "http://example.com/hi", + headers=headers, + data='[1, 2, 3]', + **self.TEST_REQUEST_BASE) + + test_post_call() + + def test_auth_failure(self): + cl = get_client() + + # response must not have x-server-management-url header + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + self.assertRaises(exceptions.AuthorizationFailure, cl.authenticate) + + test_auth_call() + + def test_connection_refused(self): + cl = get_client() + + @mock.patch.object(requests.Session, "request", refused_mock_request) + def test_refused_call(): + self.assertRaises(exceptions.ConnectionRefused, cl.get, "/hi") + + test_refused_call() + + def test_bad_request(self): + cl = get_client() + + @mock.patch.object(requests.Session, "request", bad_req_mock_request) + def test_refused_call(): + self.assertRaises(exceptions.BadRequest, cl.get, "/hi") + + test_refused_call() + + def test_client_logger(self): + cl1 = client.HTTPClient("username", "password", "project_id", + "auth_test", http_log_debug=True) + self.assertEqual(len(cl1._logger.handlers), 1) + + cl2 = client.HTTPClient("username", "password", "project_id", + "auth_test", http_log_debug=True) + self.assertEqual(len(cl2._logger.handlers), 1) diff --git a/awx/lib/site-packages/novaclient/tests/test_service_catalog.py b/awx/lib/site-packages/novaclient/tests/test_service_catalog.py new file mode 100644 index 0000000000..a34d82ffb0 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/test_service_catalog.py @@ -0,0 +1,144 @@ +from novaclient import exceptions +from novaclient import service_catalog +from novaclient.tests import utils + + +# Taken directly from keystone/content/common/samples/auth.json +# Do not edit this structure. Instead, grab the latest from there. 
+ +SERVICE_CATALOG = { + "access": { + "token": { + "id": "ab48a9efdfedb23ty3494", + "expires": "2010-11-01T03:32:15-05:00", + "tenant": { + "id": "345", + "name": "My Project" + } + }, + "user": { + "id": "123", + "name": "jqsmith", + "roles": [ + { + "id": "234", + "name": "compute:admin", + }, + { + "id": "235", + "name": "object-store:admin", + "tenantId": "1", + } + ], + "roles_links": [], + }, + "serviceCatalog": [ + { + "name": "Cloud Servers", + "type": "compute", + "endpoints": [ + { + # Tenant 1, no region, v1.0 + "tenantId": "1", + "publicURL": "https://compute1.host/v1/1", + "internalURL": "https://compute1.host/v1/1", + "versionId": "1.0", + "versionInfo": "https://compute1.host/v1.0/", + "versionList": "https://compute1.host/" + }, + { + # Tenant 2, with region, v1.1 + "tenantId": "2", + "publicURL": "https://compute1.host/v1.1/2", + "internalURL": "https://compute1.host/v1.1/2", + "region": "North", + "versionId": "1.1", + "versionInfo": "https://compute1.host/v1.1/", + "versionList": "https://compute1.host/" + }, + { + # Tenant 1, with region, v2.0 + "tenantId": "1", + "publicURL": "https://compute1.host/v2/1", + "internalURL": "https://compute1.host/v2/1", + "region": "North", + "versionId": "2", + "versionInfo": "https://compute1.host/v2/", + "versionList": "https://compute1.host/" + }, + ], + "endpoints_links": [], + }, + { + "name": "Nova Volumes", + "type": "volume", + "endpoints": [ + { + "tenantId": "1", + "publicURL": "https://volume1.host/v1/1", + "internalURL": "https://volume1.host/v1/1", + "region": "South", + "versionId": "1.0", + "versionInfo": "uri", + "versionList": "uri" + }, + { + "tenantId": "2", + "publicURL": "https://volume1.host/v1.1/2", + "internalURL": "https://volume1.host/v1.1/2", + "region": "South", + "versionId": "1.1", + "versionInfo": "https://volume1.host/v1.1/", + "versionList": "https://volume1.host/" + }, + ], + "endpoints_links": [ + { + "rel": "next", + "href": "https://identity1.host/v2.0/endpoints" + }, + ], + }, + ], + "serviceCatalog_links": [ + { + "rel": "next", + "href": "https://identity.host/v2.0/endpoints?session=2hfh8Ar", + }, + ], + }, +} + + +class ServiceCatalogTest(utils.TestCase): + def test_building_a_service_catalog(self): + sc = service_catalog.ServiceCatalog(SERVICE_CATALOG) + + self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for, + service_type='compute') + self.assertEqual(sc.url_for('tenantId', '1', service_type='compute'), + "https://compute1.host/v2/1") + self.assertEqual(sc.url_for('tenantId', '2', service_type='compute'), + "https://compute1.host/v1.1/2") + + self.assertRaises(exceptions.EndpointNotFound, sc.url_for, + "region", "South", service_type='compute') + + def test_building_a_service_catalog_insensitive_case(self): + sc = service_catalog.ServiceCatalog(SERVICE_CATALOG) + # Matching south (and catalog has South). 
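+        # url_for() lowercases both sides of the comparison, so 'south'
+        # matches both volume endpoints in the South region and the lookup
+        # is ambiguous rather than empty.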
+ self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for, + 'region', 'south', service_type='volume') + + def test_alternate_service_type(self): + sc = service_catalog.ServiceCatalog(SERVICE_CATALOG) + + self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for, + service_type='volume') + self.assertEqual(sc.url_for('tenantId', '1', service_type='volume'), + "https://volume1.host/v1/1") + self.assertEqual(sc.url_for('tenantId', '2', service_type='volume'), + "https://volume1.host/v1.1/2") + + self.assertRaises(exceptions.EndpointNotFound, sc.url_for, + "region", "North", service_type='volume') diff --git a/awx/lib/site-packages/novaclient/tests/test_shell.py b/awx/lib/site-packages/novaclient/tests/test_shell.py new file mode 100644 index 0000000000..ae9e6283f0 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/test_shell.py @@ -0,0 +1,200 @@ +import io +import prettytable +import re +import sys + +from distutils.version import StrictVersion + +import fixtures +import mock +from testtools import matchers + +import novaclient.client +from novaclient import exceptions +import novaclient.shell +from novaclient.tests import utils + +FAKE_ENV = {'OS_USERNAME': 'username', + 'OS_PASSWORD': 'password', + 'OS_TENANT_NAME': 'tenant_name', + 'OS_AUTH_URL': 'http://no.where'} + +FAKE_ENV2 = {'OS_USERNAME': 'username', + 'OS_PASSWORD': 'password', + 'OS_TENANT_ID': 'tenant_id', + 'OS_AUTH_URL': 'http://no.where'} + + +class ShellTest(utils.TestCase): + + def make_env(self, exclude=None, fake_env=FAKE_ENV): + env = dict((k, v) for k, v in fake_env.items() if k != exclude) + self.useFixture(fixtures.MonkeyPatch('os.environ', env)) + + def setUp(self): + super(ShellTest, self).setUp() + self.useFixture(fixtures.MonkeyPatch( + 'novaclient.client.get_client_class', + mock.MagicMock)) + self.nc_util = mock.patch('novaclient.utils.isunauthenticated').start() + self.nc_util.return_value = False + + def shell(self, argstr, exitcodes=(0,)): + orig = sys.stdout + orig_stderr = sys.stderr + try: + sys.stdout = io.BytesIO() + sys.stderr = io.BytesIO() + _shell = novaclient.shell.OpenStackComputeShell() + _shell.main(argstr.split()) + except SystemExit: + exc_type, exc_value, exc_traceback = sys.exc_info() + self.assertIn(exc_value.code, exitcodes) + finally: + stdout = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = orig + stderr = sys.stderr.getvalue() + sys.stderr.close() + sys.stderr = orig_stderr + return (stdout, stderr) + + def test_help_unknown_command(self): + self.assertRaises(exceptions.CommandError, self.shell, 'help foofoo') + + def test_invalid_timeout(self): + for f in [0, -1, -10]: + cmd_text = '--timeout %s' % (f) + stdout, stderr = self.shell(cmd_text, exitcodes=[0, 2]) + required = [ + 'argument --timeout: %s must be greater than 0' % (f), + ] + for r in required: + self.assertIn(r, stderr) + + def test_help(self): + required = [ + '.*?^usage: ', + '.*?^\s+root-password\s+Change the root password', + '.*?^See "nova help COMMAND" for help on a specific command', + ] + stdout, stderr = self.shell('help') + for r in required: + self.assertThat((stdout + stderr), + matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE)) + + def test_help_on_subcommand(self): + required = [ + '.*?^usage: nova root-password', + '.*?^Change the root password', + '.*?^Positional arguments:', + ] + stdout, stderr = self.shell('help root-password') + for r in required: + self.assertThat((stdout + stderr), + matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE)) + + def test_help_no_options(self): + 
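+        # Running the shell with an empty argument list should fall back to
+        # printing the same general help text as 'nova help'.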
required = [ + '.*?^usage: ', + '.*?^\s+root-password\s+Change the root password', + '.*?^See "nova help COMMAND" for help on a specific command', + ] + stdout, stderr = self.shell('') + for r in required: + self.assertThat((stdout + stderr), + matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE)) + + def test_bash_completion(self): + stdout, stderr = self.shell('bash-completion') + # just check we have some output + required = [ + '.*--matching', + '.*--wrap', + '.*help', + '.*secgroup-delete-rule', + '.*--priority'] + for r in required: + self.assertThat((stdout + stderr), + matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE)) + + def test_no_username(self): + required = ('You must provide a username' + ' via either --os-username or env[OS_USERNAME]',) + self.make_env(exclude='OS_USERNAME') + try: + self.shell('list') + except exceptions.CommandError as message: + self.assertEqual(required, message.args) + else: + self.fail('CommandError not raised') + + def test_no_tenant_name(self): + required = ('You must provide a tenant name or tenant id' + ' via --os-tenant-name, --os-tenant-id,' + ' env[OS_TENANT_NAME] or env[OS_TENANT_ID]',) + self.make_env(exclude='OS_TENANT_NAME') + try: + self.shell('list') + except exceptions.CommandError as message: + self.assertEqual(required, message.args) + else: + self.fail('CommandError not raised') + + def test_no_tenant_id(self): + required = ('You must provide a tenant name or tenant id' + ' via --os-tenant-name, --os-tenant-id,' + ' env[OS_TENANT_NAME] or env[OS_TENANT_ID]',) + self.make_env(exclude='OS_TENANT_ID', fake_env=FAKE_ENV2) + try: + self.shell('list') + except exceptions.CommandError as message: + self.assertEqual(required, message.args) + else: + self.fail('CommandError not raised') + + def test_no_auth_url(self): + required = ('You must provide an auth url' + ' via either --os-auth-url or env[OS_AUTH_URL] or' + ' specify an auth_system which defines a default url' + ' with --os-auth-system or env[OS_AUTH_SYSTEM]',) + self.make_env(exclude='OS_AUTH_URL') + try: + self.shell('list') + except exceptions.CommandError as message: + self.assertEqual(required, message.args) + else: + self.fail('CommandError not raised') + + @mock.patch('sys.stdin', side_effect=mock.MagicMock) + @mock.patch('getpass.getpass', return_value='password') + def test_password(self, mock_getpass, mock_stdin): + # default output of empty tables differs depending between prettytable + # versions + if (hasattr(prettytable, '__version__') and + StrictVersion(prettytable.__version__) < StrictVersion('0.7.2')): + ex = '\n' + else: + ex = ( + '+----+------+--------+------------+-------------+----------+\n' + '| ID | Name | Status | Task State | Power State | Networks |\n' + '+----+------+--------+------------+-------------+----------+\n' + '+----+------+--------+------------+-------------+----------+\n' + ) + self.make_env(exclude='OS_PASSWORD') + stdout, stderr = self.shell('list') + self.assertEqual((stdout + stderr), ex) + + @mock.patch('sys.stdin', side_effect=mock.MagicMock) + @mock.patch('getpass.getpass', side_effect=EOFError) + def test_no_password(self, mock_getpass, mock_stdin): + required = ('Expecting a password provided' + ' via either --os-password, env[OS_PASSWORD],' + ' or prompted response',) + self.make_env(exclude='OS_PASSWORD') + try: + self.shell('list') + except exceptions.CommandError as message: + self.assertEqual(required, message.args) + else: + self.fail('CommandError not raised') diff --git a/awx/lib/site-packages/novaclient/tests/test_utils.py 
b/awx/lib/site-packages/novaclient/tests/test_utils.py new file mode 100644 index 0000000000..8809133aca --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/test_utils.py @@ -0,0 +1,165 @@ +import sys + +import mock +import six + +from novaclient import exceptions +from novaclient import utils +from novaclient import base +from novaclient.tests import utils as test_utils + +UUID = '8e8ec658-c7b0-4243-bdf8-6f7f2952c0d0' + + +class FakeResource(object): + NAME_ATTR = 'name' + + def __init__(self, _id, properties): + self.id = _id + try: + self.name = properties['name'] + except KeyError: + pass + + +class FakeManager(base.ManagerWithFind): + + resource_class = FakeResource + + resources = [ + FakeResource('1234', {'name': 'entity_one'}), + FakeResource(UUID, {'name': 'entity_two'}), + FakeResource('5678', {'name': '9876'}) + ] + + def get(self, resource_id): + for resource in self.resources: + if resource.id == str(resource_id): + return resource + raise exceptions.NotFound(resource_id) + + def list(self): + return self.resources + + +class FakeDisplayResource(object): + NAME_ATTR = 'display_name' + + def __init__(self, _id, properties): + self.id = _id + try: + self.display_name = properties['display_name'] + except KeyError: + pass + + +class FakeDisplayManager(FakeManager): + + resource_class = FakeDisplayResource + + resources = [ + FakeDisplayResource('4242', {'display_name': 'entity_three'}), + ] + + +class FindResourceTestCase(test_utils.TestCase): + + def setUp(self): + super(FindResourceTestCase, self).setUp() + self.manager = FakeManager(None) + + def test_find_none(self): + """Test a few non-valid inputs.""" + self.assertRaises(exceptions.CommandError, + utils.find_resource, + self.manager, + 'asdf') + self.assertRaises(exceptions.CommandError, + utils.find_resource, + self.manager, + None) + self.assertRaises(exceptions.CommandError, + utils.find_resource, + self.manager, + {}) + + def test_find_by_integer_id(self): + output = utils.find_resource(self.manager, 1234) + self.assertEqual(output, self.manager.get('1234')) + + def test_find_by_str_id(self): + output = utils.find_resource(self.manager, '1234') + self.assertEqual(output, self.manager.get('1234')) + + def test_find_by_uuid(self): + output = utils.find_resource(self.manager, UUID) + self.assertEqual(output, self.manager.get(UUID)) + + def test_find_by_str_name(self): + output = utils.find_resource(self.manager, 'entity_one') + self.assertEqual(output, self.manager.get('1234')) + + def test_find_by_str_displayname(self): + display_manager = FakeDisplayManager(None) + output = utils.find_resource(display_manager, 'entity_three') + self.assertEqual(output, display_manager.get('4242')) + + +class _FakeResult(object): + def __init__(self, name, value): + self.name = name + self.value = value + + +class PrintResultTestCase(test_utils.TestCase): + @mock.patch('sys.stdout', six.StringIO()) + def test_print_list_sort_by_str(self): + objs = [_FakeResult("k1", 1), + _FakeResult("k3", 2), + _FakeResult("k2", 3)] + + utils.print_list(objs, ["Name", "Value"], sortby_index=0) + + self.assertEqual(sys.stdout.getvalue(), + '+------+-------+\n' + '| Name | Value |\n' + '+------+-------+\n' + '| k1 | 1 |\n' + '| k2 | 3 |\n' + '| k3 | 2 |\n' + '+------+-------+\n') + + @mock.patch('sys.stdout', six.StringIO()) + def test_print_list_sort_by_integer(self): + objs = [_FakeResult("k1", 1), + _FakeResult("k3", 2), + _FakeResult("k2", 3)] + + utils.print_list(objs, ["Name", "Value"], sortby_index=1) + + 
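+        # sortby_index=1 sorts on the second column ("Value"), so the
+        # expected rows below come out in value order (1, 2, 3), i.e.
+        # k1, k3, k2.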
self.assertEqual(sys.stdout.getvalue(), + '+------+-------+\n' + '| Name | Value |\n' + '+------+-------+\n' + '| k1 | 1 |\n' + '| k3 | 2 |\n' + '| k2 | 3 |\n' + '+------+-------+\n') + + # without sorting + @mock.patch('sys.stdout', six.StringIO()) + def test_print_list_sort_by_none(self): + objs = [_FakeResult("k1", 1), + _FakeResult("k3", 3), + _FakeResult("k2", 2)] + + utils.print_list(objs, ["Name", "Value"], sortby_index=None) + + self.assertEqual(sys.stdout.getvalue(), + '+------+-------+\n' + '| Name | Value |\n' + '+------+-------+\n' + '| k1 | 1 |\n' + '| k3 | 3 |\n' + '| k2 | 2 |\n' + '+------+-------+\n') diff --git a/awx/lib/site-packages/novaclient/tests/utils.py b/awx/lib/site-packages/novaclient/tests/utils.py new file mode 100644 index 0000000000..4fccff9b7c --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/utils.py @@ -0,0 +1,47 @@ +import os + +import fixtures +import requests +import testtools + + +class TestCase(testtools.TestCase): + TEST_REQUEST_BASE = { + 'verify': True, + } + + def setUp(self): + super(TestCase, self).setUp() + if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or + os.environ.get('OS_STDOUT_CAPTURE') == '1'): + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or + os.environ.get('OS_STDERR_CAPTURE') == '1'): + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + + +class TestResponse(requests.Response): + """ + Class used to wrap requests.Response and provide some + convenience to initialize with a dict + """ + + def __init__(self, data): + self._text = None + super(TestResponse, self) + if isinstance(data, dict): + self.status_code = data.get('status_code', None) + self.headers = data.get('headers', None) + # Fake the text attribute to streamline Response creation + self._text = data.get('text', None) + else: + self.status_code = data + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + @property + def text(self): + return self._text diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/__init__.py b/awx/lib/site-packages/novaclient/tests/v1_1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/__init__.py b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/fakes.py b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/fakes.py new file mode 100644 index 0000000000..b9e79e8412 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/fakes.py @@ -0,0 +1,137 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
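+
+# This module extends the core v1_1 fakes with canned responses for the
+# contrib extensions.  Each handler below is named for the HTTP method
+# plus the munged request path and returns a (status, headers, body)
+# tuple.  A minimal sketch of the naming rule (illustrative only, not
+# part of novaclient; assumes any query string is already stripped):
+#
+#     def handler_name(method, url):
+#         munged = url.strip('/').replace('/', '_').replace('.', '_')
+#         munged = munged.replace('-', '_')
+#         return '%s_%s' % (method.lower(), munged)
+#
+#     handler_name('GET', '/os-tenant-networks/1')
+#     # -> 'get_os_tenant_networks_1'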
+ +from novaclient.v1_1 import client +from novaclient.tests.v1_1 import fakes + + +class FakeClient(fakes.FakeClient): + def __init__(self, *args, **kwargs): + client.Client.__init__(self, 'username', 'password', + 'project_id', 'auth_url', + extensions=kwargs.get('extensions')) + self.client = FakeHTTPClient(**kwargs) + + +class FakeHTTPClient(fakes.FakeHTTPClient): + def get_os_tenant_networks(self): + return (200, {}, {'networks': [{"label": "1", "cidr": "10.0.0.0/24", + 'project_id': '4ffc664c198e435e9853f2538fbcd7a7', + 'id': '1'}]}) + + def get_os_tenant_networks_1(self, **kw): + return (200, {}, {'network': {"label": "1", "cidr": "10.0.0.0/24", + 'project_id': '4ffc664c198e435e9853f2538fbcd7a7', + 'id': '1'}}) + + def post_os_tenant_networks(self, **kw): + return (201, {}, {'network': {"label": "1", "cidr": "10.0.0.0/24", + 'project_id': '4ffc664c198e435e9853f2538fbcd7a7', + 'id': '1'}}) + + def delete_os_tenant_networks_1(self, **kw): + return (204, {}, None) + + def get_os_baremetal_nodes(self, **kw): + return ( + 200, {}, { + 'nodes': [ + { + "id": 1, + "instance_uuid": None, + "pm_address": "1.2.3.4", + "interfaces": [], + "cpus": 2, + "local_gb": 10, + "memory_mb": 5, + "pm_address": "2.3.4.5", + "pm_user": "pmuser", + "pm_password": "pmpass", + "prov_mac_address": "aa:bb:cc:dd:ee:ff", + "prov_vlan_id": 1, + "service_host": "somehost", + "terminal_port": 8080, + } + ] + } + ) + + def get_os_baremetal_nodes_1(self, **kw): + return ( + 200, {}, { + 'node': { + "id": 1, + "instance_uuid": None, + "pm_address": "1.2.3.4", + "interfaces": [], + "cpus": 2, + "local_gb": 10, + "memory_mb": 5, + "pm_user": "pmuser", + "pm_password": "pmpass", + "prov_mac_address": "aa:bb:cc:dd:ee:ff", + "prov_vlan_id": 1, + "service_host": "somehost", + "terminal_port": 8080, + } + } + ) + + def post_os_baremetal_nodes(self, **kw): + return ( + 200, {}, { + 'node': { + "id": 1, + "instance_uuid": None, + "cpus": 2, + "local_gb": 10, + "memory_mb": 5, + "pm_address": "2.3.4.5", + "pm_user": "pmuser", + "pm_password": "pmpass", + "prov_mac_address": "aa:bb:cc:dd:ee:ff", + "prov_vlan_id": 1, + "service_host": "somehost", + "terminal_port": 8080, + } + } + ) + + def delete_os_baremetal_nodes_1(self, **kw): + return (202, {}, {}) + + def post_os_baremetal_nodes_1_action(self, **kw): + body = kw['body'] + action = body.keys()[0] + if action == "add_interface": + return ( + 200, {}, { + 'interface': { + "id": 2, + "address": "bb:cc:dd:ee:ff:aa", + "datapath_id": 1, + "port_no": 2, + } + } + ) + elif action == "remove_interface": + return (202, {}, {}) + else: + return (500, {}, {}) + + def post_os_assisted_volume_snapshots(self, **kw): + return (202, {}, {'snapshot': {'id': 'blah', 'volumeId': '1'}}) + + def delete_os_assisted_volume_snapshots_x(self, **kw): + return (202, {}, {}) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_assisted_volume_snapshots.py b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_assisted_volume_snapshots.py new file mode 100644 index 0000000000..34d958f080 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_assisted_volume_snapshots.py @@ -0,0 +1,41 @@ +# Copyright (C) 2013, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Assisted volume snapshots - to be used by Cinder and not end users. +""" + +from novaclient import extension +from novaclient.tests import utils +from novaclient.tests.v1_1.contrib import fakes +from novaclient.v1_1.contrib import assisted_volume_snapshots as assisted_snaps + + +extensions = [ + extension.Extension(assisted_snaps.__name__.split(".")[-1], + assisted_snaps), +] +cs = fakes.FakeClient(extensions=extensions) + + +class AssistedVolumeSnapshotsTestCase(utils.TestCase): + + def test_create_snap(self): + res = cs.assisted_volume_snapshots.create('1', {}) + cs.assert_called('POST', '/os-assisted-volume-snapshots') + + def test_delete_snap(self): + res = cs.assisted_volume_snapshots.delete('x', {}) + cs.assert_called('DELETE', + '/os-assisted-volume-snapshots/x?delete_info={}') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_baremetal.py b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_baremetal.py new file mode 100644 index 0000000000..88b129f0ff --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_baremetal.py @@ -0,0 +1,65 @@ +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
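+
+# Pattern shared by these contrib tests: register the extension with a
+# FakeClient, call the manager under test, then assert on the last
+# recorded HTTP request, e.g. (mirroring test_get_node below):
+#
+#     n = cs.baremetal.get(1)
+#     cs.assert_called('GET', '/os-baremetal-nodes/1')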
+ + +from novaclient import extension +from novaclient.v1_1.contrib import baremetal + +from novaclient.tests import utils +from novaclient.tests.v1_1.contrib import fakes + + +extensions = [ + extension.Extension(baremetal.__name__.split(".")[-1], baremetal), + ] +cs = fakes.FakeClient(extensions=extensions) + + +class BaremetalExtensionTest(utils.TestCase): + + def test_list_nodes(self): + nl = cs.baremetal.list() + cs.assert_called('GET', '/os-baremetal-nodes') + for n in nl: + self.assertTrue(isinstance(n, baremetal.BareMetalNode)) + + def test_get_node(self): + n = cs.baremetal.get(1) + cs.assert_called('GET', '/os-baremetal-nodes/1') + self.assertTrue(isinstance(n, baremetal.BareMetalNode)) + + def test_create_node(self): + n = cs.baremetal.create("service_host", 1, 1024, 2048, + "aa:bb:cc:dd:ee:ff") + cs.assert_called('POST', '/os-baremetal-nodes') + self.assertTrue(isinstance(n, baremetal.BareMetalNode)) + + def test_delete_node(self): + n = cs.baremetal.get(1) + cs.baremetal.delete(n) + cs.assert_called('DELETE', '/os-baremetal-nodes/1') + + def test_node_add_interface(self): + i = cs.baremetal.add_interface(1, "bb:cc:dd:ee:ff:aa", 1, 2) + cs.assert_called('POST', '/os-baremetal-nodes/1/action') + self.assertTrue(isinstance(i, baremetal.BareMetalNodeInterface)) + + def test_node_remove_interface(self): + cs.baremetal.remove_interface(1, "bb:cc:dd:ee:ff:aa") + cs.assert_called('POST', '/os-baremetal-nodes/1/action') + + def test_node_list_interfaces(self): + cs.baremetal.list_interfaces(1) + cs.assert_called('GET', '/os-baremetal-nodes/1') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_cells.py b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_cells.py new file mode 100644 index 0000000000..187fab8477 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_cells.py @@ -0,0 +1,42 @@ +# Copyright 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
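+
+# The cells extension exposes capacities both per cell and globally:
+# cells.capacities('child_cell') should issue
+# GET /os-cells/child_cell/capacities, while cells.capacities() with
+# no argument should issue GET /os-cells/capacities, as the two
+# capacity tests below verify.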
+ +from novaclient import extension +from novaclient.tests import utils +from novaclient.tests.v1_1.contrib import fakes +from novaclient.v1_1.contrib import cells + + +extensions = [ + extension.Extension(cells.__name__.split(".")[-1], + cells), +] +cs = fakes.FakeClient(extensions=extensions) + + +class CellsExtensionTests(utils.TestCase): + def test_get_cells(self): + cell_name = 'child_cell' + cs.cells.get(cell_name) + cs.assert_called('GET', '/os-cells/%s' % cell_name) + + def test_get_capacities_for_a_given_cell(self): + cell_name = 'child_cell' + cs.cells.capacities(cell_name) + cs.assert_called('GET', '/os-cells/%s/capacities' % cell_name) + + def test_get_capacities_for_all_cells(self): + cs.cells.capacities() + cs.assert_called('GET', '/os-cells/capacities') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_instance_actions.py b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_instance_actions.py new file mode 100644 index 0000000000..cdfa14985f --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_instance_actions.py @@ -0,0 +1,41 @@ +# Copyright 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from novaclient import extension +from novaclient.v1_1.contrib import instance_action + +from novaclient.tests import utils +from novaclient.tests.v1_1.contrib import fakes + + +extensions = [ + extension.Extension(instance_action.__name__.split(".")[-1], + instance_action), +] +cs = fakes.FakeClient(extensions=extensions) + + +class InstanceActionExtensionTests(utils.TestCase): + def test_list_instance_actions(self): + server_uuid = '1234' + cs.instance_action.list(server_uuid) + cs.assert_called('GET', '/servers/%s/os-instance-actions' % + server_uuid) + + def test_get_instance_action(self): + server_uuid = '1234' + request_id = 'req-abcde12345' + cs.instance_action.get(server_uuid, request_id) + cs.assert_called('GET', '/servers/%s/os-instance-actions/%s' % + (server_uuid, request_id)) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_list_extensions.py b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_list_extensions.py new file mode 100644 index 0000000000..f9ede2972b --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_list_extensions.py @@ -0,0 +1,21 @@ +from novaclient import extension +from novaclient.v1_1.contrib import list_extensions + +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +extensions = [ + extension.Extension(list_extensions.__name__.split(".")[-1], + list_extensions), +] +cs = fakes.FakeClient(extensions=extensions) + + +class ListExtensionsTests(utils.TestCase): + def test_list_extensions(self): + all_exts = cs.list_extensions.show_all() + cs.assert_called('GET', '/extensions') + self.assertTrue(len(all_exts) > 0) + for r in all_exts: + self.assertTrue(len(r.summary) > 0) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_migrations.py 
b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_migrations.py new file mode 100644 index 0000000000..7b49c4cc70 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_migrations.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient import extension +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes +from novaclient.v1_1.contrib import migrations + +extensions = [ + extension.Extension(migrations.__name__.split(".")[-1], + migrations), +] +cs = fakes.FakeClient(extensions=extensions) + + +class MigrationsTest(utils.TestCase): + + def test_list_migrations(self): + ml = cs.migrations.list() + cs.assert_called('GET', '/os-migrations') + for m in ml: + self.assertTrue(isinstance(m, migrations.Migration)) + + def test_list_migrations_with_filters(self): + ml = cs.migrations.list('host1', 'finished', 'child1') + + cs.assert_called('GET', + '/os-migrations?status=finished&host=host1' + '&cell_name=child1') + for m in ml: + self.assertTrue(isinstance(m, migrations.Migration)) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_tenant_networks.py b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_tenant_networks.py new file mode 100644 index 0000000000..bb2cbaf8ca --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/contrib/test_tenant_networks.py @@ -0,0 +1,49 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
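+
+# These tests check that the tenant_networks manager maps its CRUD
+# calls onto the os-tenant-networks endpoints, e.g. (as verified in
+# test_create_tenant_networks below):
+#
+#     cs.tenant_networks.create(label="net", cidr="10.0.0.0/24")
+#     cs.assert_called('POST', '/os-tenant-networks')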
+ +from novaclient import extension +from novaclient.v1_1.contrib import tenant_networks + +from novaclient.tests import utils +from novaclient.tests.v1_1.contrib import fakes + + +extensions = [ + extension.Extension(tenant_networks.__name__.split(".")[-1], + tenant_networks), +] +cs = fakes.FakeClient(extensions=extensions) + + +class TenantNetworkExtensionTests(utils.TestCase): + def test_list_tenant_networks(self): + nets = cs.tenant_networks.list() + cs.assert_called('GET', '/os-tenant-networks') + self.assertTrue(len(nets) > 0) + + def test_get_tenant_network(self): + cs.tenant_networks.get(1) + cs.assert_called('GET', '/os-tenant-networks/1') + + def test_create_tenant_networks(self): + cs.tenant_networks.create(label="net", + cidr="10.0.0.0/24") + cs.assert_called('POST', '/os-tenant-networks') + + def test_delete_tenant_networks(self): + cs.tenant_networks.delete(1) + cs.assert_called('DELETE', '/os-tenant-networks/1') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/fakes.py b/awx/lib/site-packages/novaclient/tests/v1_1/fakes.py new file mode 100644 index 0000000000..7b6bc119d4 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/fakes.py @@ -0,0 +1,1949 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2011 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
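+
+# FakeHTTPClient below replays canned API responses without any network
+# I/O: _cs_request() parses any query string into kwargs, munges the
+# remaining URL into a Python identifier, and dispatches to the method
+# of that name, which returns a (status, headers, body) tuple.
+# Roughly (an illustrative trace of the munging in _cs_request):
+#
+#     url = '/servers/1234/ips?foo=bar'
+#     munged = url.rsplit('?', 1)[0]
+#     munged = munged.strip('/').replace('/', '_').replace('.', '_')
+#     munged = munged.replace('-', '_').replace(' ', '_')
+#     callback = '%s_%s' % ('get', munged)  # -> 'get_servers_1234_ips'
+#
+# Every request is also appended to self.callstack, which is what the
+# assert_called() helpers used throughout these tests inspect.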
+ +from datetime import datetime + +import six + +from novaclient import client as base_client +from novaclient import exceptions +from novaclient.openstack.common import strutils +from novaclient.openstack.common.py3kcompat import urlutils +from novaclient.tests import fakes +from novaclient.tests import utils +from novaclient.v1_1 import client + + +class FakeClient(fakes.FakeClient, client.Client): + + def __init__(self, *args, **kwargs): + client.Client.__init__(self, 'username', 'password', + 'project_id', 'auth_url', + extensions=kwargs.get('extensions')) + self.client = FakeHTTPClient(**kwargs) + + +class FakeHTTPClient(base_client.HTTPClient): + + def __init__(self, **kwargs): + self.username = 'username' + self.password = 'password' + self.auth_url = 'auth_url' + self.tenant_id = 'tenant_id' + self.callstack = [] + self.projectid = 'projectid' + self.user = 'user' + self.region_name = 'region_name' + self.endpoint_type = 'endpoint_type' + self.service_type = 'service_type' + self.service_name = 'service_name' + self.volume_service_name = 'volume_service_name' + self.timings = 'timings' + self.bypass_url = 'bypass_url' + self.os_cache = 'os_cache' + self.http_log_debug = 'http_log_debug' + + def _cs_request(self, url, method, **kwargs): + # Check that certain things are called correctly + if method in ['GET', 'DELETE']: + assert 'body' not in kwargs + elif method == 'PUT': + assert 'body' in kwargs + + # Call the method + args = urlutils.parse_qsl(urlutils.urlparse(url)[4]) + kwargs.update(args) + munged_url = url.rsplit('?', 1)[0] + munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_') + munged_url = munged_url.replace('-', '_') + munged_url = munged_url.replace(' ', '_') + + callback = "%s_%s" % (method.lower(), munged_url) + + if not hasattr(self, callback): + raise AssertionError('Called unknown API method: %s %s, ' + 'expected fakes method name: %s' % + (method, url, callback)) + + # Note the call + self.callstack.append((method, url, kwargs.get('body', None))) + + status, headers, body = getattr(self, callback)(**kwargs) + r = utils.TestResponse({ + "status_code": status, + "text": body, + "headers": headers, + }) + return r, body + + # + # agents + # + + def get_os_agents(self, **kw): + hypervisor = kw.get('hypervisor', 'kvm') + return (200, {}, {'agents': + [{'hypervisor': hypervisor, + 'os': 'win', + 'architecture': 'x86', + 'version': '7.0', + 'url': 'xxx://xxxx/xxx/xxx', + 'md5hash': 'add6bb58e139be103324d04d82d8f545', + 'id': 1}, + {'hypervisor': hypervisor, + 'os': 'linux', + 'architecture': 'x86', + 'version': '16.0', + 'url': 'xxx://xxxx/xxx/xxx1', + 'md5hash': 'add6bb58e139be103324d04d82d8f546', + 'id': 2}, + ]}) + + def post_os_agents(self, body): + return (200, {}, {'agent': { + 'url': '/xxx/xxx/xxx', + 'hypervisor': body['agent']['hypervisor'], + 'md5hash': 'add6bb58e139be103324d04d82d8f546', + 'version': '7.0', + 'architecture': 'x86', + 'os': 'win', + 'id': 1}}) + + def delete_os_agents_1(self, **kw): + return (202, {}, None) + + def put_os_agents_1(self, body, **kw): + return (200, {}, {"agent": { + "url": "/yyy/yyyy/yyyy", + "version": "8.0", + "md5hash": "add6bb58e139be103324d04d82d8f546", + 'id': 1}}) + + # + # List all extensions + # + + def get_extensions(self, **kw): + exts = [ + { + "alias": "NMN", + "description": "Multiple network support", + "links": [], + "name": "Multinic", + "namespace": ("http://docs.openstack.org/" + "compute/ext/multinic/api/v1.1"), + "updated": "2011-06-09T00:00:00+00:00" + }, + { + "alias": "OS-DCF", + 
"description": "Disk Management Extension", + "links": [], + "name": "DiskConfig", + "namespace": ("http://docs.openstack.org/" + "compute/ext/disk_config/api/v1.1"), + "updated": "2011-09-27T00:00:00+00:00" + }, + { + "alias": "OS-EXT-SRV-ATTR", + "description": "Extended Server Attributes support.", + "links": [], + "name": "ExtendedServerAttributes", + "namespace": ("http://docs.openstack.org/" + "compute/ext/extended_status/api/v1.1"), + "updated": "2011-11-03T00:00:00+00:00" + }, + { + "alias": "OS-EXT-STS", + "description": "Extended Status support", + "links": [], + "name": "ExtendedStatus", + "namespace": ("http://docs.openstack.org/" + "compute/ext/extended_status/api/v1.1"), + "updated": "2011-11-03T00:00:00+00:00" + }, + ] + return (200, {}, { + "extensions": exts, + }) + + # + # Limits + # + + def get_limits(self, **kw): + return (200, {}, {"limits": { + "rate": [ + { + "uri": "*", + "regex": ".*", + "limit": [ + { + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z" + }, + { + "value": 10, + "verb": "PUT", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z" + }, + { + "value": 100, + "verb": "DELETE", + "remaining": 100, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z" + } + ] + }, + { + "uri": "*/servers", + "regex": "^/servers", + "limit": [ + { + "verb": "POST", + "value": 25, + "remaining": 24, + "unit": "DAY", + "next-available": "2011-12-15T22:42:45Z" + } + ] + } + ], + "absolute": { + "maxTotalRAMSize": 51200, + "maxServerMeta": 5, + "maxImageMeta": 5, + "maxPersonality": 5, + "maxPersonalitySize": 10240 + }, + }, + }) + + # + # Servers + # + + def get_servers(self, **kw): + return (200, {}, {"servers": [ + {'id': 1234, 'name': 'sample-server'}, + {'id': 5678, 'name': 'sample-server2'} + ]}) + + def get_servers_detail(self, **kw): + return (200, {}, {"servers": [ + { + "id": 1234, + "name": "sample-server", + "image": { + "id": 2, + "name": "sample image", + }, + "flavor": { + "id": 1, + "name": "256 MB Server", + }, + "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", + "status": "BUILD", + "progress": 60, + "addresses": { + "public": [{ + "version": 4, + "addr": "1.2.3.4", + }, + { + "version": 4, + "addr": "5.6.7.8", + }], + "private": [{ + "version": 4, + "addr": "10.11.12.13", + }], + }, + "metadata": { + "Server Label": "Web Head 1", + "Image Version": "2.1" + }, + "OS-EXT-SRV-ATTR:host": "computenode1", + "security_groups": [{ + 'id': 1, 'name': 'securitygroup1', + 'description': 'FAKE_SECURITY_GROUP', + 'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7' + }], + "OS-EXT-MOD:some_thing": "mod_some_thing_value", + }, + { + "id": 5678, + "name": "sample-server2", + "image": { + "id": 2, + "name": "sample image", + }, + "flavor": { + "id": 1, + "name": "256 MB Server", + }, + "hostId": "9e107d9d372bb6826bd81d3542a419d6", + "status": "ACTIVE", + "addresses": { + "public": [{ + "version": 4, + "addr": "4.5.6.7", + }, + { + "version": 4, + "addr": "5.6.9.8", + }], + "private": [{ + "version": 4, + "addr": "10.13.12.13", + }], + }, + "metadata": { + "Server Label": "DB 1" + }, + "OS-EXT-SRV-ATTR:host": "computenode2", + }, + { + "id": 9012, + "name": "sample-server3", + "image": "", + "flavor": { + "id": 1, + "name": "256 MB Server", + }, + "hostId": "9e107d9d372bb6826bd81d3542a419d6", + "status": "ACTIVE", + "addresses": { + "public": [{ + "version": 4, + "addr": "4.5.6.7", + }, + { + "version": 4, + "addr": "5.6.9.8", + }], + "private": [{ + "version": 4, + "addr": 
"10.13.12.13", + }], + }, + "metadata": { + "Server Label": "DB 1" + } + } + ]}) + + def post_servers(self, body, **kw): + assert set(body.keys()) <= set(['server', 'os:scheduler_hints']) + fakes.assert_has_keys(body['server'], + required=['name', 'imageRef', 'flavorRef'], + optional=['metadata', 'personality']) + if 'personality' in body['server']: + for pfile in body['server']['personality']: + fakes.assert_has_keys(pfile, required=['path', 'contents']) + return (202, {}, self.get_servers_1234()[2]) + + def post_os_volumes_boot(self, body, **kw): + assert set(body.keys()) <= set(['server', 'os:scheduler_hints']) + fakes.assert_has_keys(body['server'], + required=['name', 'flavorRef'], + optional=['imageRef']) + + # Require one, and only one, of the keys for bdm + if 'block_device_mapping' not in body['server']: + if 'block_device_mapping_v2' not in body['server']: + raise AssertionError( + "missing required keys: 'block_device_mapping'" + ) + elif 'block_device_mapping_v2' in body['server']: + raise AssertionError("found extra keys: 'block_device_mapping'") + + return (202, {}, self.get_servers_9012()[2]) + + def get_servers_1234(self, **kw): + r = {'server': self.get_servers_detail()[2]['servers'][0]} + return (200, {}, r) + + def get_servers_5678(self, **kw): + r = {'server': self.get_servers_detail()[2]['servers'][1]} + return (200, {}, r) + + def get_servers_9012(self, **kw): + r = {'server': self.get_servers_detail()[2]['servers'][2]} + return (200, {}, r) + + def put_servers_1234(self, body, **kw): + assert body.keys() == ['server'] + fakes.assert_has_keys(body['server'], optional=['name', 'adminPass']) + return (204, {}, body) + + def delete_servers_1234(self, **kw): + return (202, {}, None) + + def delete_servers_5678(self, **kw): + return (202, {}, None) + + def delete_servers_1234_metadata_test_key(self, **kw): + return (204, {}, None) + + def delete_servers_1234_metadata_key1(self, **kw): + return (204, {}, None) + + def delete_servers_1234_metadata_key2(self, **kw): + return (204, {}, None) + + def post_servers_1234_metadata(self, **kw): + return (204, {}, {'metadata': {'test_key': 'test_value'}}) + + def get_servers_1234_diagnostics(self, **kw): + return (200, {}, {'data': 'Fake diagnostics'}) + + def post_servers_uuid1_metadata(self, **kw): + return (204, {}, {'metadata': {'key1': 'val1'}}) + + def post_servers_uuid2_metadata(self, **kw): + return (204, {}, {'metadata': {'key1': 'val1'}}) + + def post_servers_uuid3_metadata(self, **kw): + return (204, {}, {'metadata': {'key1': 'val1'}}) + + def post_servers_uuid4_metadata(self, **kw): + return (204, {}, {'metadata': {'key1': 'val1'}}) + + def delete_servers_uuid1_metadata_key1(self, **kw): + return (200, {}, {'data': 'Fake diagnostics'}) + + def delete_servers_uuid2_metadata_key1(self, **kw): + return (200, {}, {'data': 'Fake diagnostics'}) + + def delete_servers_uuid3_metadata_key1(self, **kw): + return (200, {}, {'data': 'Fake diagnostics'}) + + def delete_servers_uuid4_metadata_key1(self, **kw): + return (200, {}, {'data': 'Fake diagnostics'}) + + def get_servers_1234_os_security_groups(self, **kw): + return (200, {}, { + "security_groups": [{ + 'id': 1, + 'name': 'securitygroup1', + 'description': 'FAKE_SECURITY_GROUP', + 'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7', + 'rules': []}] + }) + + # + # Server Addresses + # + + def get_servers_1234_ips(self, **kw): + return (200, {}, {'addresses': + self.get_servers_1234()[1]['server']['addresses']}) + + def get_servers_1234_ips_public(self, **kw): + return (200, {}, 
{'public': + self.get_servers_1234_ips()[1]['addresses']['public']}) + + def get_servers_1234_ips_private(self, **kw): + return (200, {}, {'private': + self.get_servers_1234_ips()[1]['addresses']['private']}) + + def delete_servers_1234_ips_public_1_2_3_4(self, **kw): + return (202, {}, None) + + # + # Server password + # + + def get_servers_1234_os_server_password(self, **kw): + return (200, {}, {'password': ''}) + + def delete_servers_1234_os_server_password(self, **kw): + return (202, {}, None) + + # + # Server actions + # + + def post_servers_1234_action(self, body, **kw): + _headers = None + _body = None + resp = 202 + assert len(body.keys()) == 1 + action = body.keys()[0] + if action == 'reboot': + assert body[action].keys() == ['type'] + assert body[action]['type'] in ['HARD', 'SOFT'] + elif action == 'rebuild': + keys = body[action].keys() + if 'adminPass' in keys: + keys.remove('adminPass') + assert 'imageRef' in keys + _body = self.get_servers_1234()[2] + elif action == 'resize': + keys = body[action].keys() + assert 'flavorRef' in keys + elif action == 'confirmResize': + assert body[action] is None + # This one method returns a different response code + return (204, {}, None) + elif action == 'revertResize': + assert body[action] is None + elif action == 'migrate': + assert body[action] is None + elif action == 'os-stop': + assert body[action] is None + elif action == 'os-start': + assert body[action] is None + elif action == 'forceDelete': + assert body[action] is None + elif action == 'restore': + assert body[action] is None + elif action == 'pause': + assert body[action] is None + elif action == 'unpause': + assert body[action] is None + elif action == 'lock': + assert body[action] is None + elif action == 'unlock': + assert body[action] is None + elif action == 'rescue': + assert body[action] is None + _body = {'Password': 'RescuePassword'} + elif action == 'unrescue': + assert body[action] is None + elif action == 'resume': + assert body[action] is None + elif action == 'suspend': + assert body[action] is None + elif action == 'lock': + assert body[action] is None + elif action == 'unlock': + assert body[action] is None + elif action == 'addFixedIp': + assert body[action].keys() == ['networkId'] + elif action == 'removeFixedIp': + assert body[action].keys() == ['address'] + elif action == 'addFloatingIp': + assert (body[action].keys() == ['address'] or + body[action].keys() == ['fixed_address', + 'address']) + elif action == 'removeFloatingIp': + assert body[action].keys() == ['address'] + elif action == 'createImage': + assert set(body[action].keys()) == set(['name', 'metadata']) + _headers = dict(location="http://blah/images/456") + elif action == 'changePassword': + assert body[action].keys() == ['adminPass'] + elif action == 'os-getConsoleOutput': + assert body[action].keys() == ['length'] + return (202, {}, {'output': 'foo'}) + elif action == 'os-getVNCConsole': + assert body[action].keys() == ['type'] + elif action == 'os-getSPICEConsole': + assert body[action].keys() == ['type'] + elif action == 'os-migrateLive': + assert set(body[action].keys()) == set(['host', + 'block_migration', + 'disk_over_commit']) + elif action == 'os-resetState': + assert body[action].keys() == ['state'] + elif action == 'resetNetwork': + assert body[action] is None + elif action == 'addSecurityGroup': + assert body[action].keys() == ['name'] + elif action == 'removeSecurityGroup': + assert body[action].keys() == ['name'] + elif action == 'createBackup': + assert set(body[action].keys()) == 
set(['name', + 'backup_type', + 'rotation']) + elif action == 'evacuate': + keys = body[action].keys() + if 'adminPass' in keys: + keys.remove('adminPass') + assert set(keys) == set(['host', 'onSharedStorage']) + else: + raise AssertionError("Unexpected server action: %s" % action) + return (resp, _headers, _body) + + # + # Cloudpipe + # + + def get_os_cloudpipe(self, **kw): + return ( + 200, + {}, + {'cloudpipes': [{'project_id': 1}]} + ) + + def post_os_cloudpipe(self, **ks): + return ( + 202, + {}, + {'instance_id': '9d5824aa-20e6-4b9f-b967-76a699fc51fd'} + ) + + def put_os_cloudpipe_configure_project(self, **kw): + return (202, {}, None) + + # + # Flavors + # + + def get_flavors(self, **kw): + status, header, flavors = self.get_flavors_detail(**kw) + for flavor in flavors['flavors']: + for k in flavor.keys(): + if k not in ['id', 'name']: + del flavor[k] + + return (200, {}, flavors) + + def get_flavors_detail(self, **kw): + flavors = {'flavors': [ + {'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10, + 'OS-FLV-EXT-DATA:ephemeral': 10, + 'os-flavor-access:is_public': True, + 'links': {}}, + {'id': 2, 'name': '512 MB Server', 'ram': 512, 'disk': 20, + 'OS-FLV-EXT-DATA:ephemeral': 20, + 'os-flavor-access:is_public': False, + 'links': {}}, + {'id': 'aa1', 'name': '128 MB Server', 'ram': 128, 'disk': 0, + 'OS-FLV-EXT-DATA:ephemeral': 0, + 'os-flavor-access:is_public': True, + 'links': {}} + ]} + + if 'is_public' not in kw: + filter_is_public = True + else: + if kw['is_public'].lower() == 'none': + filter_is_public = None + else: + filter_is_public = strutils.bool_from_string(kw['is_public'], + True) + + if filter_is_public is not None: + if filter_is_public: + flavors['flavors'] = [ + v for v in flavors['flavors'] + if v['os-flavor-access:is_public'] + ] + else: + flavors['flavors'] = [ + v for v in flavors['flavors'] + if not v['os-flavor-access:is_public'] + ] + + return (200, {}, flavors) + + def get_flavors_1(self, **kw): + return ( + 200, + {}, + {'flavor': + self.get_flavors_detail(is_public='None')[2]['flavors'][0]} + ) + + def get_flavors_2(self, **kw): + return ( + 200, + {}, + {'flavor': + self.get_flavors_detail(is_public='None')[2]['flavors'][1]} + ) + + def get_flavors_3(self, **kw): + # Diablo has no ephemeral + return ( + 200, + {}, + {'flavor': { + 'id': 3, + 'name': '256 MB Server', + 'ram': 256, + 'disk': 10, + }}, + ) + + def get_flavors_512_MB_Server(self, **kw): + raise exceptions.NotFound('404') + + def get_flavors_aa1(self, **kw): + # Aplhanumeric flavor id are allowed. 
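+        # (i.e. alphanumeric ids such as 'aa1' are valid flavor ids;
+        # this returns the third canned flavor from get_flavors_detail)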
+ return ( + 200, + {}, + {'flavor': + self.get_flavors_detail(is_public='None')[2]['flavors'][2]} + ) + + def delete_flavors_flavordelete(self, **kw): + return (202, {}, None) + + def delete_flavors_2(self, **kw): + return (202, {}, None) + + def post_flavors(self, body, **kw): + return ( + 202, + {}, + {'flavor': + self.get_flavors_detail(is_public='None')[2]['flavors'][0]} + ) + + def get_flavors_1_os_extra_specs(self, **kw): + return (200, + {}, + {'extra_specs': {"k1": "v1"}}) + + def get_flavors_2_os_extra_specs(self, **kw): + return (200, + {}, + {'extra_specs': {"k2": "v2"}}) + + def get_flavors_aa1_os_extra_specs(self, **kw): + return (200, {}, + {'extra_specs': {"k3": "v3"}}) + + def post_flavors_1_os_extra_specs(self, body, **kw): + assert body.keys() == ['extra_specs'] + fakes.assert_has_keys(body['extra_specs'], + required=['k1']) + return (200, + {}, + {'extra_specs': {"k1": "v1"}}) + + def delete_flavors_1_os_extra_specs_k1(self, **kw): + return (204, {}, None) + + # + # Flavor access + # + + def get_flavors_1_os_flavor_access(self, **kw): + return (404, {}, None) + + def get_flavors_2_os_flavor_access(self, **kw): + return (200, {}, {'flavor_access': [ + {'flavor_id': '2', 'tenant_id': 'proj1'}, + {'flavor_id': '2', 'tenant_id': 'proj2'} + ]}) + + def post_flavors_2_action(self, body, **kw): + return (202, {}, self.get_flavors_2_os_flavor_access()[2]) + + # + # Floating ips + # + + def get_os_floating_ip_pools(self): + return ( + 200, + {}, + {'floating_ip_pools': [{'name': 'foo', 'name': 'bar'}]} + ) + + def get_os_floating_ips(self, **kw): + return ( + 200, + {}, + {'floating_ips': [ + {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'}, + {'id': 2, 'fixed_ip': '10.0.0.2', 'ip': '11.0.0.2'}, + ]}, + ) + + def get_os_floating_ips_1(self, **kw): + return (200, {}, {'floating_ip': + {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'} + }) + + def post_os_floating_ips(self, body): + if body.get('pool'): + return (200, {}, {'floating_ip': + {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1', + 'pool': 'nova'}}) + else: + return (200, {}, {'floating_ip': + {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1', + 'pool': None}}) + + def delete_os_floating_ips_1(self, **kw): + return (204, {}, None) + + def get_os_floating_ip_dns(self, **kw): + return (205, {}, {'domain_entries': + [{'domain': 'example.org'}, + {'domain': 'example.com'}]}) + + def get_os_floating_ip_dns_testdomain_entries(self, **kw): + if kw.get('ip'): + return (205, {}, {'dns_entries': + [{'dns_entry': + {'ip': kw.get('ip'), + 'name': "host1", + 'type': "A", + 'domain': 'testdomain'}}, + {'dns_entry': + {'ip': kw.get('ip'), + 'name': "host2", + 'type': "A", + 'domain': 'testdomain'}}]}) + else: + return (404, {}, None) + + def get_os_floating_ip_dns_testdomain_entries_testname(self, **kw): + return (205, {}, {'dns_entry': + {'ip': "10.10.10.10", + 'name': 'testname', + 'type': "A", + 'domain': 'testdomain'}}) + + def put_os_floating_ip_dns_testdomain(self, body, **kw): + if body['domain_entry']['scope'] == 'private': + fakes.assert_has_keys(body['domain_entry'], + required=['availability_zone', 'scope']) + elif body['domain_entry']['scope'] == 'public': + fakes.assert_has_keys(body['domain_entry'], + required=['project', 'scope']) + + else: + fakes.assert_has_keys(body['domain_entry'], + required=['project', 'scope']) + return (205, {}, body) + + def put_os_floating_ip_dns_testdomain_entries_testname(self, body, **kw): + fakes.assert_has_keys(body['dns_entry'], + required=['ip', 'dns_type']) + return (205, {}, 
body) + + def delete_os_floating_ip_dns_testdomain(self, **kw): + return (200, {}, None) + + def delete_os_floating_ip_dns_testdomain_entries_testname(self, **kw): + return (200, {}, None) + + def get_os_floating_ips_bulk(self, **kw): + return (200, {}, {'floating_ip_info': [ + {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'}, + {'id': 2, 'fixed_ip': '10.0.0.2', 'ip': '11.0.0.2'}, + ]}) + + def get_os_floating_ips_bulk_testHost(self, **kw): + return (200, {}, {'floating_ip_info': [ + {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'}, + {'id': 2, 'fixed_ip': '10.0.0.2', 'ip': '11.0.0.2'}, + ]}) + + def post_os_floating_ips_bulk(self, **kw): + params = kw.get('body').get('floating_ips_bulk_create') + pool = params.get('pool', 'defaultPool') + interface = params.get('interface', 'defaultInterface') + return (200, {}, {'floating_ips_bulk_create': + {'ip_range': '192.168.1.0/30', + 'pool': pool, + 'interface': interface}}) + + def put_os_floating_ips_bulk_delete(self, **kw): + ip_range = kw.get('body').get('ip_range') + return (200, {}, {'floating_ips_bulk_delete': ip_range}) + + # + # Images + # + def get_images(self, **kw): + return (200, {}, {'images': [ + {'id': 1, 'name': 'CentOS 5.2'}, + {'id': 2, 'name': 'My Server Backup'} + ]}) + + def get_images_detail(self, **kw): + return (200, {}, {'images': [ + { + 'id': 1, + 'name': 'CentOS 5.2', + "updated": "2010-10-10T12:00:00Z", + "created": "2010-08-10T12:00:00Z", + "status": "ACTIVE", + "metadata": { + "test_key": "test_value", + }, + "links": {}, + }, + { + "id": 2, + "name": "My Server Backup", + "serverId": 1234, + "updated": "2010-10-10T12:00:00Z", + "created": "2010-08-10T12:00:00Z", + "status": "SAVING", + "progress": 80, + "links": {}, + } + ]}) + + def get_images_1(self, **kw): + return (200, {}, {'image': self.get_images_detail()[2]['images'][0]}) + + def get_images_2(self, **kw): + return (200, {}, {'image': self.get_images_detail()[2]['images'][1]}) + + def post_images(self, body, **kw): + assert body.keys() == ['image'] + fakes.assert_has_keys(body['image'], required=['serverId', 'name']) + return (202, {}, self.get_images_1()[2]) + + def post_images_1_metadata(self, body, **kw): + assert body.keys() == ['metadata'] + fakes.assert_has_keys(body['metadata'], + required=['test_key']) + return (200, + {}, + {'metadata': self.get_images_1()[2]['image']['metadata']}) + + def delete_images_1(self, **kw): + return (204, {}, None) + + def delete_images_2(self, **kw): + return (204, {}, None) + + def delete_images_1_metadata_test_key(self, **kw): + return (204, {}, None) + + # + # Keypairs + # + def get_os_keypairs_test(self, *kw): + return (200, {}, {'keypair': self.get_os_keypairs()[2]['keypairs'][0]}) + + def get_os_keypairs(self, *kw): + return (200, {}, {"keypairs": [ + {'fingerprint': 'FAKE_KEYPAIR', 'name': 'test'} + ]}) + + def delete_os_keypairs_test(self, **kw): + return (202, {}, None) + + def post_os_keypairs(self, body, **kw): + assert body.keys() == ['keypair'] + fakes.assert_has_keys(body['keypair'], + required=['name']) + r = {'keypair': self.get_os_keypairs()[2]['keypairs'][0]} + return (202, {}, r) + + # + # Virtual Interfaces + # + def get_servers_1234_os_virtual_interfaces(self, **kw): + return (200, {}, {"virtual_interfaces": [ + {'id': 'fakeid', 'mac_address': 'fakemac'} + ]}) + + # + # Quotas + # + + def get_os_quota_sets_test(self, **kw): + return (200, {}, {'quota_set': { + 'tenant_id': 'test', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 1, + 
'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def get_os_quota_sets_tenant_id(self, **kw): + return (200, {}, {'quota_set': { + 'tenant_id': 'test', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 1, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def get_os_quota_sets_97f4c221bff44578b0300df4ef119353(self, **kw): + return (200, {}, {'quota_set': { + 'tenant_id': '97f4c221bff44578b0300df4ef119353', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 1, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def put_os_quota_sets_97f4c221_bff4_4578_b030_0df4ef119353(self, **kw): + return (200, {}, {'quota_set': { + 'tenant_id': '97f4c221-bff4-4578-b030-0df4ef119353', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 1, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def get_os_quota_sets_97f4c221_bff4_4578_b030_0df4ef119353(self, **kw): + return (200, {}, {'quota_set': { + 'tenant_id': '97f4c221-bff4-4578-b030-0df4ef119353', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 1, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def get_os_quota_sets_97f4c221bff44578b0300df4ef119353_defaults(self): + return (200, {}, {'quota_set': { + 'tenant_id': 'test', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 1, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def get_os_quota_sets_tenant_id_defaults(self): + return (200, {}, {'quota_set': { + 'tenant_id': 'test', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 1, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def put_os_quota_sets_97f4c221bff44578b0300df4ef119353(self, body, **kw): + assert body.keys() == ['quota_set'] + fakes.assert_has_keys(body['quota_set'], + required=['tenant_id']) + return (200, {}, {'quota_set': { + 'tenant_id': '97f4c221bff44578b0300df4ef119353', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 2, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def delete_os_quota_sets_test(self, **kw): + return (202, {}, {}) + + def delete_os_quota_sets_97f4c221bff44578b0300df4ef119353(self, **kw): + return (202, {}, {}) + + # + # Quota Classes + # + + def get_os_quota_class_sets_test(self, **kw): + return 
(200, {}, {'quota_class_set': { + 'class_name': 'test', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 1, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def put_os_quota_class_sets_test(self, body, **kw): + assert body.keys() == ['quota_class_set'] + fakes.assert_has_keys(body['quota_class_set'], + required=['class_name']) + return (200, {}, {'quota_class_set': { + 'class_name': 'test', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 2, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + def put_os_quota_class_sets_97f4c221bff44578b0300df4ef119353(self, + body, **kw): + assert body.keys() == ['quota_class_set'] + fakes.assert_has_keys(body['quota_class_set'], + required=['class_name']) + return (200, {}, {'quota_class_set': { + 'class_name': '97f4c221bff44578b0300df4ef119353', + 'metadata_items': [], + 'injected_file_content_bytes': 1, + 'injected_file_path_bytes': 1, + 'volumes': 2, + 'gigabytes': 1, + 'ram': 1, + 'floating_ips': 1, + 'instances': 1, + 'injected_files': 1, + 'cores': 1, + 'keypairs': 1, + 'security_groups': 1, + 'security_group_rules': 1}}) + + # + # Security Groups + # + def get_os_security_groups(self, **kw): + return (200, {}, {"security_groups": [ + {"name": "test", + "description": "FAKE_SECURITY_GROUP", + "tenant_id": + "4ffc664c198e435e9853f2538fbcd7a7", + "id": 1, + "rules": [ + {"id": 11, + "group": {}, + "ip_protocol": "TCP", + "from_port": 22, + "to_port": 22, + "parent_group_id": 1, + "ip_range": + {"cidr": "10.0.0.0/8"}}, + {"id": 12, + "group": { + "tenant_id": + "272bee4c1e624cd4a72a6b0ea55b4582", + "name": "test2"}, + + "ip_protocol": "TCP", + "from_port": 222, + "to_port": 222, + "parent_group_id": 1, + "ip_range": {}}]}, + {"name": "test2", + "description": "FAKE_SECURITY_GROUP2", + "tenant_id": "272bee4c1e624cd4a72a6b0ea55b4582", + "id": 2, + "rules": []} + ]} + ) + + def get_os_security_groups_1(self, **kw): + return (200, {}, {"security_group": + {'id': 1, 'name': 'test', 'description': 'FAKE_SECURITY_GROUP'} + }) + + def delete_os_security_groups_1(self, **kw): + return (202, {}, None) + + def post_os_security_groups(self, body, **kw): + assert body.keys() == ['security_group'] + fakes.assert_has_keys(body['security_group'], + required=['name', 'description']) + r = {'security_group': + self.get_os_security_groups()[2]['security_groups'][0]} + return (202, {}, r) + + def put_os_security_groups_1(self, body, **kw): + assert body.keys() == ['security_group'] + fakes.assert_has_keys(body['security_group'], + required=['name', 'description']) + return (205, {}, body) + + # + # Security Group Rules + # + def get_os_security_group_rules(self, **kw): + return (200, {}, {"security_group_rules": [ + {'id': 1, 'parent_group_id': 1, 'group_id': 2, + 'ip_protocol': 'TCP', 'from_port': '22', 'to_port': 22, + 'cidr': '10.0.0.0/8'} + ]}) + + def delete_os_security_group_rules_1(self, **kw): + return (202, {}, None) + + def delete_os_security_group_rules_11(self, **kw): + return (202, {}, None) + + def delete_os_security_group_rules_12(self, **kw): + return (202, {}, None) + + def post_os_security_group_rules(self, body, **kw): + assert body.keys() == ['security_group_rule'] + 
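+        # Note: comparing dict.keys() directly to a list only holds on
+        # Python 2; Python 3 would need e.g. list(body) == [...] or a
+        # set comparison.  The same idiom appears throughout these
+        # fakes (e.g. body.keys()[0] in post_servers_1234_action).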
fakes.assert_has_keys(body['security_group_rule'], + required=['parent_group_id'], + optional=['group_id', 'ip_protocol', 'from_port', + 'to_port', 'cidr']) + r = {'security_group_rule': + self.get_os_security_group_rules()[2]['security_group_rules'][0]} + return (202, {}, r) + + # + # Tenant Usage + # + def get_os_simple_tenant_usage(self, **kw): + return (200, {}, + {six.u('tenant_usages'): [{ + six.u('total_memory_mb_usage'): 25451.762807466665, + six.u('total_vcpus_usage'): 49.71047423333333, + six.u('total_hours'): 49.71047423333333, + six.u('tenant_id'): + six.u('7b0a1d73f8fb41718f3343c207597869'), + six.u('stop'): six.u('2012-01-22 19:48:41.750722'), + six.u('server_usages'): [{ + six.u('hours'): 49.71047423333333, + six.u('uptime'): 27035, + six.u('local_gb'): 0, + six.u('ended_at'): None, + six.u('name'): six.u('f15image1'), + six.u('tenant_id'): + six.u('7b0a1d73f8fb41718f3343c207597869'), + six.u('vcpus'): 1, + six.u('memory_mb'): 512, + six.u('state'): six.u('active'), + six.u('flavor'): six.u('m1.tiny'), + six.u('started_at'): + six.u('2012-01-20 18:06:06.479998')}], + six.u('start'): six.u('2011-12-25 19:48:41.750687'), + six.u('total_local_gb_usage'): 0.0}]}) + + def get_os_simple_tenant_usage_tenantfoo(self, **kw): + return (200, {}, + {six.u('tenant_usage'): { + six.u('total_memory_mb_usage'): 25451.762807466665, + six.u('total_vcpus_usage'): 49.71047423333333, + six.u('total_hours'): 49.71047423333333, + six.u('tenant_id'): + six.u('7b0a1d73f8fb41718f3343c207597869'), + six.u('stop'): six.u('2012-01-22 19:48:41.750722'), + six.u('server_usages'): [{ + six.u('hours'): 49.71047423333333, + six.u('uptime'): 27035, six.u('local_gb'): 0, + six.u('ended_at'): None, + six.u('name'): six.u('f15image1'), + six.u('tenant_id'): + six.u('7b0a1d73f8fb41718f3343c207597869'), + six.u('vcpus'): 1, six.u('memory_mb'): 512, + six.u('state'): six.u('active'), + six.u('flavor'): six.u('m1.tiny'), + six.u('started_at'): + six.u('2012-01-20 18:06:06.479998')}], + six.u('start'): six.u('2011-12-25 19:48:41.750687'), + six.u('total_local_gb_usage'): 0.0}}) + + def get_os_simple_tenant_usage_test(self, **kw): + return (200, {}, {six.u('tenant_usage'): { + six.u('total_memory_mb_usage'): 25451.762807466665, + six.u('total_vcpus_usage'): 49.71047423333333, + six.u('total_hours'): 49.71047423333333, + six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), + six.u('stop'): six.u('2012-01-22 19:48:41.750722'), + six.u('server_usages'): [{ + six.u('hours'): 49.71047423333333, + six.u('uptime'): 27035, six.u('local_gb'): 0, + six.u('ended_at'): None, + six.u('name'): six.u('f15image1'), + six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), + six.u('vcpus'): 1, six.u('memory_mb'): 512, + six.u('state'): six.u('active'), + six.u('flavor'): six.u('m1.tiny'), + six.u('started_at'): six.u('2012-01-20 18:06:06.479998')}], + six.u('start'): six.u('2011-12-25 19:48:41.750687'), + six.u('total_local_gb_usage'): 0.0}}) + + def get_os_simple_tenant_usage_tenant_id(self, **kw): + return (200, {}, {six.u('tenant_usage'): { + six.u('total_memory_mb_usage'): 25451.762807466665, + six.u('total_vcpus_usage'): 49.71047423333333, + six.u('total_hours'): 49.71047423333333, + six.u('tenant_id'): six.u('7b0a1d73f8fb41718f3343c207597869'), + six.u('stop'): six.u('2012-01-22 19:48:41.750722'), + six.u('server_usages'): [{ + six.u('hours'): 49.71047423333333, + six.u('uptime'): 27035, six.u('local_gb'): 0, + six.u('ended_at'): None, + six.u('name'): six.u('f15image1'), + six.u('tenant_id'): 
six.u('7b0a1d73f8fb41718f3343c207597869'), + six.u('vcpus'): 1, six.u('memory_mb'): 512, + six.u('state'): six.u('active'), + six.u('flavor'): six.u('m1.tiny'), + six.u('started_at'): six.u('2012-01-20 18:06:06.479998')}], + six.u('start'): six.u('2011-12-25 19:48:41.750687'), + six.u('total_local_gb_usage'): 0.0}}) + # + # Certificates + # + + def get_os_certificates_root(self, **kw): + return ( + 200, + {}, + {'certificate': {'private_key': None, 'data': 'foo'}} + ) + + def post_os_certificates(self, **kw): + return ( + 200, + {}, + {'certificate': {'private_key': 'foo', 'data': 'bar'}} + ) + + # + # Aggregates + # + + def get_os_aggregates(self, *kw): + return (200, {}, {"aggregates": [ + {'id': '1', + 'name': 'test', + 'availability_zone': 'nova1'}, + {'id': '2', + 'name': 'test2', + 'availability_zone': 'nova1'}, + ]}) + + def _return_aggregate(self): + r = {'aggregate': self.get_os_aggregates()[2]['aggregates'][0]} + return (200, {}, r) + + def get_os_aggregates_1(self, **kw): + return self._return_aggregate() + + def post_os_aggregates(self, body, **kw): + return self._return_aggregate() + + def put_os_aggregates_1(self, body, **kw): + return self._return_aggregate() + + def put_os_aggregates_2(self, body, **kw): + return self._return_aggregate() + + def post_os_aggregates_1_action(self, body, **kw): + return self._return_aggregate() + + def post_os_aggregates_2_action(self, body, **kw): + return self._return_aggregate() + + def delete_os_aggregates_1(self, **kw): + return (202, {}, None) + + # + # Services + # + def get_os_services(self, **kw): + host = kw.get('host', 'host1') + binary = kw.get('binary', 'nova-compute') + return (200, {}, {'services': + [{'binary': binary, + 'host': host, + 'zone': 'nova', + 'status': 'enabled', + 'state': 'up', + 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, + {'binary': binary, + 'host': host, + 'zone': 'nova', + 'status': 'disabled', + 'state': 'down', + 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}, + ]}) + + def put_os_services_enable(self, body, **kw): + return (200, {}, {'service': {'host': body['host'], + 'binary': body['binary'], + 'status': 'enabled'}}) + + def put_os_services_disable(self, body, **kw): + return (200, {}, {'service': {'host': body['host'], + 'binary': body['binary'], + 'status': 'disabled'}}) + + def put_os_services_disable_log_reason(self, body, **kw): + return (200, {}, {'service': {'host': body['host'], + 'binary': body['binary'], + 'status': 'disabled', + 'disabled_reason': body['disabled_reason']}}) + + # + # Fixed IPs + # + def get_os_fixed_ips_192_168_1_1(self, *kw): + return (200, {}, {"fixed_ip": + {'cidr': '192.168.1.0/24', + 'address': '192.168.1.1', + 'hostname': 'foo', + 'host': 'bar'}}) + + def post_os_fixed_ips_192_168_1_1_action(self, body, **kw): + return (202, {}, None) + + # + # Hosts + # + def get_os_hosts_host(self, *kw): + return (200, {}, {'host': + [{'resource': {'project': '(total)', 'host': 'dummy', + 'cpu': 16, 'memory_mb': 32234, 'disk_gb': 128}}, + {'resource': {'project': '(used_now)', 'host': 'dummy', + 'cpu': 1, 'memory_mb': 2075, 'disk_gb': 45}}, + {'resource': {'project': '(used_max)', 'host': 'dummy', + 'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}}, + {'resource': {'project': 'admin', 'host': 'dummy', + 'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}}]}) + + def get_os_hosts(self, **kw): + zone = kw.get('zone', 'nova1') + return (200, {}, {'hosts': + [{'host': 'host1', + 'service': 'nova-compute', + 'zone': zone}, + {'host': 'host1', + 'service': 'nova-cert', + 'zone': zone}]}) + + def 
get_os_hosts_sample_host(self, *kw): + return (200, {}, {'host': [{'resource': {'host': 'sample_host'}}], }) + + def put_os_hosts_sample_host_1(self, body, **kw): + return (200, {}, {'host': 'sample-host_1', + 'status': 'enabled'}) + + def put_os_hosts_sample_host_2(self, body, **kw): + return (200, {}, {'host': 'sample-host_2', + 'maintenance_mode': 'on_maintenance'}) + + def put_os_hosts_sample_host_3(self, body, **kw): + return (200, {}, {'host': 'sample-host_3', + 'status': 'enabled', + 'maintenance_mode': 'on_maintenance'}) + + def get_os_hosts_sample_host_reboot(self, **kw): + return (200, {}, {'host': 'sample_host', + 'power_action': 'reboot'}) + + def get_os_hosts_sample_host_startup(self, **kw): + return (200, {}, {'host': 'sample_host', + 'power_action': 'startup'}) + + def get_os_hosts_sample_host_shutdown(self, **kw): + return (200, {}, {'host': 'sample_host', + 'power_action': 'shutdown'}) + + def put_os_hosts_sample_host(self, body, **kw): + result = {'host': 'dummy'} + result.update(body) + return (200, {}, result) + + def get_os_hypervisors(self, **kw): + return (200, {}, {"hypervisors": [ + {'id': 1234, 'hypervisor_hostname': 'hyper1'}, + {'id': 5678, 'hypervisor_hostname': 'hyper2'}, + ]}) + + def get_os_hypervisors_detail(self, **kw): + return (200, {}, {"hypervisors": [ + {'id': 1234, + 'service': {'id': 1, 'host': 'compute1'}, + 'vcpus': 4, + 'memory_mb': 10 * 1024, + 'local_gb': 250, + 'vcpus_used': 2, + 'memory_mb_used': 5 * 1024, + 'local_gb_used': 125, + 'hypervisor_type': "xen", + 'hypervisor_version': 3, + 'hypervisor_hostname': "hyper1", + 'free_ram_mb': 5 * 1024, + 'free_disk_gb': 125, + 'current_workload': 2, + 'running_vms': 2, + 'cpu_info': 'cpu_info', + 'disk_available_least': 100}, + {'id': 2, + 'service': {'id': 2, 'host': "compute2"}, + 'vcpus': 4, + 'memory_mb': 10 * 1024, + 'local_gb': 250, + 'vcpus_used': 2, + 'memory_mb_used': 5 * 1024, + 'local_gb_used': 125, + 'hypervisor_type': "xen", + 'hypervisor_version': 3, + 'hypervisor_hostname': "hyper2", + 'free_ram_mb': 5 * 1024, + 'free_disk_gb': 125, + 'current_workload': 2, + 'running_vms': 2, + 'cpu_info': 'cpu_info', + 'disk_available_least': 100} + ]}) + + def get_os_hypervisors_statistics(self, **kw): + return (200, {}, {"hypervisor_statistics": { + 'count': 2, + 'vcpus': 8, + 'memory_mb': 20 * 1024, + 'local_gb': 500, + 'vcpus_used': 4, + 'memory_mb_used': 10 * 1024, + 'local_gb_used': 250, + 'free_ram_mb': 10 * 1024, + 'free_disk_gb': 250, + 'current_workload': 4, + 'running_vms': 4, + 'disk_available_least': 200, + }}) + + def get_os_hypervisors_hyper_search(self, **kw): + return (200, {}, {'hypervisors': [ + {'id': 1234, 'hypervisor_hostname': 'hyper1'}, + {'id': 5678, 'hypervisor_hostname': 'hyper2'} + ]}) + + def get_os_hypervisors_hyper_servers(self, **kw): + return (200, {}, {'hypervisors': [ + {'id': 1234, + 'hypervisor_hostname': 'hyper1', + 'servers': [ + {'name': 'inst1', 'uuid': 'uuid1'}, + {'name': 'inst2', 'uuid': 'uuid2'} + ]}, + {'id': 5678, + 'hypervisor_hostname': 'hyper2', + 'servers': [ + {'name': 'inst3', 'uuid': 'uuid3'}, + {'name': 'inst4', 'uuid': 'uuid4'} + ]} + ]}) + + def get_os_hypervisors_hyper_no_servers_servers(self, **kw): + return (200, {}, {'hypervisors': + [{'id': 1234, 'hypervisor_hostname': 'hyper1'}]}) + + def get_os_hypervisors_1234(self, **kw): + return (200, {}, {'hypervisor': + {'id': 1234, + 'service': {'id': 1, 'host': 'compute1'}, + 'vcpus': 4, + 'memory_mb': 10 * 1024, + 'local_gb': 250, + 'vcpus_used': 2, + 'memory_mb_used': 5 * 1024, + 
'local_gb_used': 125, + 'hypervisor_type': "xen", + 'hypervisor_version': 3, + 'hypervisor_hostname': "hyper1", + 'free_ram_mb': 5 * 1024, + 'free_disk_gb': 125, + 'current_workload': 2, + 'running_vms': 2, + 'cpu_info': 'cpu_info', + 'disk_available_least': 100}}) + + def get_os_hypervisors_1234_uptime(self, **kw): + return (200, {}, {'hypervisor': + {'id': 1234, + 'hypervisor_hostname': "hyper1", + 'uptime': "fake uptime"}}) + + def get_os_networks(self, **kw): + return (200, {}, {'networks': [{"label": "1", "cidr": "10.0.0.0/24", + 'project_id': '4ffc664c198e435e9853f2538fbcd7a7', + 'id': '1'}]}) + + def post_os_networks(self, **kw): + return (202, {}, {'network': kw}) + + def get_os_networks_1(self, **kw): + return (200, {}, {'network': {"label": "1", "cidr": "10.0.0.0/24"}}) + + def delete_os_networks_networkdelete(self, **kw): + return (202, {}, None) + + def post_os_networks_add(self, **kw): + return (202, {}, None) + + def post_os_networks_networkdisassociate_action(self, **kw): + return (202, {}, None) + + def get_os_fping(self, **kw): + return ( + 200, {}, { + 'servers': [ + { + "id": "1", + "project_id": "fake-project", + "alive": True, + }, + { + "id": "2", + "project_id": "fake-project", + "alive": True, + }, + ] + } + ) + + def get_os_fping_1(self, **kw): + return ( + 200, {}, { + 'server': { + "id": "1", + "project_id": "fake-project", + "alive": True, + } + } + ) + + def post_os_networks_1_action(self, **kw): + return (202, {}, None) + + def post_os_networks_networktest_action(self, **kw): + return (202, {}, None) + + def post_os_networks_2_action(self, **kw): + return (202, {}, None) + + def post_os_coverage_action(self, body, **kw): + if 'start' in body or 'reset' in body: + return (200, {}, None) + elif 'stop' in body: + return (200, {}, {'path': '/tmp/tmpdir/'}) + else: + return (200, {}, { + 'path': '/tmp/tmpdir/' + body['report']['file'] + }) + + def get_os_availability_zone(self, **kw): + return (200, {}, {"availabilityZoneInfo": [ + {"zoneName": "zone-1", + "zoneState": {"available": True}, + "hosts": None}, + {"zoneName": "zone-2", + "zoneState": {"available": False}, + "hosts": None}]}) + + def get_os_availability_zone_detail(self, **kw): + return (200, {}, {"availabilityZoneInfo": [ + {"zoneName": "zone-1", + "zoneState": {"available": True}, + "hosts": { + "fake_host-1": { + "nova-compute": {"active": True, + "available": True, + "updated_at": + datetime(2012, 12, 26, 14, 45, 25, 0)}}}}, + {"zoneName": "internal", + "zoneState": {"available": True}, + "hosts": { + "fake_host-1": { + "nova-sched": { + "active": True, + "available": True, + "updated_at": + datetime(2012, 12, 26, 14, 45, 25, 0)}}, + "fake_host-2": { + "nova-network": { + "active": True, + "available": False, + "updated_at": + datetime(2012, 12, 26, 14, 45, 24, 0)}}}}, + {"zoneName": "zone-2", + "zoneState": {"available": False}, + "hosts": None}]}) + + def get_servers_1234_os_interface(self, **kw): + return (200, {}, {"interfaceAttachments": [ + {"port_state": "ACTIVE", + "net_id": "net-id-1", + "port_id": "port-id-1", + "mac_address": "aa:bb:cc:dd:ee:ff", + "fixed_ips": [{"ip_address": "1.2.3.4"}], + }, + {"port_state": "ACTIVE", + "net_id": "net-id-1", + "port_id": "port-id-1", + "mac_address": "aa:bb:cc:dd:ee:ff", + "fixed_ips": [{"ip_address": "1.2.3.4"}], + }]}) + + def post_servers_1234_os_interface(self, 
**kw): + return (200, {}, {'interfaceAttachment': {}}) + + def delete_servers_1234_os_interface_port_id(self, **kw): + return (200, {}, None) + + # NOTE (vkhomenko): + # Volume responses were taken from: + # https://wiki.openstack.org/wiki/CreateVolumeFromImage + # http://jorgew.github.com/block-storage-api/content/ + # GET_listDetailVolumes_v1__tenantId__volumes_detail_.html + # I suppose they are outdated and should be updated once Cinder is + # released + + def get_volumes_detail(self, **kw): + return (200, {}, {"volumes": [ + {"display_name": "Work", + "display_description": "volume for work", + "status": "ATTACHED", + "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", + "created_at": "2011-09-09T00:00:00Z", + "attached": "2011-11-11T00:00:00Z", + "size": 1024, + "attachments": [ + {"id": "3333", + "links": ''}], + "metadata": {}}]}) + + def get_volumes(self, **kw): + return (200, {}, {"volumes": [ + {"display_name": "Work", + "display_description": "volume for work", + "status": "ATTACHED", + "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", + "created_at": "2011-09-09T00:00:00Z", + "attached": "2011-11-11T00:00:00Z", + "size": 1024, + "attachments": [ + {"id": "3333", + "links": ''}], + "metadata": {}}]}) + + def get_volumes_15e59938_07d5_11e1_90e3_e3dffe0c5983(self, **kw): + return (200, {}, {"volume": + {"display_name": "Work", + "display_description": "volume for work", + "status": "ATTACHED", + "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", + "created_at": "2011-09-09T00:00:00Z", + "attached": "2011-11-11T00:00:00Z", + "size": 1024, + "attachments": [ + {"id": "3333", + "links": ''}], + "metadata": {}}}) + + def post_volumes(self, **kw): + return (200, {}, {"volume": + {"status": "creating", + "display_name": "vol-007", + "attachments": [(0)], + "availability_zone": "cinder", + "created_at": "2012-08-13T10:57:17.000000", + "display_description": "create volume from image", + "image_id": "f4cf905f-7c58-4d7b-8314-8dd8a2d1d483", + "volume_type": "None", + "metadata": {}, + "id": "5cb239f6-1baf-4fe1-bd78-c852cf00fa39", + "size": 1}}) + + def delete_volumes_15e59938_07d5_11e1_90e3_e3dffe0c5983(self, **kw): + return (200, {}, {}) + + def post_servers_1234_os_volume_attachments(self, **kw): + return (200, {}, {"volumeAttachment": + {"device": "/dev/vdb", + "volumeId": 2}}) + + def put_servers_1234_os_volume_attachments_Work(self, **kw): + return (200, {}, {"volumeAttachment": {"volumeId": 2}}) + + def get_servers_1234_os_volume_attachments(self, **kw): + return (200, {}, {"volumeAttachments": [ + {"display_name": "Work", + "display_description": "volume for work", + "status": "ATTACHED", + "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", + "created_at": "2011-09-09T00:00:00Z", + "attached": "2011-11-11T00:00:00Z", + "size": 1024, + "attachments": [ + {"id": "3333", + "links": ''}], + "metadata": {}}]}) + + def get_servers_1234_os_volume_attachments_Work(self, **kw): + return (200, {}, {"volumeAttachment": + {"display_name": "Work", + "display_description": "volume for work", + "status": "ATTACHED", + "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983", + "created_at": "2011-09-09T00:00:00Z", + "attached": "2011-11-11T00:00:00Z", + "size": 1024, + "attachments": [ + {"id": "3333", + "links": ''}], + "metadata": {}}}) + + def delete_servers_1234_os_volume_attachments_Work(self, **kw): + return (200, {}, {}) + + def get_servers_1234_os_instance_actions(self, **kw): + return (200, {}, {"instanceActions": + [{"instance_uuid": "1234", + "user_id": "b968c25e04ab405f9fe4e6ca54cce9a5", + "start_time": 
"2013-03-25T13:45:09.000000", + "request_id": "req-abcde12345", + "action": "create", + "message": None, + "project_id": "04019601fe3648c0abd4f4abfb9e6106"}]}) + + def get_servers_1234_os_instance_actions_req_abcde12345(self, **kw): + return (200, {}, {"instanceAction": + {"instance_uuid": "1234", + "user_id": "b968c25e04ab405f9fe4e6ca54cce9a5", + "start_time": "2013-03-25T13:45:09.000000", + "request_id": "req-abcde12345", + "action": "create", + "message": None, + "project_id": "04019601fe3648c0abd4f4abfb9e6106"}}) + + def post_servers_uuid1_action(self, **kw): + return 202, {}, {} + + def post_servers_uuid2_action(self, **kw): + return 202, {}, {} + + def post_servers_uuid3_action(self, **kw): + return 202, {}, {} + + def post_servers_uuid4_action(self, **kw): + return 202, {}, {} + + def get_os_cells_child_cell(self, **kw): + cell = {'cell': { + 'username': 'cell1_user', + 'name': 'cell1', + 'rpc_host': '10.0.1.10', + '_info': { + 'username': 'cell1_user', + 'rpc_host': '10.0.1.10', + 'type': 'child', + 'name': 'cell1', + 'rpc_port': 5673 + }, + 'type': 'child', + 'rpc_port': 5673, + '_loaded': True + }} + return (200, {}, cell) + + def get_os_cells_capacities(self, **kw): + cell_capacities_response = {"cell": {"capacities": {"ram_free": { + "units_by_mb": {"8192": 0, "512": 13, "4096": 1, "2048": 3, + "16384": 0}, "total_mb": 7680}, "disk_free": { + "units_by_mb": {"81920": 11, "20480": 46, "40960": 23, "163840": 5, + "0": 0}, "total_mb": 1052672}}}} + return (200, {}, cell_capacities_response) + + def get_os_cells_child_cell_capacities(self, **kw): + return self.get_os_cells_capacities() + + def get_os_migrations(self, **kw): + migrations = {'migrations': + [{ + "created_at": "2012-10-29T13:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1234, + "instance_uuid": "instance_id_123", + "new_instance_type_id": 2, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "Done", + "updated_at": "2012-10-29T13:42:02.000000" + }]} + return (200, {}, migrations) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_agents.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_agents.py new file mode 100644 index 0000000000..c9eaf37874 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_agents.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from novaclient.v1_1 import agents +from novaclient.tests.v1_1 import fakes +from novaclient.tests import utils + + +cs = fakes.FakeClient() + + +class AgentsTest(utils.TestCase): + + def test_list_agents(self): + ags = cs.agents.list() + cs.assert_called('GET', '/os-agents') + [self.assertTrue(isinstance(a, agents.Agent)) for a in ags] + [self.assertEqual(a.hypervisor, 'kvm') for a in ags] + + def test_list_agents_with_hypervisor(self): + ags = cs.agents.list('xen') + cs.assert_called('GET', '/os-agents?hypervisor=xen') + [self.assertTrue(isinstance(a, agents.Agent)) for a in ags] + [self.assertEqual(a.hypervisor, 'xen') for a in ags] + + def test_agents_create(self): + ag = cs.agents.create('win', 'x86', '7.0', + '/xxx/xxx/xxx', + 'add6bb58e139be103324d04d82d8f546', + 'xen') + body = {'agent': { + 'url': '/xxx/xxx/xxx', + 'hypervisor': 'xen', + 'md5hash': 'add6bb58e139be103324d04d82d8f546', + 'version': '7.0', + 'architecture': 'x86', + 'os': 'win'}} + cs.assert_called('POST', '/os-agents', body) + self.assertEqual(1, ag._info.copy()['id']) + + def test_agents_delete(self): + cs.agents.delete('1') + cs.assert_called('DELETE', '/os-agents/1') + + def test_agents_modify(self): + ag = cs.agents.update('1', '8.0', + '/yyy/yyyy/yyyy', + 'add6bb58e139be103324d04d82d8f546') + body = {"para": { + "url": "/yyy/yyyy/yyyy", + "version": "8.0", + "md5hash": "add6bb58e139be103324d04d82d8f546"}} + cs.assert_called('PUT', '/os-agents/1', body) + self.assertEqual(1, ag.id) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_aggregates.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_aggregates.py new file mode 100644 index 0000000000..bbd7f2c10e --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_aggregates.py @@ -0,0 +1,138 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
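Each test module below drives a fakes.FakeClient() and then asserts on the request it produced with cs.assert_called(method, url, body). A reduced illustration of the recording machinery this implies; CallRecorderSketch is a hypothetical name, and the vendored fake keeps a richer callstack and also offers assert_called_anytime().

    class CallRecorderSketch(object):
        def __init__(self):
            self.callstack = []  # one (method, url, body) tuple per request

        def record(self, method, url, body=None):
            self.callstack.append((method, url, body))

        def assert_called(self, method, url, body=None):
            # Compare the expected call against the most recent request.
            assert self.callstack, 'no API calls were made'
            called_method, called_url, called_body = self.callstack[-1]
            assert (method, url) == (called_method, called_url), (
                'expected %s %s, got %s %s'
                % (method, url, called_method, called_url))
            if body is not None:
                assert body == called_body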
+ +from novaclient.v1_1 import aggregates +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class AggregatesTest(utils.TestCase): + + def test_list_aggregates(self): + result = cs.aggregates.list() + cs.assert_called('GET', '/os-aggregates') + for aggregate in result: + self.assertTrue(isinstance(aggregate, aggregates.Aggregate)) + + def test_create_aggregate(self): + body = {"aggregate": {"name": "test", "availability_zone": "nova1"}} + aggregate = cs.aggregates.create("test", "nova1") + cs.assert_called('POST', '/os-aggregates', body) + self.assertTrue(isinstance(aggregate, aggregates.Aggregate)) + + def test_get(self): + aggregate = cs.aggregates.get("1") + cs.assert_called('GET', '/os-aggregates/1') + self.assertTrue(isinstance(aggregate, aggregates.Aggregate)) + + aggregate2 = cs.aggregates.get(aggregate) + cs.assert_called('GET', '/os-aggregates/1') + self.assertTrue(isinstance(aggregate2, aggregates.Aggregate)) + + def test_get_details(self): + aggregate = cs.aggregates.get_details("1") + cs.assert_called('GET', '/os-aggregates/1') + self.assertTrue(isinstance(aggregate, aggregates.Aggregate)) + + aggregate2 = cs.aggregates.get_details(aggregate) + cs.assert_called('GET', '/os-aggregates/1') + self.assertTrue(isinstance(aggregate2, aggregates.Aggregate)) + + def test_update(self): + aggregate = cs.aggregates.get("1") + values = {"name": "foo"} + body = {"aggregate": values} + + result1 = aggregate.update(values) + cs.assert_called('PUT', '/os-aggregates/1', body) + self.assertTrue(isinstance(result1, aggregates.Aggregate)) + + result2 = cs.aggregates.update(2, values) + cs.assert_called('PUT', '/os-aggregates/2', body) + self.assertTrue(isinstance(result2, aggregates.Aggregate)) + + def test_update_with_availability_zone(self): + aggregate = cs.aggregates.get("1") + values = {"name": "foo", "availability_zone": "new_zone"} + body = {"aggregate": values} + + result3 = cs.aggregates.update(aggregate, values) + cs.assert_called('PUT', '/os-aggregates/1', body) + self.assertTrue(isinstance(result3, aggregates.Aggregate)) + + def test_add_host(self): + aggregate = cs.aggregates.get("1") + host = "host1" + body = {"add_host": {"host": "host1"}} + + result1 = aggregate.add_host(host) + cs.assert_called('POST', '/os-aggregates/1/action', body) + self.assertTrue(isinstance(result1, aggregates.Aggregate)) + + result2 = cs.aggregates.add_host("2", host) + cs.assert_called('POST', '/os-aggregates/2/action', body) + self.assertTrue(isinstance(result2, aggregates.Aggregate)) + + result3 = cs.aggregates.add_host(aggregate, host) + cs.assert_called('POST', '/os-aggregates/1/action', body) + self.assertTrue(isinstance(result3, aggregates.Aggregate)) + + def test_remove_host(self): + aggregate = cs.aggregates.get("1") + host = "host1" + body = {"remove_host": {"host": "host1"}} + + result1 = aggregate.remove_host(host) + cs.assert_called('POST', '/os-aggregates/1/action', body) + self.assertTrue(isinstance(result1, aggregates.Aggregate)) + + result2 = cs.aggregates.remove_host("2", host) + cs.assert_called('POST', '/os-aggregates/2/action', body) + self.assertTrue(isinstance(result2, aggregates.Aggregate)) + + result3 = cs.aggregates.remove_host(aggregate, host) + cs.assert_called('POST', '/os-aggregates/1/action', body) + self.assertTrue(isinstance(result3, aggregates.Aggregate)) + + def test_set_metadata(self): + aggregate = cs.aggregates.get("1") + metadata = {"foo": "bar"} + body = {"set_metadata": {"metadata": metadata}} + + result1 = 
aggregate.set_metadata(metadata) + cs.assert_called('POST', '/os-aggregates/1/action', body) + self.assertTrue(isinstance(result1, aggregates.Aggregate)) + + result2 = cs.aggregates.set_metadata(2, metadata) + cs.assert_called('POST', '/os-aggregates/2/action', body) + self.assertTrue(isinstance(result2, aggregates.Aggregate)) + + result3 = cs.aggregates.set_metadata(aggregate, metadata) + cs.assert_called('POST', '/os-aggregates/1/action', body) + self.assertTrue(isinstance(result3, aggregates.Aggregate)) + + def test_delete_aggregate(self): + aggregate = cs.aggregates.list()[0] + aggregate.delete() + cs.assert_called('DELETE', '/os-aggregates/1') + + cs.aggregates.delete('1') + cs.assert_called('DELETE', '/os-aggregates/1') + + cs.aggregates.delete(aggregate) + cs.assert_called('DELETE', '/os-aggregates/1') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_auth.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_auth.py new file mode 100644 index 0000000000..537f59b5f7 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_auth.py @@ -0,0 +1,399 @@ +import copy +import json +import mock + +import requests + +from novaclient.v1_1 import client +from novaclient import exceptions +from novaclient.tests import utils + + +class AuthenticateAgainstKeystoneTests(utils.TestCase): + def test_authenticate_success(self): + cs = client.Client("username", "password", "project_id", + "auth_url/v2.0", service_type='compute') + resp = { + "access": { + "token": { + "expires": "12345", + "id": "FAKE_ID", + "tenant": { + "id": "FAKE_TENANT_ID", + } + }, + "serviceCatalog": [ + { + "type": "compute", + "endpoints": [ + { + "region": "RegionOne", + "adminURL": "http://localhost:8774/v1.1", + "internalURL": "http://localhost:8774/v1.1", + "publicURL": "http://localhost:8774/v1.1/", + }, + ], + }, + ], + }, + } + auth_response = utils.TestResponse({ + "status_code": 200, + "text": json.dumps(resp), + }) + + mock_request = mock.Mock(return_value=(auth_response)) + + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + cs.client.authenticate() + headers = { + 'User-Agent': cs.client.USER_AGENT, + 'Content-Type': 'application/json', + 'Accept': 'application/json', + } + body = { + 'auth': { + 'passwordCredentials': { + 'username': cs.client.user, + 'password': cs.client.password, + }, + 'tenantName': cs.client.projectid, + }, + } + + token_url = cs.client.auth_url + "/tokens" + mock_request.assert_called_with( + "POST", + token_url, + headers=headers, + data=json.dumps(body), + allow_redirects=True, + **self.TEST_REQUEST_BASE) + + endpoints = resp["access"]["serviceCatalog"][0]['endpoints'] + public_url = endpoints[0]["publicURL"].rstrip('/') + self.assertEqual(cs.client.management_url, public_url) + token_id = resp["access"]["token"]["id"] + self.assertEqual(cs.client.auth_token, token_id) + + test_auth_call() + + def test_authenticate_failure(self): + cs = client.Client("username", "password", "project_id", + "auth_url/v2.0") + resp = {"unauthorized": {"message": "Unauthorized", "code": "401"}} + auth_response = utils.TestResponse({ + "status_code": 401, + "text": json.dumps(resp), + }) + + mock_request = mock.Mock(return_value=(auth_response)) + + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + self.assertRaises(exceptions.Unauthorized, cs.client.authenticate) + + test_auth_call() + + def test_v1_auth_redirect(self): + cs = client.Client("username", "password", "project_id", + "auth_url/v1.0", 
service_type='compute') + dict_correct_response = { + "access": { + "token": { + "expires": "12345", + "id": "FAKE_ID", + "tenant": { + "id": "FAKE_TENANT_ID", + } + }, + "serviceCatalog": [ + { + "type": "compute", + "endpoints": [ + { + "adminURL": "http://localhost:8774/v1.1", + "region": "RegionOne", + "internalURL": "http://localhost:8774/v1.1", + "publicURL": "http://localhost:8774/v1.1/", + }, + ], + }, + ], + }, + } + correct_response = json.dumps(dict_correct_response) + dict_responses = [ + {"headers": {'location': 'http://127.0.0.1:5001'}, + "status_code": 305, + "text": "Use proxy"}, + # Configured on admin port, nova redirects to v2.0 port. + # When trying to connect to it, keystone auth succeeds via the v1.0 + # protocol (through headers) but tokens are being returned in the + # body (looks like a keystone bug). Left for compatibility. + {"headers": {}, + "status_code": 200, + "text": correct_response}, + {"headers": {}, + "status_code": 200, + "text": correct_response} + ] + + responses = [(utils.TestResponse(resp)) for resp in dict_responses] + + def side_effect(*args, **kwargs): + return responses.pop(0) + + mock_request = mock.Mock(side_effect=side_effect) + + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + cs.client.authenticate() + headers = { + 'User-Agent': cs.client.USER_AGENT, + 'Content-Type': 'application/json', + 'Accept': 'application/json', + } + body = { + 'auth': { + 'passwordCredentials': { + 'username': cs.client.user, + 'password': cs.client.password, + }, + 'tenantName': cs.client.projectid, + }, + } + + token_url = cs.client.auth_url + "/tokens" + kwargs = copy.copy(self.TEST_REQUEST_BASE) + kwargs['headers'] = headers + kwargs['data'] = json.dumps(body) + mock_request.assert_called_with( + "POST", + token_url, + allow_redirects=True, + **kwargs) + + resp = dict_correct_response + endpoints = resp["access"]["serviceCatalog"][0]['endpoints'] + public_url = endpoints[0]["publicURL"].rstrip('/') + self.assertEqual(cs.client.management_url, public_url) + token_id = resp["access"]["token"]["id"] + self.assertEqual(cs.client.auth_token, token_id) + + test_auth_call() + + def test_v2_auth_redirect(self): + cs = client.Client("username", "password", "project_id", + "auth_url/v2.0", service_type='compute') + dict_correct_response = { + "access": { + "token": { + "expires": "12345", + "id": "FAKE_ID", + "tenant": { + "id": "FAKE_TENANT_ID", + } + }, + "serviceCatalog": [ + { + "type": "compute", + "endpoints": [ + { + "adminURL": "http://localhost:8774/v1.1", + "region": "RegionOne", + "internalURL": "http://localhost:8774/v1.1", + "publicURL": "http://localhost:8774/v1.1/", + }, + ], + }, + ], + }, + } + correct_response = json.dumps(dict_correct_response) + dict_responses = [ + {"headers": {'location': 'http://127.0.0.1:5001'}, + "status_code": 305, + "text": "Use proxy"}, + # Configured on admin port, nova redirects to v2.0 port. + # When trying to connect to it, keystone auth succeeds via the v1.0 + # protocol (through headers) but tokens are being returned in the + # body (looks like a keystone bug). Left for compatibility. 
+ {"headers": {}, + "status_code": 200, + "text": correct_response}, + {"headers": {}, + "status_code": 200, + "text": correct_response} + ] + + responses = [(utils.TestResponse(resp)) for resp in dict_responses] + + def side_effect(*args, **kwargs): + return responses.pop(0) + + mock_request = mock.Mock(side_effect=side_effect) + + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + cs.client.authenticate() + headers = { + 'User-Agent': cs.client.USER_AGENT, + 'Content-Type': 'application/json', + 'Accept': 'application/json', + } + body = { + 'auth': { + 'passwordCredentials': { + 'username': cs.client.user, + 'password': cs.client.password, + }, + 'tenantName': cs.client.projectid, + }, + } + + token_url = cs.client.auth_url + "/tokens" + kwargs = copy.copy(self.TEST_REQUEST_BASE) + kwargs['headers'] = headers + kwargs['data'] = json.dumps(body) + mock_request.assert_called_with( + "POST", + token_url, + allow_redirects=True, + **kwargs) + + resp = dict_correct_response + endpoints = resp["access"]["serviceCatalog"][0]['endpoints'] + public_url = endpoints[0]["publicURL"].rstrip('/') + self.assertEqual(cs.client.management_url, public_url) + token_id = resp["access"]["token"]["id"] + self.assertEqual(cs.client.auth_token, token_id) + + test_auth_call() + + def test_ambiguous_endpoints(self): + cs = client.Client("username", "password", "project_id", + "auth_url/v2.0", service_type='compute') + resp = { + "access": { + "token": { + "expires": "12345", + "id": "FAKE_ID", + "tenant": { + "id": "FAKE_TENANT_ID", + } + }, + "serviceCatalog": [ + { + "adminURL": "http://localhost:8774/v1.1", + "type": "compute", + "name": "Compute CLoud", + "endpoints": [ + { + "region": "RegionOne", + "internalURL": "http://localhost:8774/v1.1", + "publicURL": "http://localhost:8774/v1.1/", + }, + ], + }, + { + "adminURL": "http://localhost:8774/v1.1", + "type": "compute", + "name": "Hyper-compute Cloud", + "endpoints": [ + { + "internalURL": "http://localhost:8774/v1.1", + "publicURL": "http://localhost:8774/v1.1/", + }, + ], + }, + ], + }, + } + auth_response = utils.TestResponse({ + "status_code": 200, + "text": json.dumps(resp), + }) + + mock_request = mock.Mock(return_value=(auth_response)) + + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + self.assertRaises(exceptions.AmbiguousEndpoints, + cs.client.authenticate) + + test_auth_call() + + +class AuthenticationTests(utils.TestCase): + def test_authenticate_success(self): + cs = client.Client("username", "password", "project_id", "auth_url") + management_url = 'https://localhost/v1.1/443470' + auth_response = utils.TestResponse({ + 'status_code': 204, + 'headers': { + 'x-server-management-url': management_url, + 'x-auth-token': '1b751d74-de0c-46ae-84f0-915744b582d1', + }, + }) + mock_request = mock.Mock(return_value=(auth_response)) + + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + cs.client.authenticate() + headers = { + 'Accept': 'application/json', + 'X-Auth-User': 'username', + 'X-Auth-Key': 'password', + 'X-Auth-Project-Id': 'project_id', + 'User-Agent': cs.client.USER_AGENT + } + mock_request.assert_called_with( + "GET", + cs.client.auth_url, + headers=headers, + **self.TEST_REQUEST_BASE) + + self.assertEqual(cs.client.management_url, + auth_response.headers['x-server-management-url']) + self.assertEqual(cs.client.auth_token, + auth_response.headers['x-auth-token']) + + test_auth_call() + + def test_authenticate_failure(self): + cs 
= client.Client("username", "password", "project_id", "auth_url") + auth_response = utils.TestResponse({'status_code': 401}) + mock_request = mock.Mock(return_value=(auth_response)) + + @mock.patch.object(requests.Session, "request", mock_request) + def test_auth_call(): + self.assertRaises(exceptions.Unauthorized, cs.client.authenticate) + + test_auth_call() + + def test_auth_automatic(self): + cs = client.Client("username", "password", "project_id", "auth_url") + http_client = cs.client + http_client.management_url = '' + mock_request = mock.Mock(return_value=(None, None)) + + @mock.patch.object(http_client, 'request', mock_request) + @mock.patch.object(http_client, 'authenticate') + def test_auth_call(m): + http_client.get('/') + m.assert_called() + mock_request.assert_called() + + test_auth_call() + + def test_auth_manual(self): + cs = client.Client("username", "password", "project_id", "auth_url") + + @mock.patch.object(cs.client, 'authenticate') + def test_auth_call(m): + cs.authenticate() + m.assert_called() + + test_auth_call() diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_availability_zone.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_availability_zone.py new file mode 100644 index 0000000000..93cb6bebf4 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_availability_zone.py @@ -0,0 +1,92 @@ +# Copyright 2011 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import six + +from novaclient.v1_1 import availability_zones +from novaclient.v1_1 import shell +from novaclient.tests.v1_1 import fakes +from novaclient.tests import utils + + +cs = fakes.FakeClient() + + +class AvailabilityZoneTest(utils.TestCase): + + def _assertZone(self, zone, name, status): + self.assertEqual(zone.zoneName, name) + self.assertEqual(zone.zoneState, status) + + def test_list_availability_zone(self): + zones = cs.availability_zones.list(detailed=False) + cs.assert_called('GET', '/os-availability-zone') + + for zone in zones: + self.assertTrue(isinstance(zone, + availability_zones.AvailabilityZone)) + + self.assertEqual(2, len(zones)) + + l0 = [six.u('zone-1'), six.u('available')] + l1 = [six.u('zone-2'), six.u('not available')] + + z0 = shell._treeizeAvailabilityZone(zones[0]) + z1 = shell._treeizeAvailabilityZone(zones[1]) + + self.assertEqual((len(z0), len(z1)), (1, 1)) + + self._assertZone(z0[0], l0[0], l0[1]) + self._assertZone(z1[0], l1[0], l1[1]) + + def test_detail_availability_zone(self): + zones = cs.availability_zones.list(detailed=True) + cs.assert_called('GET', '/os-availability-zone/detail') + + for zone in zones: + self.assertTrue(isinstance(zone, + availability_zones.AvailabilityZone)) + + self.assertEqual(3, len(zones)) + + l0 = [six.u('zone-1'), six.u('available')] + l1 = [six.u('|- fake_host-1'), six.u('')] + l2 = [six.u('| |- nova-compute'), + six.u('enabled :-) 2012-12-26 14:45:25')] + l3 = [six.u('internal'), six.u('available')] + l4 = [six.u('|- fake_host-1'), six.u('')] + l5 = [six.u('| |- nova-sched'), + six.u('enabled :-) 2012-12-26 14:45:25')] + l6 = [six.u('|- fake_host-2'), six.u('')] + l7 = [six.u('| |- nova-network'), + six.u('enabled XXX 2012-12-26 14:45:24')] + l8 = [six.u('zone-2'), six.u('not available')] + + z0 = shell._treeizeAvailabilityZone(zones[0]) + z1 = shell._treeizeAvailabilityZone(zones[1]) + z2 = shell._treeizeAvailabilityZone(zones[2]) + + self.assertEqual((len(z0), len(z1), len(z2)), (3, 5, 1)) + + self._assertZone(z0[0], l0[0], l0[1]) + self._assertZone(z0[1], l1[0], l1[1]) + self._assertZone(z0[2], l2[0], l2[1]) + self._assertZone(z1[0], l3[0], l3[1]) + self._assertZone(z1[1], l4[0], l4[1]) + self._assertZone(z1[2], l5[0], l5[1]) + self._assertZone(z1[3], l6[0], l6[1]) + self._assertZone(z1[4], l7[0], l7[1]) + self._assertZone(z2[0], l8[0], l8[1]) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_certs.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_certs.py new file mode 100644 index 0000000000..ff1f611211 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_certs.py @@ -0,0 +1,19 @@ +from novaclient.v1_1 import certs +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class FlavorsTest(utils.TestCase): + + def test_create_cert(self): + cert = cs.certs.create() + cs.assert_called('POST', '/os-certificates') + self.assertTrue(isinstance(cert, certs.Certificate)) + + def test_get_root_cert(self): + cert = cs.certs.get() + cs.assert_called('GET', '/os-certificates/root') + self.assertTrue(isinstance(cert, certs.Certificate)) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_cloudpipe.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_cloudpipe.py new file mode 100644 index 0000000000..e428629f28 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_cloudpipe.py @@ -0,0 +1,27 @@ +from novaclient.v1_1 import cloudpipe +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs 
= fakes.FakeClient() + + +class CloudpipeTest(utils.TestCase): + + def test_list_cloudpipes(self): + cp = cs.cloudpipe.list() + cs.assert_called('GET', '/os-cloudpipe') + [self.assertTrue(isinstance(c, cloudpipe.Cloudpipe)) for c in cp] + + def test_create(self): + project = "test" + cp = cs.cloudpipe.create(project) + body = {'cloudpipe': {'project_id': project}} + cs.assert_called('POST', '/os-cloudpipe', body) + self.assertTrue(isinstance(cp, str)) + + def test_update(self): + cs.cloudpipe.update("192.168.1.1", 2345) + body = {'configure_project': {'vpn_ip': "192.168.1.1", + 'vpn_port': 2345}} + cs.assert_called('PUT', '/os-cloudpipe/configure-project', body) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_coverage_ext.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_coverage_ext.py new file mode 100644 index 0000000000..de23ee7b4f --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_coverage_ext.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# See: http://wiki.openstack.org/Nova/CoverageExtension for more information +# and usage explanation for this API extension + +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class CoverageTest(utils.TestCase): + + def test_start_coverage(self): + c = cs.coverage.start() + cs.assert_called('POST', '/os-coverage/action') + + def test_stop_coverage(self): + c = cs.coverage.stop() + return_dict = {'path': '/tmp/tmpdir/report'} + cs.assert_called_anytime('POST', '/os-coverage/action') + + def test_report_coverage(self): + c = cs.coverage.report('report') + return_dict = {'path': '/tmp/tmpdir/report'} + cs.assert_called_anytime('POST', '/os-coverage/action') + + def test_reset_coverage(self): + c = cs.coverage.reset() + cs.assert_called_anytime('POST', '/os-coverage/action') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_fixed_ips.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_fixed_ips.py new file mode 100644 index 0000000000..6881d1b405 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_fixed_ips.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
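The coverage tests above all POST to the single /os-coverage/action endpoint, and the fake dispatches on which key the request body carries. The same logic, lifted into a standalone function for clarity (handle_coverage_action is a hypothetical name):

    def handle_coverage_action(body):
        # start/reset return no payload; stop and report return the path
        # the coverage data was written to.
        if 'start' in body or 'reset' in body:
            return (200, {}, None)
        if 'stop' in body:
            return (200, {}, {'path': '/tmp/tmpdir/'})
        return (200, {}, {'path': '/tmp/tmpdir/' + body['report']['file']})

    assert handle_coverage_action({'report': {'file': 'report'}})[2] == \
        {'path': '/tmp/tmpdir/report'}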
+ +from novaclient.tests.v1_1 import fakes +from novaclient.tests import utils + +cs = fakes.FakeClient() + + +class FixedIpsTest(utils.TestCase): + + def test_get_fixed_ip(self): + info = cs.fixed_ips.get(fixed_ip='192.168.1.1') + cs.assert_called('GET', '/os-fixed-ips/192.168.1.1') + self.assertEqual(info.cidr, '192.168.1.0/24') + self.assertEqual(info.address, '192.168.1.1') + self.assertEqual(info.hostname, 'foo') + self.assertEqual(info.host, 'bar') + + def test_reserve_fixed_ip(self): + body = {"reserve": None} + res = cs.fixed_ips.reserve(fixed_ip='192.168.1.1') + cs.assert_called('POST', '/os-fixed-ips/192.168.1.1/action', body) + + def test_unreserve_fixed_ip(self): + body = {"unreserve": None} + res = cs.fixed_ips.unreserve(fixed_ip='192.168.1.1') + cs.assert_called('POST', '/os-fixed-ips/192.168.1.1/action', body) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_flavor_access.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_flavor_access.py new file mode 100644 index 0000000000..aea57a5110 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_flavor_access.py @@ -0,0 +1,58 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient.v1_1 import flavor_access +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class FlavorAccessTest(utils.TestCase): + + def test_list_access_by_flavor_private(self): + kwargs = {'flavor': cs.flavors.get(2)} + r = cs.flavor_access.list(**kwargs) + cs.assert_called('GET', '/flavors/2/os-flavor-access') + [self.assertTrue(isinstance(a, flavor_access.FlavorAccess)) for a in r] + + def test_add_tenant_access(self): + flavor = cs.flavors.get(2) + tenant = 'proj2' + r = cs.flavor_access.add_tenant_access(flavor, tenant) + + body = { + "addTenantAccess": { + "tenant": "proj2" + } + } + + cs.assert_called('POST', '/flavors/2/action', body) + [self.assertTrue(isinstance(a, flavor_access.FlavorAccess)) for a in r] + + def test_remove_tenant_access(self): + flavor = cs.flavors.get(2) + tenant = 'proj2' + r = cs.flavor_access.remove_tenant_access(flavor, tenant) + + body = { + "removeTenantAccess": { + "tenant": "proj2" + } + } + + cs.assert_called('POST', '/flavors/2/action', body) + [self.assertTrue(isinstance(a, flavor_access.FlavorAccess)) for a in r] diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_flavors.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_flavors.py new file mode 100644 index 0000000000..a50783bddf --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_flavors.py @@ -0,0 +1,204 @@ +# Copyright (c) 2013, OpenStack +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient import exceptions +from novaclient.v1_1 import flavors +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class FlavorsTest(utils.TestCase): + + def test_list_flavors(self): + fl = cs.flavors.list() + cs.assert_called('GET', '/flavors/detail') + for flavor in fl: + self.assertTrue(isinstance(flavor, flavors.Flavor)) + + def test_list_flavors_undetailed(self): + fl = cs.flavors.list(detailed=False) + cs.assert_called('GET', '/flavors') + for flavor in fl: + self.assertTrue(isinstance(flavor, flavors.Flavor)) + + def test_list_flavors_is_public_none(self): + fl = cs.flavors.list(is_public=None) + cs.assert_called('GET', '/flavors/detail?is_public=None') + for flavor in fl: + self.assertTrue(isinstance(flavor, flavors.Flavor)) + + def test_list_flavors_is_public_false(self): + fl = cs.flavors.list(is_public=False) + cs.assert_called('GET', '/flavors/detail?is_public=False') + for flavor in fl: + self.assertTrue(isinstance(flavor, flavors.Flavor)) + + def test_list_flavors_is_public_true(self): + fl = cs.flavors.list(is_public=True) + cs.assert_called('GET', '/flavors/detail') + for flavor in fl: + self.assertTrue(isinstance(flavor, flavors.Flavor)) + + def test_get_flavor_details(self): + f = cs.flavors.get(1) + cs.assert_called('GET', '/flavors/1') + self.assertTrue(isinstance(f, flavors.Flavor)) + self.assertEqual(f.ram, 256) + self.assertEqual(f.disk, 10) + self.assertEqual(f.ephemeral, 10) + self.assertEqual(f.is_public, True) + + def test_get_flavor_details_alphanum_id(self): + f = cs.flavors.get('aa1') + cs.assert_called('GET', '/flavors/aa1') + self.assertTrue(isinstance(f, flavors.Flavor)) + self.assertEqual(f.ram, 128) + self.assertEqual(f.disk, 0) + self.assertEqual(f.ephemeral, 0) + self.assertEqual(f.is_public, True) + + def test_get_flavor_details_diablo(self): + f = cs.flavors.get(3) + cs.assert_called('GET', '/flavors/3') + self.assertTrue(isinstance(f, flavors.Flavor)) + self.assertEqual(f.ram, 256) + self.assertEqual(f.disk, 10) + self.assertEqual(f.ephemeral, 'N/A') + self.assertEqual(f.is_public, 'N/A') + + def test_find(self): + f = cs.flavors.find(ram=256) + cs.assert_called('GET', '/flavors/detail') + self.assertEqual(f.name, '256 MB Server') + + f = cs.flavors.find(disk=0) + self.assertEqual(f.name, '128 MB Server') + + self.assertRaises(exceptions.NotFound, cs.flavors.find, disk=12345) + + def test_create(self): + f = cs.flavors.create("flavorcreate", 512, 1, 10, 1234, ephemeral=10, + is_public=False) + + body = { + "flavor": { + "name": "flavorcreate", + "ram": 512, + "vcpus": 1, + "disk": 10, + "OS-FLV-EXT-DATA:ephemeral": 10, + "id": 1234, + "swap": 0, + "rxtx_factor": 1.0, + "os-flavor-access:is_public": False, + } + } + + cs.assert_called('POST', '/flavors', body) + self.assertTrue(isinstance(f, flavors.Flavor)) + + def test_create_with_id_as_string(self): + flavor_id = 'foobar' + f = cs.flavors.create("flavorcreate", 512, + 1, 10, flavor_id, ephemeral=10, + is_public=False) + + body = { + "flavor": { + "name": "flavorcreate", + "ram": 512, + "vcpus": 1, + "disk": 10, + 
"OS-FLV-EXT-DATA:ephemeral": 10, + "id": flavor_id, + "swap": 0, + "rxtx_factor": 1.0, + "os-flavor-access:is_public": False, + } + } + + cs.assert_called('POST', '/flavors', body) + self.assertTrue(isinstance(f, flavors.Flavor)) + + def test_create_ephemeral_ispublic_defaults(self): + f = cs.flavors.create("flavorcreate", 512, 1, 10, 1234) + + body = { + "flavor": { + "name": "flavorcreate", + "ram": 512, + "vcpus": 1, + "disk": 10, + "OS-FLV-EXT-DATA:ephemeral": 0, + "id": 1234, + "swap": 0, + "rxtx_factor": 1.0, + "os-flavor-access:is_public": True, + } + } + + cs.assert_called('POST', '/flavors', body) + self.assertTrue(isinstance(f, flavors.Flavor)) + + def test_invalid_parameters_create(self): + self.assertRaises(exceptions.CommandError, cs.flavors.create, + "flavorcreate", "invalid", 1, 10, 1234, swap=0, + ephemeral=0, rxtx_factor=1.0, is_public=True) + self.assertRaises(exceptions.CommandError, cs.flavors.create, + "flavorcreate", 512, "invalid", 10, 1234, swap=0, + ephemeral=0, rxtx_factor=1.0, is_public=True) + self.assertRaises(exceptions.CommandError, cs.flavors.create, + "flavorcreate", 512, 1, "invalid", 1234, swap=0, + ephemeral=0, rxtx_factor=1.0, is_public=True) + self.assertRaises(exceptions.CommandError, cs.flavors.create, + "flavorcreate", 512, 1, 10, 1234, swap="invalid", + ephemeral=0, rxtx_factor=1.0, is_public=True) + self.assertRaises(exceptions.CommandError, cs.flavors.create, + "flavorcreate", 512, 1, 10, 1234, swap=0, + ephemeral="invalid", rxtx_factor=1.0, is_public=True) + self.assertRaises(exceptions.CommandError, cs.flavors.create, + "flavorcreate", 512, 1, 10, 1234, swap=0, + ephemeral=0, rxtx_factor="invalid", is_public=True) + self.assertRaises(exceptions.CommandError, cs.flavors.create, + "flavorcreate", 512, 1, 10, 1234, swap=0, + ephemeral=0, rxtx_factor=1.0, is_public='invalid') + + def test_delete(self): + cs.flavors.delete("flavordelete") + cs.assert_called('DELETE', '/flavors/flavordelete') + + def test_delete_with_flavor_instance(self): + f = cs.flavors.get(2) + cs.flavors.delete(f) + cs.assert_called('DELETE', '/flavors/2') + + def test_delete_with_flavor_instance_method(self): + f = cs.flavors.get(2) + f.delete() + cs.assert_called('DELETE', '/flavors/2') + + def test_set_keys(self): + f = cs.flavors.get(1) + f.set_keys({'k1': 'v1'}) + cs.assert_called('POST', '/flavors/1/os-extra_specs', + {"extra_specs": {'k1': 'v1'}}) + + def test_unset_keys(self): + f = cs.flavors.get(1) + f.unset_keys(['k1']) + cs.assert_called('DELETE', '/flavors/1/os-extra_specs/k1') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ip_dns.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ip_dns.py new file mode 100644 index 0000000000..96cf90f43b --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ip_dns.py @@ -0,0 +1,75 @@ +from novaclient.v1_1 import floating_ip_dns +from novaclient.tests.v1_1 import fakes +from novaclient.tests import utils + + +cs = fakes.FakeClient() + + +class FloatingIPDNSDomainTest(utils.TestCase): + + testdomain = "testdomain" + + def test_dns_domains(self): + domainlist = cs.dns_domains.domains() + self.assertEqual(len(domainlist), 2) + + for entry in domainlist: + self.assertTrue(isinstance(entry, + floating_ip_dns.FloatingIPDNSDomain)) + + self.assertEqual(domainlist[1].domain, 'example.com') + + def test_create_private_domain(self): + cs.dns_domains.create_private(self.testdomain, 'test_avzone') + cs.assert_called('PUT', '/os-floating-ip-dns/%s' % + self.testdomain) + + def 
test_create_public_domain(self): + cs.dns_domains.create_public(self.testdomain, 'test_project') + cs.assert_called('PUT', '/os-floating-ip-dns/%s' % + self.testdomain) + + def test_delete_domain(self): + cs.dns_domains.delete(self.testdomain) + cs.assert_called('DELETE', '/os-floating-ip-dns/%s' % + self.testdomain) + + +class FloatingIPDNSEntryTest(utils.TestCase): + + testname = "testname" + testip = "1.2.3.4" + testdomain = "testdomain" + testtype = "A" + + def test_get_dns_entries_by_ip(self): + entries = cs.dns_entries.get_for_ip(self.testdomain, ip=self.testip) + self.assertEqual(len(entries), 2) + + for entry in entries: + self.assertTrue(isinstance(entry, + floating_ip_dns.FloatingIPDNSEntry)) + + self.assertEqual(entries[1].dns_entry['name'], 'host2') + self.assertEqual(entries[1].dns_entry['ip'], self.testip) + + def test_get_dns_entry_by_name(self): + entry = cs.dns_entries.get(self.testdomain, + self.testname) + self.assertTrue(isinstance(entry, floating_ip_dns.FloatingIPDNSEntry)) + self.assertEqual(entry.name, self.testname) + + def test_create_entry(self): + cs.dns_entries.create(self.testdomain, + self.testname, + self.testip, + self.testtype) + + cs.assert_called('PUT', '/os-floating-ip-dns/%s/entries/%s' % + (self.testdomain, self.testname)) + + def test_delete_entry(self): + cs.dns_entries.delete(self.testdomain, self.testname) + cs.assert_called('DELETE', '/os-floating-ip-dns/%s/entries/%s' % + (self.testdomain, self.testname)) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ip_pools.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ip_pools.py new file mode 100644 index 0000000000..ba0fcb5176 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ip_pools.py @@ -0,0 +1,31 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient.v1_1 import floating_ip_pools +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class TestFloatingIPPools(utils.TestCase): + + def test_list_floating_ips(self): + fl = cs.floating_ip_pools.list() + cs.assert_called('GET', '/os-floating-ip-pools') + [self.assertTrue(isinstance(f, floating_ip_pools.FloatingIPPool)) + for f in fl] diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ips.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ips.py new file mode 100644 index 0000000000..04088e0975 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ips.py @@ -0,0 +1,51 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient.v1_1 import floating_ips +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class FloatingIPsTest(utils.TestCase): + + def test_list_floating_ips(self): + fl = cs.floating_ips.list() + cs.assert_called('GET', '/os-floating-ips') + [self.assertTrue(isinstance(f, floating_ips.FloatingIP)) for f in fl] + + def test_delete_floating_ip(self): + fl = cs.floating_ips.list()[0] + fl.delete() + cs.assert_called('DELETE', '/os-floating-ips/1') + cs.floating_ips.delete(1) + cs.assert_called('DELETE', '/os-floating-ips/1') + cs.floating_ips.delete(fl) + cs.assert_called('DELETE', '/os-floating-ips/1') + + def test_create_floating_ip(self): + fl = cs.floating_ips.create() + cs.assert_called('POST', '/os-floating-ips') + self.assertEqual(fl.pool, None) + self.assertTrue(isinstance(fl, floating_ips.FloatingIP)) + + def test_create_floating_ip_with_pool(self): + fl = cs.floating_ips.create('foo') + cs.assert_called('POST', '/os-floating-ips') + self.assertEqual(fl.pool, 'nova') + self.assertTrue(isinstance(fl, floating_ips.FloatingIP)) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ips_bulk.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ips_bulk.py new file mode 100644 index 0000000000..27acd5c7a5 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_floating_ips_bulk.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from novaclient.v1_1 import floating_ips_bulk +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class FloatingIPsBulkTest(utils.TestCase): + + def test_list_floating_ips_bulk(self): + fl = cs.floating_ips_bulk.list() + cs.assert_called('GET', '/os-floating-ips-bulk') + [self.assertTrue(isinstance(f, floating_ips_bulk.FloatingIP)) + for f in fl] + + def test_list_floating_ips_bulk_host_filter(self): + fl = cs.floating_ips_bulk.list('testHost') + cs.assert_called('GET', '/os-floating-ips-bulk/testHost') + [self.assertTrue(isinstance(f, floating_ips_bulk.FloatingIP)) + for f in fl] + + def test_create_floating_ips_bulk(self): + fl = cs.floating_ips_bulk.create('192.168.1.0/30') + body = {'floating_ips_bulk_create': {'ip_range': '192.168.1.0/30'}} + cs.assert_called('POST', '/os-floating-ips-bulk', body) + self.assertEqual(fl.ip_range, + body['floating_ips_bulk_create']['ip_range']) + + def test_create_floating_ips_bulk_with_pool_and_host(self): + fl = cs.floating_ips_bulk.create('192.168.1.0/30', 'poolTest', + 'interfaceTest') + body = {'floating_ips_bulk_create': + {'ip_range': '192.168.1.0/30', 'pool': 'poolTest', + 'interface': 'interfaceTest'}} + cs.assert_called('POST', '/os-floating-ips-bulk', body) + self.assertEqual(fl.ip_range, + body['floating_ips_bulk_create']['ip_range']) + self.assertEqual(fl.pool, + body['floating_ips_bulk_create']['pool']) + self.assertEqual(fl.interface, + body['floating_ips_bulk_create']['interface']) + + def test_delete_floating_ips_bulk(self): + fl = cs.floating_ips_bulk.delete('192.168.1.0/30') + body = {'ip_range': '192.168.1.0/30'} + cs.assert_called('PUT', '/os-floating-ips-bulk/delete', body) + self.assertEqual(fl.floating_ips_bulk_delete, body['ip_range']) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_fping.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_fping.py new file mode 100644 index 0000000000..7f240e2991 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_fping.py @@ -0,0 +1,63 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
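The fping list tests below assert on the query strings the manager derives from keyword filters (all_tenants, include, exclude). A rough sketch of that translation using only the standard library; build_fping_url is a hypothetical helper written to mirror the URLs asserted below, not the manager's real code:

try:
    from urllib import urlencode  # Python 2, matching this codebase
except ImportError:
    from urllib.parse import urlencode  # Python 3


def build_fping_url(all_tenants=False, include=None, exclude=None):
    # Mirrors the URLs asserted below: /os-fping?all_tenants=1, etc.
    params = []
    if all_tenants:
        params.append(('all_tenants', 1))
    if include:
        params.append(('include', ','.join(include)))
    elif exclude:
        params.append(('exclude', ','.join(exclude)))
    query = urlencode(params)
    return '/os-fping' + ('?' + query if query else '')


assert build_fping_url() == '/os-fping'
assert build_fping_url(all_tenants=True) == '/os-fping?all_tenants=1'
assert build_fping_url(include=['1']) == '/os-fping?include=1'
assert build_fping_url(exclude=['1']) == '/os-fping?exclude=1'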
+ +from novaclient.v1_1 import fping +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class FpingTest(utils.TestCase): + + def test_fping_repr(self): + r = cs.fping.get(1) + self.assertEqual(repr(r), "<Fping: 1>") + + def test_list_fpings(self): + fl = cs.fping.list() + cs.assert_called('GET', '/os-fping') + for f in fl: + self.assertTrue(isinstance(f, fping.Fping)) + self.assertEqual(f.project_id, "fake-project") + self.assertEqual(f.alive, True) + + def test_list_fpings_all_tenants(self): + fl = cs.fping.list(all_tenants=True) + for f in fl: + self.assertTrue(isinstance(f, fping.Fping)) + cs.assert_called('GET', '/os-fping?all_tenants=1') + + def test_list_fpings_exclude(self): + fl = cs.fping.list(exclude=['1']) + for f in fl: + self.assertTrue(isinstance(f, fping.Fping)) + cs.assert_called('GET', '/os-fping?exclude=1') + + def test_list_fpings_include(self): + fl = cs.fping.list(include=['1']) + for f in fl: + self.assertTrue(isinstance(f, fping.Fping)) + cs.assert_called('GET', '/os-fping?include=1') + + def test_get_fping(self): + f = cs.fping.get(1) + cs.assert_called('GET', '/os-fping/1') + self.assertTrue(isinstance(f, fping.Fping)) + self.assertEqual(f.project_id, "fake-project") + self.assertEqual(f.alive, True) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_hosts.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_hosts.py new file mode 100644 index 0000000000..92d5bc9658 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_hosts.py @@ -0,0 +1,66 @@ +from novaclient.v1_1 import hosts +from novaclient.tests.v1_1 import fakes +from novaclient.tests import utils + + +cs = fakes.FakeClient() + + +class HostsTest(utils.TestCase): + + def test_describe_resource(self): + hs = cs.hosts.get('host') + cs.assert_called('GET', '/os-hosts/host') + [self.assertTrue(isinstance(h, hosts.Host)) for h in hs] + + def test_list_host(self): + hs = cs.hosts.list() + cs.assert_called('GET', '/os-hosts') + [self.assertTrue(isinstance(h, hosts.Host)) for h in hs] + [self.assertEqual(h.zone, 'nova1') for h in hs] + + def test_list_host_with_zone(self): + hs = cs.hosts.list('nova') + cs.assert_called('GET', '/os-hosts?zone=nova') + [self.assertTrue(isinstance(h, hosts.Host)) for h in hs] + [self.assertEqual(h.zone, 'nova') for h in hs] + + def test_update_enable(self): + host = cs.hosts.get('sample_host')[0] + values = {"status": "enabled"} + result = host.update(values) + cs.assert_called('PUT', '/os-hosts/sample_host', values) + self.assertTrue(isinstance(result, hosts.Host)) + + def test_update_maintenance(self): + host = cs.hosts.get('sample_host')[0] + values = {"maintenance_mode": "enable"} + result = host.update(values) + cs.assert_called('PUT', '/os-hosts/sample_host', values) + self.assertTrue(isinstance(result, hosts.Host)) + + def test_update_both(self): + host = cs.hosts.get('sample_host')[0] + values = {"status": "enabled", + "maintenance_mode": "enable"} + result = host.update(values) + cs.assert_called('PUT', '/os-hosts/sample_host', values) + self.assertTrue(isinstance(result, hosts.Host)) + + def test_host_startup(self): + host = cs.hosts.get('sample_host')[0] + result = host.startup() + cs.assert_called( + 'GET', '/os-hosts/sample_host/startup') + + def test_host_reboot(self): + host = cs.hosts.get('sample_host')[0] + result = host.reboot() + cs.assert_called( + 'GET', '/os-hosts/sample_host/reboot') + + def test_host_shutdown(self): + host = cs.hosts.get('sample_host')[0] + result = host.shutdown() + 
cs.assert_called( + 'GET', '/os-hosts/sample_host/shutdown') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_hypervisors.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_hypervisors.py new file mode 100644 index 0000000000..8d59d59843 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_hypervisors.py @@ -0,0 +1,170 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class HypervisorsTest(utils.TestCase): + def compare_to_expected(self, expected, hyper): + for key, value in expected.items(): + self.assertEqual(getattr(hyper, key), value) + + def test_hypervisor_index(self): + expected = [ + dict(id=1234, hypervisor_hostname='hyper1'), + dict(id=5678, hypervisor_hostname='hyper2'), + ] + + result = cs.hypervisors.list(False) + cs.assert_called('GET', '/os-hypervisors') + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) + + def test_hypervisor_detail(self): + expected = [ + dict(id=1234, + service=dict(id=1, host='compute1'), + vcpus=4, + memory_mb=10 * 1024, + local_gb=250, + vcpus_used=2, + memory_mb_used=5 * 1024, + local_gb_used=125, + hypervisor_type="xen", + hypervisor_version=3, + hypervisor_hostname="hyper1", + free_ram_mb=5 * 1024, + free_disk_gb=125, + current_workload=2, + running_vms=2, + cpu_info='cpu_info', + disk_available_least=100), + dict(id=2, + service=dict(id=2, host="compute2"), + vcpus=4, + memory_mb=10 * 1024, + local_gb=250, + vcpus_used=2, + memory_mb_used=5 * 1024, + local_gb_used=125, + hypervisor_type="xen", + hypervisor_version=3, + hypervisor_hostname="hyper2", + free_ram_mb=5 * 1024, + free_disk_gb=125, + current_workload=2, + running_vms=2, + cpu_info='cpu_info', + disk_available_least=100)] + + result = cs.hypervisors.list() + cs.assert_called('GET', '/os-hypervisors/detail') + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) + + def test_hypervisor_search(self): + expected = [ + dict(id=1234, hypervisor_hostname='hyper1'), + dict(id=5678, hypervisor_hostname='hyper2'), + ] + + result = cs.hypervisors.search('hyper') + cs.assert_called('GET', '/os-hypervisors/hyper/search') + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) + + def test_hypervisor_servers(self): + expected = [ + dict(id=1234, + hypervisor_hostname='hyper1', + servers=[ + dict(name='inst1', uuid='uuid1'), + dict(name='inst2', uuid='uuid2')]), + dict(id=5678, + hypervisor_hostname='hyper2', + servers=[ + dict(name='inst3', uuid='uuid3'), + dict(name='inst4', uuid='uuid4')]), + ] + + result = cs.hypervisors.search('hyper', True) + cs.assert_called('GET', '/os-hypervisors/hyper/servers') + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) + + def test_hypervisor_get(self): + expected = dict( + id=1234, + service=dict(id=1, host='compute1'), 
+ vcpus=4, + memory_mb=10 * 1024, + local_gb=250, + vcpus_used=2, + memory_mb_used=5 * 1024, + local_gb_used=125, + hypervisor_type="xen", + hypervisor_version=3, + hypervisor_hostname="hyper1", + free_ram_mb=5 * 1024, + free_disk_gb=125, + current_workload=2, + running_vms=2, + cpu_info='cpu_info', + disk_available_least=100) + + result = cs.hypervisors.get(1234) + cs.assert_called('GET', '/os-hypervisors/1234') + + self.compare_to_expected(expected, result) + + def test_hypervisor_uptime(self): + expected = dict( + id=1234, + hypervisor_hostname="hyper1", + uptime="fake uptime") + + result = cs.hypervisors.uptime(1234) + cs.assert_called('GET', '/os-hypervisors/1234/uptime') + + self.compare_to_expected(expected, result) + + def test_hypervisor_statistics(self): + expected = dict( + count=2, + vcpus=8, + memory_mb=20 * 1024, + local_gb=500, + vcpus_used=4, + memory_mb_used=10 * 1024, + local_gb_used=250, + free_ram_mb=10 * 1024, + free_disk_gb=250, + current_workload=4, + running_vms=4, + disk_available_least=200, + ) + + result = cs.hypervisors.statistics() + cs.assert_called('GET', '/os-hypervisors/statistics') + + self.compare_to_expected(expected, result) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_images.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_images.py new file mode 100644 index 0000000000..5f9cfacd0c --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_images.py @@ -0,0 +1,53 @@ +from novaclient.v1_1 import images +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class ImagesTest(utils.TestCase): + + def test_list_images(self): + il = cs.images.list() + cs.assert_called('GET', '/images/detail') + [self.assertTrue(isinstance(i, images.Image)) for i in il] + + def test_list_images_undetailed(self): + il = cs.images.list(detailed=False) + cs.assert_called('GET', '/images') + [self.assertTrue(isinstance(i, images.Image)) for i in il] + + def test_list_images_with_limit(self): + il = cs.images.list(limit=4) + cs.assert_called('GET', '/images/detail?limit=4') + + def test_get_image_details(self): + i = cs.images.get(1) + cs.assert_called('GET', '/images/1') + self.assertTrue(isinstance(i, images.Image)) + self.assertEqual(i.id, 1) + self.assertEqual(i.name, 'CentOS 5.2') + + def test_delete_image(self): + cs.images.delete(1) + cs.assert_called('DELETE', '/images/1') + + def test_delete_meta(self): + cs.images.delete_meta(1, {'test_key': 'test_value'}) + cs.assert_called('DELETE', '/images/1/metadata/test_key') + + def test_set_meta(self): + cs.images.set_meta(1, {'test_key': 'test_value'}) + cs.assert_called('POST', '/images/1/metadata', + {"metadata": {'test_key': 'test_value'}}) + + def test_find(self): + i = cs.images.find(name="CentOS 5.2") + self.assertEqual(i.id, 1) + cs.assert_called('GET', '/images', pos=-2) + cs.assert_called('GET', '/images/1', pos=-1) + + iml = cs.images.findall(status='SAVING') + self.assertEqual(len(iml), 1) + self.assertEqual(iml[0].name, 'My Server Backup') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_keypairs.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_keypairs.py new file mode 100644 index 0000000000..64fbc0eadc --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_keypairs.py @@ -0,0 +1,39 @@ +from novaclient.v1_1 import keypairs +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class KeypairsTest(utils.TestCase): + + def 
test_get_keypair(self): + kp = cs.keypairs.get('test') + cs.assert_called('GET', '/os-keypairs/test') + self.assertTrue(isinstance(kp, keypairs.Keypair)) + self.assertEqual(kp.name, 'test') + + def test_list_keypairs(self): + kps = cs.keypairs.list() + cs.assert_called('GET', '/os-keypairs') + [self.assertTrue(isinstance(kp, keypairs.Keypair)) for kp in kps] + + def test_delete_keypair(self): + kp = cs.keypairs.list()[0] + kp.delete() + cs.assert_called('DELETE', '/os-keypairs/test') + cs.keypairs.delete('test') + cs.assert_called('DELETE', '/os-keypairs/test') + cs.keypairs.delete(kp) + cs.assert_called('DELETE', '/os-keypairs/test') + + def test_create_keypair(self): + kp = cs.keypairs.create("foo") + cs.assert_called('POST', '/os-keypairs') + self.assertTrue(isinstance(kp, keypairs.Keypair)) + + def test_import_keypair(self): + kp = cs.keypairs.create("foo", "fake-public-key") + cs.assert_called('POST', '/os-keypairs') + self.assertTrue(isinstance(kp, keypairs.Keypair)) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_limits.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_limits.py new file mode 100644 index 0000000000..517a6f1fb9 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_limits.py @@ -0,0 +1,75 @@ + +from novaclient.v1_1 import limits +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class LimitsTest(utils.TestCase): + + def test_get_limits(self): + obj = cs.limits.get() + cs.assert_called('GET', '/limits') + self.assertTrue(isinstance(obj, limits.Limits)) + + def test_get_limits_for_a_tenant(self): + obj = cs.limits.get(tenant_id=1234) + cs.assert_called('GET', '/limits?tenant_id=1234') + self.assertTrue(isinstance(obj, limits.Limits)) + + def test_absolute_limits(self): + obj = cs.limits.get() + + expected = ( + limits.AbsoluteLimit("maxTotalRAMSize", 51200), + limits.AbsoluteLimit("maxServerMeta", 5), + limits.AbsoluteLimit("maxImageMeta", 5), + limits.AbsoluteLimit("maxPersonality", 5), + limits.AbsoluteLimit("maxPersonalitySize", 10240), + ) + + abs_limits = list(obj.absolute) + self.assertEqual(len(abs_limits), len(expected)) + + for limit in abs_limits: + self.assertTrue(limit in expected) + + def test_absolute_limits_reserved(self): + obj = cs.limits.get(reserved=True) + + expected = ( + limits.AbsoluteLimit("maxTotalRAMSize", 51200), + limits.AbsoluteLimit("maxServerMeta", 5), + limits.AbsoluteLimit("maxImageMeta", 5), + limits.AbsoluteLimit("maxPersonality", 5), + limits.AbsoluteLimit("maxPersonalitySize", 10240), + ) + + cs.assert_called('GET', '/limits?reserved=1') + abs_limits = list(obj.absolute) + self.assertEqual(len(abs_limits), len(expected)) + + for limit in abs_limits: + self.assertTrue(limit in expected) + + def test_rate_limits(self): + obj = cs.limits.get() + + expected = ( + limits.RateLimit('POST', '*', '.*', 10, 2, 'MINUTE', + '2011-12-15T22:42:45Z'), + limits.RateLimit('PUT', '*', '.*', 10, 2, 'MINUTE', + '2011-12-15T22:42:45Z'), + limits.RateLimit('DELETE', '*', '.*', 100, 100, 'MINUTE', + '2011-12-15T22:42:45Z'), + limits.RateLimit('POST', '*/servers', '^/servers', 25, 24, 'DAY', + '2011-12-15T22:42:45Z'), + ) + + rate_limits = list(obj.rate) + self.assertEqual(len(rate_limits), len(expected)) + + for limit in rate_limits: + self.assertTrue(limit in expected) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_networks.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_networks.py new file mode 100644 index 0000000000..d40ec53f2e --- 
/dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_networks.py @@ -0,0 +1,85 @@ +from novaclient.v1_1 import networks +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class NetworksTest(utils.TestCase): + + def test_list_networks(self): + fl = cs.networks.list() + cs.assert_called('GET', '/os-networks') + [self.assertTrue(isinstance(f, networks.Network)) for f in fl] + + def test_get_network(self): + f = cs.networks.get(1) + cs.assert_called('GET', '/os-networks/1') + self.assertTrue(isinstance(f, networks.Network)) + + def test_delete(self): + cs.networks.delete('networkdelete') + cs.assert_called('DELETE', '/os-networks/networkdelete') + + def test_create(self): + f = cs.networks.create(label='foo') + cs.assert_called('POST', '/os-networks', + {'network': {'label': 'foo'}}) + self.assertTrue(isinstance(f, networks.Network)) + + def test_create_allparams(self): + params = { + 'label': 'bar', + 'bridge': 'br0', + 'bridge_interface': 'int0', + 'cidr': '192.0.2.0/24', + 'cidr_v6': '2001:DB8::/32', + 'dns1': '1.1.1.1', + 'dns2': '1.1.1.2', + 'fixed_cidr': '198.51.100.0/24', + 'gateway': '192.0.2.1', + 'gateway_v6': '2001:DB8::1', + 'multi_host': 'T', + 'priority': '1', + 'project_id': '1', + 'vlan_start': 1, + 'vpn_start': 1 + } + + f = cs.networks.create(**params) + cs.assert_called('POST', '/os-networks', {'network': params}) + self.assertTrue(isinstance(f, networks.Network)) + + def test_associate_project(self): + cs.networks.associate_project('networktest') + cs.assert_called('POST', '/os-networks/add', + {'id': 'networktest'}) + + def test_associate_host(self): + cs.networks.associate_host('networktest', 'testHost') + cs.assert_called('POST', '/os-networks/networktest/action', + {'associate_host': 'testHost'}) + + def test_disassociate(self): + cs.networks.disassociate('networkdisassociate') + cs.assert_called('POST', + '/os-networks/networkdisassociate/action', + {'disassociate': None}) + + def test_disassociate_host_only(self): + cs.networks.disassociate('networkdisassociate', True, False) + cs.assert_called('POST', + '/os-networks/networkdisassociate/action', + {'disassociate_host': None}) + + def test_disassociate_project(self): + cs.networks.disassociate('networkdisassociate', False, True) + cs.assert_called('POST', + '/os-networks/networkdisassociate/action', + {'disassociate_project': None}) + + def test_add(self): + cs.networks.add('networkadd') + cs.assert_called('POST', '/os-networks/add', + {'id': 'networkadd'}) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_quota_classes.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_quota_classes.py new file mode 100644 index 0000000000..eceb606dae --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_quota_classes.py @@ -0,0 +1,42 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
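Both quota suites below include a test_refresh_quota case built on the resource objects' get() method re-fetching the record and overwriting any locally modified attributes. A compact sketch of that refresh behaviour, assuming an in-memory store (QuotaRecord and STORE are illustrative names, not novaclient's base Resource):

# Illustrative sketch of the refresh pattern exercised by the
# test_refresh_quota cases; these names are not novaclient's.
STORE = {'test': {'volumes': 10}}


class QuotaRecord(object):
    def __init__(self, tenant_id):
        self.tenant_id = tenant_id
        self.get()

    def get(self):
        # Re-read the stored record, clobbering local modifications.
        for key, value in STORE[self.tenant_id].items():
            setattr(self, key, value)


q = QuotaRecord('test')
q2 = QuotaRecord('test')
q2.volumes = 0                 # local change only
assert q.volumes != q2.volumes
q2.get()                       # refresh from the store
assert q.volumes == q2.volumes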
+ +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class QuotaClassSetsTest(utils.TestCase): + + def test_class_quotas_get(self): + class_name = 'test' + cs.quota_classes.get(class_name) + cs.assert_called('GET', '/os-quota-class-sets/%s' % class_name) + + def test_update_quota(self): + q = cs.quota_classes.get('test') + q.update(volumes=2) + cs.assert_called('PUT', '/os-quota-class-sets/test') + + def test_refresh_quota(self): + q = cs.quota_classes.get('test') + q2 = cs.quota_classes.get('test') + self.assertEqual(q.volumes, q2.volumes) + q2.volumes = 0 + self.assertNotEqual(q.volumes, q2.volumes) + q2.get() + self.assertEqual(q.volumes, q2.volumes) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_quotas.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_quotas.py new file mode 100644 index 0000000000..b545e6b186 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_quotas.py @@ -0,0 +1,83 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + +cs = fakes.FakeClient() + + +class QuotaSetsTest(utils.TestCase): + + def test_tenant_quotas_get(self): + tenant_id = 'test' + cs.quotas.get(tenant_id) + cs.assert_called('GET', '/os-quota-sets/%s' % tenant_id) + + def test_user_quotas_get(self): + tenant_id = 'test' + user_id = 'fake_user' + cs.quotas.get(tenant_id, user_id=user_id) + url = '/os-quota-sets/%s?user_id=%s' % (tenant_id, user_id) + cs.assert_called('GET', url) + + def test_tenant_quotas_defaults(self): + tenant_id = '97f4c221bff44578b0300df4ef119353' + cs.quotas.defaults(tenant_id) + cs.assert_called('GET', '/os-quota-sets/%s/defaults' % tenant_id) + + def test_update_quota(self): + q = cs.quotas.get('97f4c221bff44578b0300df4ef119353') + q.update(volumes=2) + cs.assert_called('PUT', + '/os-quota-sets/97f4c221bff44578b0300df4ef119353') + + def test_update_user_quota(self): + tenant_id = '97f4c221bff44578b0300df4ef119353' + user_id = 'fake_user' + q = cs.quotas.get(tenant_id) + q.update(volumes=2, user_id=user_id) + url = '/os-quota-sets/%s?user_id=%s' % (tenant_id, user_id) + cs.assert_called('PUT', url) + + def test_force_update_quota(self): + q = cs.quotas.get('97f4c221bff44578b0300df4ef119353') + q.update(cores=2, force=True) + cs.assert_called( + 'PUT', '/os-quota-sets/97f4c221bff44578b0300df4ef119353', + {'quota_set': {'force': True, + 'cores': 2, + 'tenant_id': '97f4c221bff44578b0300df4ef119353'}}) + + def test_refresh_quota(self): + q = cs.quotas.get('test') + q2 = cs.quotas.get('test') + self.assertEqual(q.volumes, q2.volumes) + q2.volumes = 0 + self.assertNotEqual(q.volumes, q2.volumes) + q2.get() + self.assertEqual(q.volumes, q2.volumes) + + def test_quotas_delete(self): + tenant_id = 'test' + cs.quotas.delete(tenant_id) + cs.assert_called('DELETE', '/os-quota-sets/%s' % tenant_id) + + def test_user_quotas_delete(self): + tenant_id = 'test' + 
user_id = 'fake_user' + cs.quotas.delete(tenant_id, user_id=user_id) + url = '/os-quota-sets/%s?user_id=%s' % (tenant_id, user_id) + cs.assert_called('DELETE', url) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_security_group_rules.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_security_group_rules.py new file mode 100644 index 0000000000..e1d015fa56 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_security_group_rules.py @@ -0,0 +1,68 @@ +from novaclient import exceptions +from novaclient.v1_1 import security_group_rules +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class SecurityGroupRulesTest(utils.TestCase): + def test_delete_security_group_rule(self): + cs.security_group_rules.delete(1) + cs.assert_called('DELETE', '/os-security-group-rules/1') + + def test_create_security_group_rule(self): + sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16") + + body = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": 1, + "to_port": 65535, + "cidr": "10.0.0.0/16", + "group_id": None, + "parent_group_id": 1, + } + } + + cs.assert_called('POST', '/os-security-group-rules', body) + self.assertTrue(isinstance(sg, security_group_rules.SecurityGroupRule)) + + def test_create_security_group_group_rule(self): + sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16", + 101) + + body = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": 1, + "to_port": 65535, + "cidr": "10.0.0.0/16", + "group_id": 101, + "parent_group_id": 1, + } + } + + cs.assert_called('POST', '/os-security-group-rules', body) + self.assertTrue(isinstance(sg, security_group_rules.SecurityGroupRule)) + + def test_invalid_parameters_create(self): + self.assertRaises(exceptions.CommandError, + cs.security_group_rules.create, + 1, "invalid_ip_protocol", 1, 65535, "10.0.0.0/16", 101) + self.assertRaises(exceptions.CommandError, + cs.security_group_rules.create, + 1, "tcp", "invalid_from_port", 65535, "10.0.0.0/16", 101) + self.assertRaises(exceptions.CommandError, + cs.security_group_rules.create, + 1, "tcp", 1, "invalid_to_port", "10.0.0.0/16", 101) + + def test_security_group_rule_str(self): + sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16") + self.assertEqual('1', str(sg)) + + def test_security_group_rule_del(self): + sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16") + sg.delete() + cs.assert_called('DELETE', '/os-security-group-rules/1') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_security_groups.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_security_groups.py new file mode 100644 index 0000000000..0040d3054a --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_security_groups.py @@ -0,0 +1,61 @@ +from novaclient.v1_1 import security_groups +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class SecurityGroupsTest(utils.TestCase): + def _do_test_list_security_groups(self, search_opts, path): + sgs = cs.security_groups.list(search_opts=search_opts) + cs.assert_called('GET', path) + for sg in sgs: + self.assertTrue(isinstance(sg, security_groups.SecurityGroup)) + + def test_list_security_groups(self): + self._do_test_list_security_groups( + None, '/os-security-groups') + + def test_list_security_groups_all_tenants_on(self): + self._do_test_list_security_groups( + {'all_tenants': 1}, 
'/os-security-groups?all_tenants=1') + + def test_list_security_groups_all_tenants_off(self): + self._do_test_list_security_groups( + {'all_tenants': 0}, '/os-security-groups') + + def test_get_security_groups(self): + sg = cs.security_groups.get(1) + cs.assert_called('GET', '/os-security-groups/1') + self.assertTrue(isinstance(sg, security_groups.SecurityGroup)) + self.assertEqual('1', str(sg)) + + def test_delete_security_group(self): + sg = cs.security_groups.list()[0] + sg.delete() + cs.assert_called('DELETE', '/os-security-groups/1') + cs.security_groups.delete(1) + cs.assert_called('DELETE', '/os-security-groups/1') + cs.security_groups.delete(sg) + cs.assert_called('DELETE', '/os-security-groups/1') + + def test_create_security_group(self): + sg = cs.security_groups.create("foo", "foo barr") + cs.assert_called('POST', '/os-security-groups') + self.assertTrue(isinstance(sg, security_groups.SecurityGroup)) + + def test_update_security_group(self): + sg = cs.security_groups.list()[0] + secgroup = cs.security_groups.update(sg, "update", "update") + cs.assert_called('PUT', '/os-security-groups/1') + self.assertTrue(isinstance(secgroup, security_groups.SecurityGroup)) + + def test_refresh_security_group(self): + sg = cs.security_groups.get(1) + sg2 = cs.security_groups.get(1) + self.assertEqual(sg.name, sg2.name) + sg2.name = "should be test" + self.assertNotEqual(sg.name, sg2.name) + sg2.get() + self.assertEqual(sg.name, sg2.name) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_servers.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_servers.py new file mode 100644 index 0000000000..76d3e3cd81 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_servers.py @@ -0,0 +1,533 @@ +# -*- coding: utf-8 -*- + +import mock +import six + +from novaclient import exceptions +from novaclient.v1_1 import servers +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class ServersTest(utils.TestCase): + + def test_list_servers(self): + sl = cs.servers.list() + cs.assert_called('GET', '/servers/detail') + [self.assertTrue(isinstance(s, servers.Server)) for s in sl] + + def test_list_servers_undetailed(self): + sl = cs.servers.list(detailed=False) + cs.assert_called('GET', '/servers') + [self.assertTrue(isinstance(s, servers.Server)) for s in sl] + + def test_list_servers_with_marker_limit(self): + sl = cs.servers.list(marker=1234, limit=2) + cs.assert_called('GET', '/servers/detail?marker=1234&limit=2') + for s in sl: + self.assertTrue(isinstance(s, servers.Server)) + + def test_get_server_details(self): + s = cs.servers.get(1234) + cs.assert_called('GET', '/servers/1234') + self.assertTrue(isinstance(s, servers.Server)) + self.assertEqual(s.id, 1234) + self.assertEqual(s.status, 'BUILD') + + def test_get_server_promote_details(self): + s1 = cs.servers.list(detailed=False)[0] + s2 = cs.servers.list(detailed=True)[0] + self.assertNotEquals(s1._info, s2._info) + s1.get() + self.assertEqual(s1._info, s2._info) + + def test_create_server(self): + s = cs.servers.create( + name="My server", + image=1, + flavor=1, + meta={'foo': 'bar'}, + userdata="hello moto", + key_name="fakekey", + files={ + '/etc/passwd': 'some data', # a file + '/tmp/foo.txt': six.StringIO('data'), # a stream + } + ) + cs.assert_called('POST', '/servers') + self.assertTrue(isinstance(s, servers.Server)) + + def test_create_server_boot_from_volume_with_nics(self): + old_boot = cs.servers._boot + + nics = [{'net-id': 
'11111111-1111-1111-1111-111111111111', + 'v4-fixed-ip': '10.0.0.7'}] + bdm = {"volume_size": "1", + "volume_id": "11111111-1111-1111-1111-111111111111", + "delete_on_termination": "0", + "device_name": "vda"} + + def wrapped_boot(url, key, *boot_args, **boot_kwargs): + self.assertEqual(boot_kwargs['block_device_mapping'], bdm) + self.assertEqual(boot_kwargs['nics'], nics) + return old_boot(url, key, *boot_args, **boot_kwargs) + + @mock.patch.object(cs.servers, '_boot', wrapped_boot) + def test_create_server_from_volume(): + s = cs.servers.create( + name="My server", + image=1, + flavor=1, + meta={'foo': 'bar'}, + userdata="hello moto", + key_name="fakekey", + block_device_mapping=bdm, + nics=nics + ) + cs.assert_called('POST', '/os-volumes_boot') + self.assertTrue(isinstance(s, servers.Server)) + + test_create_server_from_volume() + + def test_create_server_userdata_file_object(self): + s = cs.servers.create( + name="My server", + image=1, + flavor=1, + meta={'foo': 'bar'}, + userdata=six.StringIO('hello moto'), + files={ + '/etc/passwd': 'some data', # a file + '/tmp/foo.txt': six.StringIO('data'), # a stream + }, + ) + cs.assert_called('POST', '/servers') + self.assertTrue(isinstance(s, servers.Server)) + + def test_create_server_userdata_unicode(self): + s = cs.servers.create( + name="My server", + image=1, + flavor=1, + meta={'foo': 'bar'}, + userdata=six.u('こんにちは'), + key_name="fakekey", + files={ + '/etc/passwd': 'some data', # a file + '/tmp/foo.txt': six.StringIO('data'), # a stream + }, + ) + cs.assert_called('POST', '/servers') + self.assertTrue(isinstance(s, servers.Server)) + + def test_create_server_userdata_utf8(self): + s = cs.servers.create( + name="My server", + image=1, + flavor=1, + meta={'foo': 'bar'}, + userdata='こんにちは', + key_name="fakekey", + files={ + '/etc/passwd': 'some data', # a file + '/tmp/foo.txt': six.StringIO('data'), # a stream + }, + ) + cs.assert_called('POST', '/servers') + self.assertTrue(isinstance(s, servers.Server)) + + def _create_disk_config(self, disk_config): + s = cs.servers.create( + name="My server", + image=1, + flavor=1, + disk_config=disk_config + ) + cs.assert_called('POST', '/servers') + self.assertTrue(isinstance(s, servers.Server)) + + # verify disk config param was used in the request: + last_request = cs.client.callstack[-1] + body = last_request[-1] + server = body['server'] + self.assertTrue('OS-DCF:diskConfig' in server) + self.assertEqual(disk_config, server['OS-DCF:diskConfig']) + + def test_create_server_disk_config_auto(self): + self._create_disk_config('AUTO') + + def test_create_server_disk_config_manual(self): + self._create_disk_config('MANUAL') + + def test_update_server(self): + s = cs.servers.get(1234) + + # Update via instance + s.update(name='hi') + cs.assert_called('PUT', '/servers/1234') + s.update(name='hi') + cs.assert_called('PUT', '/servers/1234') + + # Silly, but not an error + s.update() + + # Update via manager + cs.servers.update(s, name='hi') + cs.assert_called('PUT', '/servers/1234') + + def test_delete_server(self): + s = cs.servers.get(1234) + s.delete() + cs.assert_called('DELETE', '/servers/1234') + cs.servers.delete(1234) + cs.assert_called('DELETE', '/servers/1234') + cs.servers.delete(s) + cs.assert_called('DELETE', '/servers/1234') + + def test_delete_server_meta(self): + s = cs.servers.delete_meta(1234, ['test_key']) + cs.assert_called('DELETE', '/servers/1234/metadata/test_key') + + def test_set_server_meta(self): + s = cs.servers.set_meta(1234, {'test_key': 'test_value'}) + reval = 
cs.assert_called('POST', '/servers/1234/metadata', + {'metadata': {'test_key': 'test_value'}}) + + def test_find(self): + server = cs.servers.find(name='sample-server') + cs.assert_called('GET', '/servers', pos=-2) + cs.assert_called('GET', '/servers/1234', pos=-1) + self.assertEqual(server.name, 'sample-server') + + self.assertRaises(exceptions.NoUniqueMatch, cs.servers.find, + flavor={"id": 1, "name": "256 MB Server"}) + + sl = cs.servers.findall(flavor={"id": 1, "name": "256 MB Server"}) + self.assertEqual([s.id for s in sl], [1234, 5678, 9012]) + + def test_reboot_server(self): + s = cs.servers.get(1234) + s.reboot() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.reboot(s, reboot_type='HARD') + cs.assert_called('POST', '/servers/1234/action') + + def test_rebuild_server(self): + s = cs.servers.get(1234) + s.rebuild(image=1) + cs.assert_called('POST', '/servers/1234/action') + cs.servers.rebuild(s, image=1) + cs.assert_called('POST', '/servers/1234/action') + s.rebuild(image=1, password='5678') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.rebuild(s, image=1, password='5678') + cs.assert_called('POST', '/servers/1234/action') + + def _rebuild_resize_disk_config(self, disk_config, operation="rebuild"): + s = cs.servers.get(1234) + + if operation == "rebuild": + s.rebuild(image=1, disk_config=disk_config) + elif operation == "resize": + s.resize(flavor=1, disk_config=disk_config) + cs.assert_called('POST', '/servers/1234/action') + + # verify disk config param was used in the request: + last_request = cs.client.callstack[-1] + body = last_request[-1] + + d = body[operation] + self.assertTrue('OS-DCF:diskConfig' in d) + self.assertEqual(disk_config, d['OS-DCF:diskConfig']) + + def test_rebuild_server_disk_config_auto(self): + self._rebuild_resize_disk_config('AUTO') + + def test_rebuild_server_disk_config_manual(self): + self._rebuild_resize_disk_config('MANUAL') + + def test_resize_server(self): + s = cs.servers.get(1234) + s.resize(flavor=1) + cs.assert_called('POST', '/servers/1234/action') + cs.servers.resize(s, flavor=1) + cs.assert_called('POST', '/servers/1234/action') + + def test_resize_server_disk_config_auto(self): + self._rebuild_resize_disk_config('AUTO', 'resize') + + def test_resize_server_disk_config_manual(self): + self._rebuild_resize_disk_config('MANUAL', 'resize') + + def test_confirm_resized_server(self): + s = cs.servers.get(1234) + s.confirm_resize() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.confirm_resize(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_revert_resized_server(self): + s = cs.servers.get(1234) + s.revert_resize() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.revert_resize(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_migrate_server(self): + s = cs.servers.get(1234) + s.migrate() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.migrate(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_add_fixed_ip(self): + s = cs.servers.get(1234) + s.add_fixed_ip(1) + cs.assert_called('POST', '/servers/1234/action') + cs.servers.add_fixed_ip(s, 1) + cs.assert_called('POST', '/servers/1234/action') + + def test_remove_fixed_ip(self): + s = cs.servers.get(1234) + s.remove_fixed_ip('10.0.0.1') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.remove_fixed_ip(s, '10.0.0.1') + cs.assert_called('POST', '/servers/1234/action') + + def test_add_floating_ip(self): + s = cs.servers.get(1234) + 
s.add_floating_ip('11.0.0.1') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.add_floating_ip(s, '11.0.0.1') + cs.assert_called('POST', '/servers/1234/action') + f = cs.floating_ips.list()[0] + cs.servers.add_floating_ip(s, f) + cs.assert_called('POST', '/servers/1234/action') + s.add_floating_ip(f) + cs.assert_called('POST', '/servers/1234/action') + + def test_add_floating_ip_to_fixed(self): + s = cs.servers.get(1234) + s.add_floating_ip('11.0.0.1', fixed_address='12.0.0.1') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.add_floating_ip(s, '11.0.0.1', + fixed_address='12.0.0.1') + cs.assert_called('POST', '/servers/1234/action') + f = cs.floating_ips.list()[0] + cs.servers.add_floating_ip(s, f) + cs.assert_called('POST', '/servers/1234/action') + s.add_floating_ip(f) + cs.assert_called('POST', '/servers/1234/action') + + def test_remove_floating_ip(self): + s = cs.servers.get(1234) + s.remove_floating_ip('11.0.0.1') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.remove_floating_ip(s, '11.0.0.1') + cs.assert_called('POST', '/servers/1234/action') + f = cs.floating_ips.list()[0] + cs.servers.remove_floating_ip(s, f) + cs.assert_called('POST', '/servers/1234/action') + s.remove_floating_ip(f) + cs.assert_called('POST', '/servers/1234/action') + + def test_stop(self): + s = cs.servers.get(1234) + s.stop() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.stop(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_force_delete(self): + s = cs.servers.get(1234) + s.force_delete() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.force_delete(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_restore(self): + s = cs.servers.get(1234) + s.restore() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.restore(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_start(self): + s = cs.servers.get(1234) + s.start() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.start(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_rescue(self): + s = cs.servers.get(1234) + s.rescue() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.rescue(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_unrescue(self): + s = cs.servers.get(1234) + s.unrescue() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.unrescue(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_lock(self): + s = cs.servers.get(1234) + s.lock() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.lock(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_unlock(self): + s = cs.servers.get(1234) + s.unlock() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.unlock(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_backup(self): + s = cs.servers.get(1234) + s.backup('back1', 'daily', 1) + cs.assert_called('POST', '/servers/1234/action') + cs.servers.backup(s, 'back1', 'daily', 2) + cs.assert_called('POST', '/servers/1234/action') + + def test_get_console_output_without_length(self): + success = 'foo' + s = cs.servers.get(1234) + s.get_console_output() + self.assertEqual(s.get_console_output(), success) + cs.assert_called('POST', '/servers/1234/action') + + cs.servers.get_console_output(s) + self.assertEqual(cs.servers.get_console_output(s), success) + cs.assert_called('POST', '/servers/1234/action') + + def test_get_console_output_with_length(self): + success = 'foo' + + s = 
cs.servers.get(1234) + s.get_console_output(length=50) + self.assertEqual(s.get_console_output(length=50), success) + cs.assert_called('POST', '/servers/1234/action') + + cs.servers.get_console_output(s, length=50) + self.assertEqual(cs.servers.get_console_output(s, length=50), success) + cs.assert_called('POST', '/servers/1234/action') + + def test_get_password(self): + s = cs.servers.get(1234) + self.assertEqual(s.get_password('/foo/id_rsa'), '') + cs.assert_called('GET', '/servers/1234/os-server-password') + + def test_clear_password(self): + s = cs.servers.get(1234) + s.clear_password() + cs.assert_called('DELETE', '/servers/1234/os-server-password') + + def test_get_server_diagnostics(self): + s = cs.servers.get(1234) + diagnostics = s.diagnostics() + self.assertTrue(diagnostics is not None) + cs.assert_called('GET', '/servers/1234/diagnostics') + + diagnostics_from_manager = cs.servers.diagnostics(1234) + self.assertTrue(diagnostics_from_manager is not None) + cs.assert_called('GET', '/servers/1234/diagnostics') + + self.assertEqual(diagnostics, diagnostics_from_manager) + + def test_get_vnc_console(self): + s = cs.servers.get(1234) + s.get_vnc_console('fake') + cs.assert_called('POST', '/servers/1234/action') + + cs.servers.get_vnc_console(s, 'fake') + cs.assert_called('POST', '/servers/1234/action') + + def test_get_spice_console(self): + s = cs.servers.get(1234) + s.get_spice_console('fake') + cs.assert_called('POST', '/servers/1234/action') + + cs.servers.get_spice_console(s, 'fake') + cs.assert_called('POST', '/servers/1234/action') + + def test_create_image(self): + s = cs.servers.get(1234) + s.create_image('123') + cs.assert_called('POST', '/servers/1234/action') + s.create_image('123', {}) + cs.assert_called('POST', '/servers/1234/action') + cs.servers.create_image(s, '123') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.create_image(s, '123', {}) + + def test_live_migrate_server(self): + s = cs.servers.get(1234) + s.live_migrate(host='hostname', block_migration=False, + disk_over_commit=False) + cs.assert_called('POST', '/servers/1234/action') + cs.servers.live_migrate(s, host='hostname', block_migration=False, + disk_over_commit=False) + cs.assert_called('POST', '/servers/1234/action') + + def test_reset_state(self): + s = cs.servers.get(1234) + s.reset_state('newstate') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.reset_state(s, 'newstate') + cs.assert_called('POST', '/servers/1234/action') + + def test_reset_network(self): + s = cs.servers.get(1234) + s.reset_network() + cs.assert_called('POST', '/servers/1234/action') + cs.servers.reset_network(s) + cs.assert_called('POST', '/servers/1234/action') + + def test_add_security_group(self): + s = cs.servers.get(1234) + s.add_security_group('newsg') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.add_security_group(s, 'newsg') + cs.assert_called('POST', '/servers/1234/action') + + def test_remove_security_group(self): + s = cs.servers.get(1234) + s.remove_security_group('oldsg') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.remove_security_group(s, 'oldsg') + cs.assert_called('POST', '/servers/1234/action') + + def test_list_security_group(self): + s = cs.servers.get(1234) + s.list_security_group() + cs.assert_called('GET', '/servers/1234/os-security-groups') + + def test_evacuate(self): + s = cs.servers.get(1234) + s.evacuate('fake_target_host', 'True') + cs.assert_called('POST', '/servers/1234/action') + cs.servers.evacuate(s, 'fake_target_host', 'False', 
'NewAdminPassword') + cs.assert_called('POST', '/servers/1234/action') + + def test_interface_list(self): + s = cs.servers.get(1234) + s.interface_list() + cs.assert_called('GET', '/servers/1234/os-interface') + + def test_interface_attach(self): + s = cs.servers.get(1234) + s.interface_attach(None, None, None) + cs.assert_called('POST', '/servers/1234/os-interface') + + def test_interface_detach(self): + s = cs.servers.get(1234) + s.interface_detach('port-id') + cs.assert_called('DELETE', '/servers/1234/os-interface/port-id') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_services.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_services.py new file mode 100644 index 0000000000..ddd43bc8a4 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_services.py @@ -0,0 +1,77 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient.v1_1 import services +from novaclient.tests.v1_1 import fakes +from novaclient.tests import utils + + +cs = fakes.FakeClient() + + +class ServicesTest(utils.TestCase): + + def test_list_services(self): + svs = cs.services.list() + cs.assert_called('GET', '/os-services') + [self.assertTrue(isinstance(s, services.Service)) for s in svs] + [self.assertEqual(s.binary, 'nova-compute') for s in svs] + [self.assertEqual(s.host, 'host1') for s in svs] + + def test_list_services_with_hostname(self): + svs = cs.services.list(host='host2') + cs.assert_called('GET', '/os-services?host=host2') + [self.assertTrue(isinstance(s, services.Service)) for s in svs] + [self.assertEqual(s.binary, 'nova-compute') for s in svs] + [self.assertEqual(s.host, 'host2') for s in svs] + + def test_list_services_with_binary(self): + svs = cs.services.list(binary='nova-cert') + cs.assert_called('GET', '/os-services?binary=nova-cert') + [self.assertTrue(isinstance(s, services.Service)) for s in svs] + [self.assertEqual(s.binary, 'nova-cert') for s in svs] + [self.assertEqual(s.host, 'host1') for s in svs] + + def test_list_services_with_host_binary(self): + svs = cs.services.list(host='host2', binary='nova-cert') + cs.assert_called('GET', '/os-services?host=host2&binary=nova-cert') + [self.assertTrue(isinstance(s, services.Service)) for s in svs] + [self.assertEqual(s.binary, 'nova-cert') for s in svs] + [self.assertEqual(s.host, 'host2') for s in svs] + + def test_services_enable(self): + service = cs.services.enable('host1', 'nova-cert') + values = {"host": "host1", 'binary': 'nova-cert'} + cs.assert_called('PUT', '/os-services/enable', values) + self.assertTrue(isinstance(service, services.Service)) + self.assertEqual(service.status, 'enabled') + + def test_services_disable(self): + service = cs.services.disable('host1', 'nova-cert') + values = {"host": "host1", 'binary': 'nova-cert'} + cs.assert_called('PUT', '/os-services/disable', values) + self.assertTrue(isinstance(service, services.Service)) + self.assertEqual(service.status, 'disabled') + + def 
test_services_disable_log_reason(self): + service = cs.services.disable_log_reason('compute1', 'nova-compute', + 'disable bad host') + values = {'host': 'compute1', 'binary': 'nova-compute', + 'disabled_reason': 'disable bad host'} + cs.assert_called('PUT', '/os-services/disable-log-reason', values) + self.assertTrue(isinstance(service, services.Service)) + self.assertEqual(service.status, 'disabled') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_shell.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_shell.py new file mode 100644 index 0000000000..d7a5e0d38f --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_shell.py @@ -0,0 +1,1799 @@ +# Copyright 2010 Jacob Kaplan-Moss + +# Copyright 2011 OpenStack Foundation +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import os +import mock +import sys +import tempfile + +import fixtures +import six + +import novaclient.client +from novaclient import exceptions +from novaclient.openstack.common import timeutils +import novaclient.shell +from novaclient.tests.v1_1 import fakes +from novaclient.tests import utils + + +class ShellFixture(fixtures.Fixture): + def setUp(self): + super(ShellFixture, self).setUp() + self.shell = novaclient.shell.OpenStackComputeShell() + + def tearDown(self): + # For some methods, like test_image_meta_bad_action, we are + # testing that a SystemExit is thrown and the object self.shell has + # no time to get instantiated, which is OK in this case, so + # we make sure the method is there before launching it. 
+ if hasattr(self.shell, 'cs'): + self.shell.cs.clear_callstack() + super(ShellFixture, self).tearDown() + + +class ShellTest(utils.TestCase): + FAKE_ENV = { + 'NOVA_USERNAME': 'username', + 'NOVA_PASSWORD': 'password', + 'NOVA_PROJECT_ID': 'project_id', + 'OS_COMPUTE_API_VERSION': '1.1', + 'NOVA_URL': 'http://no.where', + } + + def setUp(self): + """Run before each test.""" + super(ShellTest, self).setUp() + + for var in self.FAKE_ENV: + self.useFixture(fixtures.EnvironmentVariable(var, + self.FAKE_ENV[var])) + self.shell = self.useFixture(ShellFixture()).shell + + self.useFixture(fixtures.MonkeyPatch( + 'novaclient.client.get_client_class', + lambda *_: fakes.FakeClient)) + self.addCleanup(timeutils.clear_time_override) + + @mock.patch('sys.stdout', six.StringIO()) + def run_command(self, cmd): + if isinstance(cmd, list): + self.shell.main(cmd) + else: + self.shell.main(cmd.split()) + return sys.stdout.getvalue() + + def assert_called(self, method, url, body=None, **kwargs): + return self.shell.cs.assert_called(method, url, body, **kwargs) + + def assert_called_anytime(self, method, url, body=None): + return self.shell.cs.assert_called_anytime(method, url, body) + + def test_agents_list_with_hypervisor(self): + self.run_command('agent-list --hypervisor xen') + self.assert_called('GET', '/os-agents?hypervisor=xen') + + def test_agents_create(self): + self.run_command('agent-create win x86 7.0 ' + '/xxx/xxx/xxx ' + 'add6bb58e139be103324d04d82d8f546 ' + 'kvm') + self.assert_called( + 'POST', '/os-agents', + {'agent': { + 'hypervisor': 'kvm', + 'os': 'win', + 'architecture': 'x86', + 'version': '7.0', + 'url': '/xxx/xxx/xxx', + 'md5hash': 'add6bb58e139be103324d04d82d8f546'}}) + + def test_agents_delete(self): + self.run_command('agent-delete 1') + self.assert_called('DELETE', '/os-agents/1') + + def test_agents_modify(self): + self.run_command('agent-modify 1 8.0 /yyy/yyyy/yyyy ' + 'add6bb58e139be103324d04d82d8f546') + self.assert_called('PUT', '/os-agents/1', + {"para": { + "url": "/yyy/yyyy/yyyy", + "version": "8.0", + "md5hash": "add6bb58e139be103324d04d82d8f546"}}) + + def test_boot(self): + self.run_command('boot --flavor 1 --image 1 some-server') + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + def test_boot_multiple(self): + self.run_command('boot --flavor 1 --image 1' + ' --num-instances 3 some-server') + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 3, + }}, + ) + + def test_boot_image_with(self): + self.run_command("boot --flavor 1" + " --image-with test_key=test_value some-server") + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + def test_boot_key(self): + self.run_command('boot --flavor 1 --image 1 --key_name 1 some-server') + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'key_name': '1', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + def test_boot_user_data(self): + testfile = os.path.join(os.path.dirname(__file__), 'testfile.txt') + expected_file_data = open(testfile).read().encode('base64').strip() + self.run_command( + 'boot --flavor 1 --image 1 --user_data %s some-server' % testfile) + self.assert_called_anytime( + 
'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + 'user_data': expected_file_data + }}, + ) + + def test_boot_avzone(self): + self.run_command( + 'boot --flavor 1 --image 1 --availability-zone avzone ' + 'some-server') + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'availability_zone': 'avzone', + 'min_count': 1, + 'max_count': 1 + }}, + ) + + def test_boot_secgroup(self): + self.run_command( + 'boot --flavor 1 --image 1 --security-groups secgroup1,' + 'secgroup2 some-server') + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'security_groups': [{'name': 'secgroup1'}, + {'name': 'secgroup2'}], + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + def test_boot_config_drive(self): + self.run_command( + 'boot --flavor 1 --image 1 --config-drive 1 some-server') + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + 'config_drive': True + }}, + ) + + def test_boot_config_drive_custom(self): + self.run_command( + 'boot --flavor 1 --image 1 --config-drive /dev/hda some-server') + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + 'config_drive': '/dev/hda' + }}, + ) + + def test_boot_invalid_user_data(self): + invalid_file = os.path.join(os.path.dirname(__file__), + 'no_such_file') + cmd = ('boot some-server --flavor 1 --image 1' + ' --user_data %s' % invalid_file) + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_boot_no_image_no_bdms(self): + cmd = 'boot --flavor 1 some-server' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_boot_no_flavor(self): + cmd = 'boot --image 1 some-server' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_boot_no_image_bdms(self): + self.run_command( + 'boot --flavor 1 --block_device_mapping vda=blah:::0 some-server' + ) + self.assert_called_anytime( + 'POST', '/os-volumes_boot', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'block_device_mapping': [ + { + 'volume_id': 'blah', + 'delete_on_termination': '0', + 'device_name': 'vda' + } + ], + 'imageRef': '', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + def test_boot_image_bdms_v2(self): + self.run_command( + 'boot --flavor 1 --image 1 --block-device id=fake-id,' + 'source=volume,dest=volume,device=vda,size=1,format=ext4,' + 'type=disk,shutdown=preserve some-server' + ) + self.assert_called_anytime( + 'POST', '/os-volumes_boot', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'block_device_mapping_v2': [ + { + 'uuid': 1, + 'source_type': 'image', + 'destination_type': 'local', + 'boot_index': 0, + 'delete_on_termination': True, + }, + { + 'uuid': 'fake-id', + 'source_type': 'volume', + 'destination_type': 'volume', + 'device_name': 'vda', + 'volume_size': '1', + 'guest_format': 'ext4', + 'device_type': 'disk', + 'delete_on_termination': False, + }, + ], + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + def test_boot_no_image_bdms_v2(self): + self.run_command( + 'boot --flavor 1 --block-device id=fake-id,source=volume,' + 'dest=volume,bus=virtio,device=vda,size=1,format=ext4,bootindex=0,' + 
'type=disk,shutdown=preserve some-server' + ) + self.assert_called_anytime( + 'POST', '/os-volumes_boot', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'block_device_mapping_v2': [ + { + 'uuid': 'fake-id', + 'source_type': 'volume', + 'destination_type': 'volume', + 'disk_bus': 'virtio', + 'device_name': 'vda', + 'volume_size': '1', + 'guest_format': 'ext4', + 'boot_index': '0', + 'device_type': 'disk', + 'delete_on_termination': False, + } + ], + 'imageRef': '', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + cmd = 'boot --flavor 1 --boot-volume fake-id some-server' + self.run_command(cmd) + self.assert_called_anytime( + 'POST', '/os-volumes_boot', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'block_device_mapping_v2': [ + { + 'uuid': 'fake-id', + 'source_type': 'volume', + 'destination_type': 'volume', + 'boot_index': 0, + 'delete_on_termination': False, + } + ], + 'imageRef': '', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + cmd = 'boot --flavor 1 --snapshot fake-id some-server' + self.run_command(cmd) + self.assert_called_anytime( + 'POST', '/os-volumes_boot', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'block_device_mapping_v2': [ + { + 'uuid': 'fake-id', + 'source_type': 'snapshot', + 'destination_type': 'volume', + 'boot_index': 0, + 'delete_on_termination': False, + } + ], + 'imageRef': '', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + self.run_command('boot --flavor 1 --swap 1 some-server') + self.assert_called_anytime( + 'POST', '/os-volumes_boot', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'block_device_mapping_v2': [ + { + 'source_type': 'blank', + 'destination_type': 'local', + 'boot_index': -1, + 'guest_format': 'swap', + 'volume_size': '1', + 'delete_on_termination': True, + } + ], + 'imageRef': '', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + self.run_command( + 'boot --flavor 1 --ephemeral size=1,format=ext4 some-server' + ) + self.assert_called_anytime( + 'POST', '/os-volumes_boot', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'block_device_mapping_v2': [ + { + 'source_type': 'blank', + 'destination_type': 'local', + 'boot_index': -1, + 'guest_format': 'ext4', + 'volume_size': '1', + 'delete_on_termination': True, + } + ], + 'imageRef': '', + 'min_count': 1, + 'max_count': 1, + }}, + ) + + def test_boot_metadata(self): + self.run_command('boot --image 1 --flavor 1 --meta foo=bar=pants' + ' --meta spam=eggs some-server ') + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'metadata': {'foo': 'bar=pants', 'spam': 'eggs'}, + 'min_count': 1, + 'max_count': 1, + }}, + ) + + def test_boot_hints(self): + self.run_command('boot --image 1 --flavor 1 --hint a=b=c some-server ') + self.assert_called_anytime( + 'POST', '/servers', + { + 'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + }, + 'os:scheduler_hints': {'a': 'b=c'}, + }, + ) + + def test_boot_nics(self): + cmd = ('boot --image 1 --flavor 1 ' + '--nic net-id=a=c,v4-fixed-ip=10.0.0.1 some-server') + self.run_command(cmd) + self.assert_called_anytime( + 'POST', '/servers', + { + 'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + 'networks': [ + {'uuid': 'a=c', 'fixed_ip': '10.0.0.1'}, + ], + }, + }, + ) + + def test_boot_nics_no_value(self): + cmd = ('boot --image 1 --flavor 1 ' + '--nic net-id some-server') +
self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_boot_nics_random_key(self): + cmd = ('boot --image 1 --flavor 1 ' + '--nic net-id=a=c,v4-fixed-ip=10.0.0.1,foo=bar some-server') + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_boot_nics_no_netid_or_portid(self): + cmd = ('boot --image 1 --flavor 1 ' + '--nic v4-fixed-ip=10.0.0.1 some-server') + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_boot_files(self): + testfile = os.path.join(os.path.dirname(__file__), 'testfile.txt') + expected_file_data = open(testfile).read().encode('base64') + + cmd = ('boot some-server --flavor 1 --image 1' + ' --file /tmp/foo=%s --file /tmp/bar=%s') + self.run_command(cmd % (testfile, testfile)) + + self.assert_called_anytime( + 'POST', '/servers', + {'server': { + 'flavorRef': '1', + 'name': 'some-server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 1, + 'personality': [ + {'path': '/tmp/bar', 'contents': expected_file_data}, + {'path': '/tmp/foo', 'contents': expected_file_data}, + ] + }}, + ) + + def test_boot_invalid_files(self): + invalid_file = os.path.join(os.path.dirname(__file__), + 'asdfasdfasdfasdf') + cmd = ('boot some-server --flavor 1 --image 1' + ' --file /foo=%s' % invalid_file) + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_boot_num_instances(self): + self.run_command('boot --image 1 --flavor 1 --num-instances 3 server') + self.assert_called_anytime( + 'POST', '/servers', + { + 'server': { + 'flavorRef': '1', + 'name': 'server', + 'imageRef': '1', + 'min_count': 1, + 'max_count': 3, + } + }) + + def test_boot_invalid_num_instances(self): + cmd = 'boot --image 1 --flavor 1 --num-instances 1 server' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + cmd = 'boot --image 1 --flavor 1 --num-instances 0 server' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_flavor_list(self): + self.run_command('flavor-list') + self.assert_called_anytime('GET', '/flavors/detail') + + def test_flavor_list_with_extra_specs(self): + self.run_command('flavor-list --extra-specs') + self.assert_called('GET', '/flavors/aa1/os-extra_specs') + self.assert_called_anytime('GET', '/flavors/detail') + + def test_flavor_list_with_all(self): + self.run_command('flavor-list --all') + self.assert_called('GET', '/flavors/detail?is_public=None') + + def test_flavor_show(self): + self.run_command('flavor-show 1') + self.assert_called_anytime('GET', '/flavors/1') + + def test_flavor_show_with_alphanum_id(self): + self.run_command('flavor-show aa1') + self.assert_called_anytime('GET', '/flavors/aa1') + + def test_flavor_key_set(self): + self.run_command('flavor-key 1 set k1=v1') + self.assert_called('POST', '/flavors/1/os-extra_specs', + {'extra_specs': {'k1': 'v1'}}) + + def test_flavor_key_unset(self): + self.run_command('flavor-key 1 unset k1') + self.assert_called('DELETE', '/flavors/1/os-extra_specs/k1') + + def test_flavor_access_list_flavor(self): + self.run_command('flavor-access-list --flavor 2') + self.assert_called('GET', '/flavors/2/os-flavor-access') + + # FIXME: flavor-access-list is not implemented yet + # def test_flavor_access_list_tenant(self): + # self.run_command('flavor-access-list --tenant proj2') + # self.assert_called('GET', '/flavors/2/os-flavor-access') + + def test_flavor_access_list_bad_filter(self): + cmd = 'flavor-access-list --flavor 2 --tenant proj2' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + 
+ def test_flavor_access_list_no_filter(self): + cmd = 'flavor-access-list' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_flavor_access_add_by_id(self): + self.run_command('flavor-access-add 2 proj2') + self.assert_called('POST', '/flavors/2/action', + {'addTenantAccess': {'tenant': 'proj2'}}) + + def test_flavor_access_add_by_name(self): + self.run_command(['flavor-access-add', '512 MB Server', 'proj2']) + self.assert_called('POST', '/flavors/2/action', + {'addTenantAccess': {'tenant': 'proj2'}}) + + def test_flavor_access_remove_by_id(self): + self.run_command('flavor-access-remove 2 proj2') + self.assert_called('POST', '/flavors/2/action', + {'removeTenantAccess': {'tenant': 'proj2'}}) + + def test_flavor_access_remove_by_name(self): + self.run_command(['flavor-access-remove', '512 MB Server', 'proj2']) + self.assert_called('POST', '/flavors/2/action', + {'removeTenantAccess': {'tenant': 'proj2'}}) + + def test_image_show(self): + self.run_command('image-show 1') + self.assert_called('GET', '/images/1') + + def test_image_meta_set(self): + self.run_command('image-meta 1 set test_key=test_value') + self.assert_called('POST', '/images/1/metadata', + {'metadata': {'test_key': 'test_value'}}) + + def test_image_meta_del(self): + self.run_command('image-meta 1 delete test_key=test_value') + self.assert_called('DELETE', '/images/1/metadata/test_key') + + def test_image_meta_bad_action(self): + tmp = tempfile.TemporaryFile() + + # Suppress stdout and stderr + (stdout, stderr) = (sys.stdout, sys.stderr) + (sys.stdout, sys.stderr) = (tmp, tmp) + + self.assertRaises(SystemExit, self.run_command, + 'image-meta 1 BAD_ACTION test_key=test_value') + + # Put stdout and stderr back + sys.stdout, sys.stderr = (stdout, stderr) + + def test_image_list(self): + self.run_command('image-list') + self.assert_called('GET', '/images/detail') + + def test_create_image(self): + self.run_command('image-create sample-server mysnapshot') + self.assert_called( + 'POST', '/servers/1234/action', + {'createImage': {'name': 'mysnapshot', 'metadata': {}}}, + ) + + def test_image_delete(self): + self.run_command('image-delete 1') + self.assert_called('DELETE', '/images/1') + + def test_image_delete_multiple(self): + self.run_command('image-delete 1 2') + self.assert_called('DELETE', '/images/1', pos=-3) + self.assert_called('DELETE', '/images/2', pos=-1) + + def test_list(self): + self.run_command('list') + self.assert_called('GET', '/servers/detail') + + def test_list_with_images(self): + self.run_command('list --image 1') + self.assert_called('GET', '/servers/detail?image=1') + + def test_list_with_flavors(self): + self.run_command('list --flavor 1') + self.assert_called('GET', '/servers/detail?flavor=1') + + def test_list_fields(self): + output = self.run_command('list --fields ' + 'host,security_groups,OS-EXT-MOD:some_thing') + self.assert_called('GET', '/servers/detail') + self.assertIn('computenode1', output) + self.assertIn('securitygroup1', output) + self.assertIn('OS-EXT-MOD: Some Thing', output) + self.assertIn('mod_some_thing_value', output) + + def test_reboot(self): + self.run_command('reboot sample-server') + self.assert_called('POST', '/servers/1234/action', + {'reboot': {'type': 'SOFT'}}) + self.run_command('reboot sample-server --hard') + self.assert_called('POST', '/servers/1234/action', + {'reboot': {'type': 'HARD'}}) + + def test_rebuild(self): + self.run_command('rebuild sample-server 1') + self.assert_called('GET', '/servers', pos=-8) + self.assert_called('GET', 
'/servers/1234', pos=-7) + self.assert_called('GET', '/images/1', pos=-6) + self.assert_called('POST', '/servers/1234/action', + {'rebuild': {'imageRef': 1}}, pos=-5) + self.assert_called('GET', '/flavors/1', pos=-2) + self.assert_called('GET', '/images/2') + + self.run_command('rebuild sample-server 1 --rebuild-password asdf') + self.assert_called('GET', '/servers', pos=-8) + self.assert_called('GET', '/servers/1234', pos=-7) + self.assert_called('GET', '/images/1', pos=-6) + self.assert_called('POST', '/servers/1234/action', + {'rebuild': {'imageRef': 1, 'adminPass': 'asdf'}}, + pos=-5) + self.assert_called('GET', '/flavors/1', pos=-2) + self.assert_called('GET', '/images/2') + + def test_start(self): + self.run_command('start sample-server') + self.assert_called('POST', '/servers/1234/action', {'os-start': None}) + + def test_stop(self): + self.run_command('stop sample-server') + self.assert_called('POST', '/servers/1234/action', {'os-stop': None}) + + def test_pause(self): + self.run_command('pause sample-server') + self.assert_called('POST', '/servers/1234/action', {'pause': None}) + + def test_unpause(self): + self.run_command('unpause sample-server') + self.assert_called('POST', '/servers/1234/action', {'unpause': None}) + + def test_lock(self): + self.run_command('lock sample-server') + self.assert_called('POST', '/servers/1234/action', {'lock': None}) + + def test_unlock(self): + self.run_command('unlock sample-server') + self.assert_called('POST', '/servers/1234/action', {'unlock': None}) + + def test_suspend(self): + self.run_command('suspend sample-server') + self.assert_called('POST', '/servers/1234/action', {'suspend': None}) + + def test_resume(self): + self.run_command('resume sample-server') + self.assert_called('POST', '/servers/1234/action', {'resume': None}) + + def test_rescue(self): + self.run_command('rescue sample-server') + self.assert_called('POST', '/servers/1234/action', {'rescue': None}) + + def test_unrescue(self): + self.run_command('unrescue sample-server') + self.assert_called('POST', '/servers/1234/action', {'unrescue': None}) + + def test_migrate(self): + self.run_command('migrate sample-server') + self.assert_called('POST', '/servers/1234/action', {'migrate': None}) + + def test_rename(self): + self.run_command('rename sample-server newname') + self.assert_called('PUT', '/servers/1234', + {'server': {'name': 'newname'}}) + + def test_resize(self): + self.run_command('resize sample-server 1') + self.assert_called('POST', '/servers/1234/action', + {'resize': {'flavorRef': 1}}) + + def test_resize_confirm(self): + self.run_command('resize-confirm sample-server') + self.assert_called('POST', '/servers/1234/action', + {'confirmResize': None}) + + def test_resize_revert(self): + self.run_command('resize-revert sample-server') + self.assert_called('POST', '/servers/1234/action', + {'revertResize': None}) + + @mock.patch('getpass.getpass', mock.Mock(return_value='p')) + def test_root_password(self): + self.run_command('root-password sample-server') + self.assert_called('POST', '/servers/1234/action', + {'changePassword': {'adminPass': 'p'}}) + + def test_scrub(self): + self.run_command('scrub 4ffc664c198e435e9853f2538fbcd7a7') + self.assert_called('GET', '/os-networks', pos=-4) + self.assert_called('GET', '/os-security-groups?all_tenants=1', + pos=-3) + self.assert_called('POST', '/os-networks/1/action', + {"disassociate": None}, pos=-2) + self.assert_called('DELETE', '/os-security-groups/1') + + def test_show(self): + self.run_command('show 1234') + 
self.assert_called('GET', '/servers/1234', pos=-3) + self.assert_called('GET', '/flavors/1', pos=-2) + self.assert_called('GET', '/images/2') + + def test_show_no_image(self): + self.run_command('show 9012') + self.assert_called('GET', '/servers/9012', pos=-2) + self.assert_called('GET', '/flavors/1', pos=-1) + + def test_show_bad_id(self): + self.assertRaises(exceptions.CommandError, + self.run_command, 'show xxx') + + def test_delete(self): + self.run_command('delete 1234') + self.assert_called('DELETE', '/servers/1234') + self.run_command('delete sample-server') + self.assert_called('DELETE', '/servers/1234') + + def test_force_delete(self): + self.run_command('force-delete 1234') + self.assert_called('POST', '/servers/1234/action', + {'forceDelete': None}) + self.run_command('force-delete sample-server') + self.assert_called('POST', '/servers/1234/action', + {'forceDelete': None}) + + def test_restore(self): + self.run_command('restore 1234') + self.assert_called('POST', '/servers/1234/action', {'restore': None}) + self.run_command('restore sample-server') + self.assert_called('POST', '/servers/1234/action', {'restore': None}) + + def test_delete_two_with_two_existent(self): + self.run_command('delete 1234 5678') + self.assert_called('DELETE', '/servers/1234', pos=-3) + self.assert_called('DELETE', '/servers/5678', pos=-1) + self.run_command('delete sample-server sample-server2') + self.assert_called('GET', '/servers', pos=-6) + self.assert_called('GET', '/servers/1234', pos=-5) + self.assert_called('DELETE', '/servers/1234', pos=-4) + self.assert_called('GET', '/servers', pos=-3) + self.assert_called('GET', '/servers/5678', pos=-2) + self.assert_called('DELETE', '/servers/5678', pos=-1) + + def test_delete_two_with_one_nonexistent(self): + self.run_command('delete 1234 123456789') + self.assert_called_anytime('DELETE', '/servers/1234') + self.run_command('delete sample-server nonexistentserver') + self.assert_called_anytime('DELETE', '/servers/1234') + + def test_delete_one_with_one_nonexistent(self): + cmd = 'delete 123456789' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + cmd = 'delete nonexistent-server1' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_delete_two_with_two_nonexistent(self): + cmd = 'delete 123456789 987654321' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + cmd = 'delete nonexistent-server1 nonexistent-server2' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_diagnostics(self): + self.run_command('diagnostics 1234') + self.assert_called('GET', '/servers/1234/diagnostics') + self.run_command('diagnostics sample-server') + self.assert_called('GET', '/servers/1234/diagnostics') + + def test_set_meta_set(self): + self.run_command('meta 1234 set key1=val1 key2=val2') + self.assert_called('POST', '/servers/1234/metadata', + {'metadata': {'key1': 'val1', 'key2': 'val2'}}) + + def test_set_meta_delete_dict(self): + self.run_command('meta 1234 delete key1=val1 key2=val2') + self.assert_called('DELETE', '/servers/1234/metadata/key1') + self.assert_called('DELETE', '/servers/1234/metadata/key2', pos=-2) + + def test_set_meta_delete_keys(self): + self.run_command('meta 1234 delete key1 key2') + self.assert_called('DELETE', '/servers/1234/metadata/key1') + self.assert_called('DELETE', '/servers/1234/metadata/key2', pos=-2) + + def test_set_host_meta(self): + self.run_command('host-meta hyper set key1=val1 key2=val2') + self.assert_called('GET', 
'/os-hypervisors/hyper/servers', pos=0) + self.assert_called('POST', '/servers/uuid1/metadata', + {'metadata': {'key1': 'val1', 'key2': 'val2'}}, + pos=1) + self.assert_called('POST', '/servers/uuid2/metadata', + {'metadata': {'key1': 'val1', 'key2': 'val2'}}, + pos=2) + self.assert_called('POST', '/servers/uuid3/metadata', + {'metadata': {'key1': 'val1', 'key2': 'val2'}}, + pos=3) + self.assert_called('POST', '/servers/uuid4/metadata', + {'metadata': {'key1': 'val1', 'key2': 'val2'}}, + pos=4) + + def test_set_host_meta_with_no_servers(self): + self.run_command('host-meta hyper_no_servers set key1=val1 key2=val2') + self.assert_called('GET', '/os-hypervisors/hyper_no_servers/servers') + + def test_delete_host_meta(self): + self.run_command('host-meta hyper delete key1') + self.assert_called('GET', '/os-hypervisors/hyper/servers', pos=0) + self.assert_called('DELETE', '/servers/uuid1/metadata/key1', pos=1) + self.assert_called('DELETE', '/servers/uuid2/metadata/key1', pos=2) + + def test_dns_create(self): + self.run_command('dns-create 192.168.1.1 testname testdomain') + self.assert_called('PUT', + '/os-floating-ip-dns/testdomain/entries/testname') + + self.run_command('dns-create 192.168.1.1 testname testdomain --type A') + self.assert_called('PUT', + '/os-floating-ip-dns/testdomain/entries/testname') + + def test_dns_create_public_domain(self): + self.run_command('dns-create-public-domain testdomain ' + '--project test_project') + self.assert_called('PUT', '/os-floating-ip-dns/testdomain') + + def test_dns_create_private_domain(self): + self.run_command('dns-create-private-domain testdomain ' + '--availability-zone av_zone') + self.assert_called('PUT', '/os-floating-ip-dns/testdomain') + + def test_dns_delete(self): + self.run_command('dns-delete testdomain testname') + self.assert_called('DELETE', + '/os-floating-ip-dns/testdomain/entries/testname') + + def test_dns_delete_domain(self): + self.run_command('dns-delete-domain testdomain') + self.assert_called('DELETE', '/os-floating-ip-dns/testdomain') + + def test_dns_list(self): + self.run_command('dns-list testdomain --ip 192.168.1.1') + self.assert_called('GET', + '/os-floating-ip-dns/testdomain/entries?' 
+ 'ip=192.168.1.1') + + self.run_command('dns-list testdomain --name testname') + self.assert_called('GET', + '/os-floating-ip-dns/testdomain/entries/testname') + + def test_dns_domains(self): + self.run_command('dns-domains') + self.assert_called('GET', '/os-floating-ip-dns') + + def test_floating_ip_list(self): + self.run_command('floating-ip-list') + self.assert_called('GET', '/os-floating-ips') + + def test_floating_ip_create(self): + self.run_command('floating-ip-create') + self.assert_called('GET', '/os-floating-ips/1') + + def test_floating_ip_delete(self): + self.run_command('floating-ip-delete 11.0.0.1') + self.assert_called('DELETE', '/os-floating-ips/1') + + def test_floating_ip_bulk_list(self): + self.run_command('floating-ip-bulk-list') + self.assert_called('GET', '/os-floating-ips-bulk') + + def test_floating_ip_bulk_create(self): + self.run_command('floating-ip-bulk-create 10.0.0.1/24') + self.assert_called('POST', '/os-floating-ips-bulk', + {'floating_ips_bulk_create': + {'ip_range': '10.0.0.1/24'}}) + + def test_floating_ip_bulk_create_host_and_interface(self): + self.run_command('floating-ip-bulk-create 10.0.0.1/24 --pool testPool' + ' --interface ethX') + self.assert_called('POST', '/os-floating-ips-bulk', + {'floating_ips_bulk_create': + {'ip_range': '10.0.0.1/24', + 'pool': 'testPool', + 'interface': 'ethX'}}) + + def test_floating_ip_bulk_delete(self): + self.run_command('floating-ip-bulk-delete 10.0.0.1/24') + self.assert_called('PUT', '/os-floating-ips-bulk/delete', + {'ip_range': '10.0.0.1/24'}) + + def test_server_floating_ip_add(self): + self.run_command('add-floating-ip sample-server 11.0.0.1') + self.assert_called('POST', '/servers/1234/action', + {'addFloatingIp': {'address': '11.0.0.1'}}) + + def test_server_floating_ip_remove(self): + self.run_command('remove-floating-ip sample-server 11.0.0.1') + self.assert_called('POST', '/servers/1234/action', + {'removeFloatingIp': {'address': '11.0.0.1'}}) + + def test_usage_list(self): + self.run_command('usage-list --start 2000-01-20 --end 2005-02-01') + self.assert_called('GET', + '/os-simple-tenant-usage?' + + 'start=2000-01-20T00:00:00&' + + 'end=2005-02-01T00:00:00&' + + 'detailed=1') + + def test_usage_list_no_args(self): + timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 0)) + self.run_command('usage-list') + self.assert_called('GET', + '/os-simple-tenant-usage?' + + 'start=2005-01-04T00:00:00&' + + 'end=2005-02-02T00:00:00&' + + 'detailed=1') + + def test_usage(self): + self.run_command('usage --start 2000-01-20 --end 2005-02-01 ' + '--tenant test') + self.assert_called('GET', + '/os-simple-tenant-usage/test?' + + 'start=2000-01-20T00:00:00&' + + 'end=2005-02-01T00:00:00') + + def test_usage_no_tenant(self): + self.run_command('usage --start 2000-01-20 --end 2005-02-01') + self.assert_called('GET', + '/os-simple-tenant-usage/tenant_id?' 
+ + 'start=2000-01-20T00:00:00&' + + 'end=2005-02-01T00:00:00') + + def test_flavor_delete(self): + self.run_command("flavor-delete 2") + self.assert_called('DELETE', '/flavors/2') + + def test_flavor_create(self): + self.run_command("flavor-create flavorcreate " + "1234 512 10 1 --swap 1024 --ephemeral 10 " + "--is-public true") + self.assert_called('POST', '/flavors', pos=-2) + self.assert_called('GET', '/flavors/1', pos=-1) + + def test_aggregate_list(self): + self.run_command('aggregate-list') + self.assert_called('GET', '/os-aggregates') + + def test_aggregate_create(self): + self.run_command('aggregate-create test_name nova1') + body = {"aggregate": {"name": "test_name", + "availability_zone": "nova1"}} + self.assert_called('POST', '/os-aggregates', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_delete_by_id(self): + self.run_command('aggregate-delete 1') + self.assert_called('DELETE', '/os-aggregates/1') + + def test_aggregate_delete_by_name(self): + self.run_command('aggregate-delete test') + self.assert_called('DELETE', '/os-aggregates/1') + + def test_aggregate_update_by_id(self): + self.run_command('aggregate-update 1 new_name') + body = {"aggregate": {"name": "new_name"}} + self.assert_called('PUT', '/os-aggregates/1', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_update_by_name(self): + self.run_command('aggregate-update test new_name') + body = {"aggregate": {"name": "new_name"}} + self.assert_called('PUT', '/os-aggregates/1', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_update_with_availability_zone_by_id(self): + self.run_command('aggregate-update 1 foo new_zone') + body = {"aggregate": {"name": "foo", "availability_zone": "new_zone"}} + self.assert_called('PUT', '/os-aggregates/1', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_update_with_availability_zone_by_name(self): + self.run_command('aggregate-update test foo new_zone') + body = {"aggregate": {"name": "foo", "availability_zone": "new_zone"}} + self.assert_called('PUT', '/os-aggregates/1', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_set_metadata_by_id(self): + self.run_command('aggregate-set-metadata 1 foo=bar delete_key') + body = {"set_metadata": {"metadata": {"foo": "bar", + "delete_key": None}}} + self.assert_called('POST', '/os-aggregates/1/action', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_set_metadata_by_name(self): + self.run_command('aggregate-set-metadata test foo=bar delete_key') + body = {"set_metadata": {"metadata": {"foo": "bar", + "delete_key": None}}} + self.assert_called('POST', '/os-aggregates/1/action', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_add_host_by_id(self): + self.run_command('aggregate-add-host 1 host1') + body = {"add_host": {"host": "host1"}} + self.assert_called('POST', '/os-aggregates/1/action', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_add_host_by_name(self): + self.run_command('aggregate-add-host test host1') + body = {"add_host": {"host": "host1"}} + self.assert_called('POST', '/os-aggregates/1/action', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_remove_host_by_id(self): + self.run_command('aggregate-remove-host 1 host1') + body = {"remove_host": 
{"host": "host1"}} + self.assert_called('POST', '/os-aggregates/1/action', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_remove_host_by_name(self): + self.run_command('aggregate-remove-host test host1') + body = {"remove_host": {"host": "host1"}} + self.assert_called('POST', '/os-aggregates/1/action', body, pos=-2) + self.assert_called('GET', '/os-aggregates/1', pos=-1) + + def test_aggregate_details_by_id(self): + self.run_command('aggregate-details 1') + self.assert_called('GET', '/os-aggregates/1') + + def test_aggregate_details_by_name(self): + self.run_command('aggregate-details test') + self.assert_called('GET', '/os-aggregates') + + def test_live_migration(self): + self.run_command('live-migration sample-server hostname') + self.assert_called('POST', '/servers/1234/action', + {'os-migrateLive': {'host': 'hostname', + 'block_migration': False, + 'disk_over_commit': False}}) + self.run_command('live-migration sample-server hostname' + ' --block-migrate') + self.assert_called('POST', '/servers/1234/action', + {'os-migrateLive': {'host': 'hostname', + 'block_migration': True, + 'disk_over_commit': False}}) + self.run_command('live-migration sample-server hostname' + ' --block-migrate --disk-over-commit') + self.assert_called('POST', '/servers/1234/action', + {'os-migrateLive': {'host': 'hostname', + 'block_migration': True, + 'disk_over_commit': True}}) + + def test_reset_state(self): + self.run_command('reset-state sample-server') + self.assert_called('POST', '/servers/1234/action', + {'os-resetState': {'state': 'error'}}) + self.run_command('reset-state sample-server --active') + self.assert_called('POST', '/servers/1234/action', + {'os-resetState': {'state': 'active'}}) + + def test_reset_network(self): + self.run_command('reset-network sample-server') + self.assert_called('POST', '/servers/1234/action', + {'resetNetwork': None}) + + def test_services_list(self): + self.run_command('service-list') + self.assert_called('GET', '/os-services') + + def test_services_list_with_host(self): + self.run_command('service-list --host host1') + self.assert_called('GET', '/os-services?host=host1') + + def test_services_list_with_binary(self): + self.run_command('service-list --binary nova-cert') + self.assert_called('GET', '/os-services?binary=nova-cert') + + def test_services_list_with_host_binary(self): + self.run_command('service-list --host host1 --binary nova-cert') + self.assert_called('GET', '/os-services?host=host1&binary=nova-cert') + + def test_services_enable(self): + self.run_command('service-enable host1 nova-cert') + body = {'host': 'host1', 'binary': 'nova-cert'} + self.assert_called('PUT', '/os-services/enable', body) + + def test_services_disable(self): + self.run_command('service-disable host1 nova-cert') + body = {'host': 'host1', 'binary': 'nova-cert'} + self.assert_called('PUT', '/os-services/disable', body) + + def test_services_disable_with_reason(self): + self.run_command('service-disable host1 nova-cert --reason no_reason') + body = {'host': 'host1', 'binary': 'nova-cert', + 'disabled_reason': 'no_reason'} + self.assert_called('PUT', '/os-services/disable-log-reason', body) + + def test_fixed_ips_get(self): + self.run_command('fixed-ip-get 192.168.1.1') + self.assert_called('GET', '/os-fixed-ips/192.168.1.1') + + def test_fixed_ips_reserve(self): + self.run_command('fixed-ip-reserve 192.168.1.1') + body = {'reserve': None} + self.assert_called('POST', '/os-fixed-ips/192.168.1.1/action', body) + + def 
test_fixed_ips_unreserve(self): + self.run_command('fixed-ip-unreserve 192.168.1.1') + body = {'unreserve': None} + self.assert_called('POST', '/os-fixed-ips/192.168.1.1/action', body) + + def test_host_list(self): + self.run_command('host-list') + self.assert_called('GET', '/os-hosts') + + def test_host_list_with_zone(self): + self.run_command('host-list --zone nova') + self.assert_called('GET', '/os-hosts?zone=nova') + + def test_host_update_status(self): + self.run_command('host-update sample-host_1 --status enabled') + body = {'status': 'enabled'} + self.assert_called('PUT', '/os-hosts/sample-host_1', body) + + def test_host_update_maintenance(self): + self.run_command('host-update sample-host_2 --maintenance enable') + body = {'maintenance_mode': 'enable'} + self.assert_called('PUT', '/os-hosts/sample-host_2', body) + + def test_host_update_multiple_settings(self): + self.run_command('host-update sample-host_3 ' + '--status disabled --maintenance enable') + body = {'status': 'disabled', 'maintenance_mode': 'enable'} + self.assert_called('PUT', '/os-hosts/sample-host_3', body) + + def test_host_startup(self): + self.run_command('host-action sample-host --action startup') + self.assert_called( + 'GET', '/os-hosts/sample-host/startup') + + def test_host_shutdown(self): + self.run_command('host-action sample-host --action shutdown') + self.assert_called( + 'GET', '/os-hosts/sample-host/shutdown') + + def test_host_reboot(self): + self.run_command('host-action sample-host --action reboot') + self.assert_called( + 'GET', '/os-hosts/sample-host/reboot') + + def test_host_evacuate(self): + self.run_command('host-evacuate hyper --target target_hyper') + self.assert_called('GET', '/os-hypervisors/hyper/servers', pos=0) + self.assert_called('POST', '/servers/uuid1/action', + {'evacuate': {'host': 'target_hyper', + 'onSharedStorage': False}}, pos=1) + self.assert_called('POST', '/servers/uuid2/action', + {'evacuate': {'host': 'target_hyper', + 'onSharedStorage': False}}, pos=2) + self.assert_called('POST', '/servers/uuid3/action', + {'evacuate': {'host': 'target_hyper', + 'onSharedStorage': False}}, pos=3) + self.assert_called('POST', '/servers/uuid4/action', + {'evacuate': {'host': 'target_hyper', + 'onSharedStorage': False}}, pos=4) + + def test_host_evacuate_with_shared_storage(self): + self.run_command( + 'host-evacuate --on-shared-storage hyper --target target_hyper') + self.assert_called('GET', '/os-hypervisors/hyper/servers', pos=0) + self.assert_called('POST', '/servers/uuid1/action', + {'evacuate': {'host': 'target_hyper', + 'onSharedStorage': True}}, pos=1) + self.assert_called('POST', '/servers/uuid2/action', + {'evacuate': {'host': 'target_hyper', + 'onSharedStorage': True}}, pos=2) + self.assert_called('POST', '/servers/uuid3/action', + {'evacuate': {'host': 'target_hyper', + 'onSharedStorage': True}}, pos=3) + self.assert_called('POST', '/servers/uuid4/action', + {'evacuate': {'host': 'target_hyper', + 'onSharedStorage': True}}, pos=4) + + def test_host_evacuate_with_no_target_host(self): + self.run_command('host-evacuate --on-shared-storage hyper') + self.assert_called('GET', '/os-hypervisors/hyper/servers', pos=0) + self.assert_called('POST', '/servers/uuid1/action', + {'evacuate': {'host': None, + 'onSharedStorage': True}}, pos=1) + self.assert_called('POST', '/servers/uuid2/action', + {'evacuate': {'host': None, + 'onSharedStorage': True}}, pos=2) + self.assert_called('POST', '/servers/uuid3/action', + {'evacuate': {'host': None, + 'onSharedStorage': True}}, pos=3) + 
self.assert_called('POST', '/servers/uuid4/action', + {'evacuate': {'host': None, + 'onSharedStorage': True}}, pos=4) + + def test_host_servers_migrate(self): + self.run_command('host-servers-migrate hyper') + self.assert_called('GET', '/os-hypervisors/hyper/servers', pos=0) + self.assert_called('POST', + '/servers/uuid1/action', {'migrate': None}, pos=1) + self.assert_called('POST', + '/servers/uuid2/action', {'migrate': None}, pos=2) + self.assert_called('POST', + '/servers/uuid3/action', {'migrate': None}, pos=3) + self.assert_called('POST', + '/servers/uuid4/action', {'migrate': None}, pos=4) + + def test_coverage_start(self): + self.run_command('coverage-start') + self.assert_called('POST', '/os-coverage/action') + + def test_coverage_start_with_combine(self): + self.run_command('coverage-start --combine') + body = {'start': {'combine': True}} + self.assert_called('POST', '/os-coverage/action', body) + + def test_coverage_stop(self): + self.run_command('coverage-stop') + self.assert_called_anytime('POST', '/os-coverage/action') + + def test_coverage_report(self): + self.run_command('coverage-report report') + self.assert_called_anytime('POST', '/os-coverage/action') + + def test_coverage_report_with_html(self): + self.run_command('coverage-report report --html') + body = {'report': {'html': True, 'file': 'report'}} + self.assert_called_anytime('POST', '/os-coverage/action', body) + + def test_coverage_report_with_xml(self): + self.run_command('coverage-report report --xml') + body = {'report': {'xml': True, 'file': 'report'}} + self.assert_called_anytime('POST', '/os-coverage/action', body) + + def test_coverage_reset(self): + self.run_command('coverage-reset') + body = {'reset': {}} + self.assert_called_anytime('POST', '/os-coverage/action', body) + + def test_hypervisor_list(self): + self.run_command('hypervisor-list') + self.assert_called('GET', '/os-hypervisors') + + def test_hypervisor_list_matching(self): + self.run_command('hypervisor-list --matching hyper') + self.assert_called('GET', '/os-hypervisors/hyper/search') + + def test_hypervisor_servers(self): + self.run_command('hypervisor-servers hyper') + self.assert_called('GET', '/os-hypervisors/hyper/servers') + + def test_hypervisor_show_by_id(self): + self.run_command('hypervisor-show 1234') + self.assert_called('GET', '/os-hypervisors/1234') + + def test_hypervisor_show_by_name(self): + self.run_command('hypervisor-show hyper1') + self.assert_called('GET', '/os-hypervisors/detail') + + def test_hypervisor_uptime_by_id(self): + self.run_command('hypervisor-uptime 1234') + self.assert_called('GET', '/os-hypervisors/1234/uptime') + + def test_hypervisor_uptime_by_name(self): + self.run_command('hypervisor-uptime hyper1') + self.assert_called('GET', '/os-hypervisors/1234/uptime') + + def test_hypervisor_stats(self): + self.run_command('hypervisor-stats') + self.assert_called('GET', '/os-hypervisors/statistics') + + def test_quota_show(self): + self.run_command('quota-show --tenant ' + '97f4c221bff44578b0300df4ef119353') + self.assert_called('GET', + '/os-quota-sets/97f4c221bff44578b0300df4ef119353') + + def test_user_quota_show(self): + self.run_command('quota-show --tenant ' + '97f4c221bff44578b0300df4ef119353 --user u1') + self.assert_called('GET', + '/os-quota-sets/97f4c221bff44578b0300df4ef119353?user_id=u1') + + def test_quota_show_no_tenant(self): + self.run_command('quota-show') + self.assert_called('GET', '/os-quota-sets/tenant_id') + + def test_quota_defaults(self): + self.run_command('quota-defaults --tenant ' + 
'97f4c221bff44578b0300df4ef119353') + self.assert_called('GET', + '/os-quota-sets/97f4c221bff44578b0300df4ef119353/defaults') + + def test_quota_defaults_no_tenant(self): + self.run_command('quota-defaults') + self.assert_called('GET', '/os-quota-sets/tenant_id/defaults') + + def test_quota_update(self): + self.run_command( + 'quota-update 97f4c221bff44578b0300df4ef119353' + ' --instances=5') + self.assert_called( + 'PUT', + '/os-quota-sets/97f4c221bff44578b0300df4ef119353', + {'quota_set': {'instances': 5, + 'tenant_id': '97f4c221bff44578b0300df4ef119353'}}) + + def test_user_quota_update(self): + self.run_command( + 'quota-update 97f4c221bff44578b0300df4ef119353' + ' --user=u1' + ' --instances=5') + self.assert_called( + 'PUT', + '/os-quota-sets/97f4c221bff44578b0300df4ef119353?user_id=u1', + {'quota_set': {'instances': 5, + 'tenant_id': '97f4c221bff44578b0300df4ef119353'}}) + + def test_quota_force_update(self): + self.run_command( + 'quota-update 97f4c221bff44578b0300df4ef119353' + ' --instances=5 --force') + self.assert_called( + 'PUT', '/os-quota-sets/97f4c221bff44578b0300df4ef119353', + {'quota_set': {'force': True, + 'instances': 5, + 'tenant_id': '97f4c221bff44578b0300df4ef119353'}}) + + def test_quota_update_fixed_ip(self): + self.run_command( + 'quota-update 97f4c221bff44578b0300df4ef119353' + ' --fixed-ips=5') + self.assert_called( + 'PUT', '/os-quota-sets/97f4c221bff44578b0300df4ef119353', + {'quota_set': {'fixed_ips': 5, + 'tenant_id': '97f4c221bff44578b0300df4ef119353'}}) + + def test_quota_delete(self): + self.run_command('quota-delete --tenant ' + '97f4c221bff44578b0300df4ef119353') + self.assert_called('DELETE', + '/os-quota-sets/97f4c221bff44578b0300df4ef119353') + + def test_user_quota_delete(self): + self.run_command('quota-delete --tenant ' + '97f4c221bff44578b0300df4ef119353 ' + '--user u1') + self.assert_called('DELETE', + '/os-quota-sets/97f4c221bff44578b0300df4ef119353?user_id=u1') + + def test_quota_class_show(self): + self.run_command('quota-class-show test') + self.assert_called('GET', '/os-quota-class-sets/test') + + def test_quota_class_update(self): + self.run_command('quota-class-update 97f4c221bff44578b0300df4ef119353' + ' --instances=5') + self.assert_called('PUT', + '/os-quota-class-sets/97f4c221bff44578b0300' + 'df4ef119353') + + def test_network_list(self): + self.run_command('network-list') + self.assert_called('GET', '/os-networks') + + def test_network_show(self): + self.run_command('network-show 1') + self.assert_called('GET', '/os-networks/1') + + def test_cloudpipe_list(self): + self.run_command('cloudpipe-list') + self.assert_called('GET', '/os-cloudpipe') + + def test_cloudpipe_create(self): + self.run_command('cloudpipe-create myproject') + body = {'cloudpipe': {'project_id': "myproject"}} + self.assert_called('POST', '/os-cloudpipe', body) + + def test_cloudpipe_configure(self): + self.run_command('cloudpipe-configure 192.168.1.1 1234') + body = {'configure_project': {'vpn_ip': "192.168.1.1", + 'vpn_port': '1234'}} + self.assert_called('PUT', '/os-cloudpipe/configure-project', body) + + def test_network_associate_host(self): + self.run_command('network-associate-host 1 testHost') + body = {'associate_host': 'testHost'} + self.assert_called('POST', '/os-networks/1/action', body) + + def test_network_associate_project(self): + self.run_command('network-associate-project 1') + body = {'id': "1"} + self.assert_called('POST', '/os-networks/add', body) + + def test_network_disassociate_host(self): + self.run_command('network-disassociate 
--host-only 1 2') + body = {'disassociate_host': None} + self.assert_called('POST', '/os-networks/2/action', body) + + def test_network_disassociate_project(self): + self.run_command('network-disassociate --project-only 1 2') + body = {'disassociate_project': None} + self.assert_called('POST', '/os-networks/2/action', body) + + def test_network_create_v4(self): + self.run_command('network-create --fixed-range-v4 10.0.1.0/24' + ' --dns1 10.0.1.254 new_network') + body = {'network': {'cidr': '10.0.1.0/24', 'label': 'new_network', + 'dns1': '10.0.1.254'}} + self.assert_called('POST', '/os-networks', body) + + def test_network_create_v6(self): + self.run_command('network-create --fixed-range-v6 2001::/64' + ' new_network') + body = {'network': {'cidr_v6': '2001::/64', 'label': 'new_network'}} + self.assert_called('POST', '/os-networks', body) + + def test_network_create_invalid(self): + cmd = 'network-create 10.0.1.0' + self.assertRaises(exceptions.CommandError, self.run_command, cmd) + + def test_network_create_multi_host(self): + self.run_command('network-create --fixed-range-v4 192.168.0.0/24' + ' --multi-host=T new_network') + body = {'network': {'cidr': '192.168.0.0/24', 'label': 'new_network', + 'multi_host': True}} + self.assert_called('POST', '/os-networks', body) + + self.run_command('network-create --fixed-range-v4 192.168.0.0/24' + ' --multi-host=True new_network') + body = {'network': {'cidr': '192.168.0.0/24', 'label': 'new_network', + 'multi_host': True}} + self.assert_called('POST', '/os-networks', body) + + self.run_command('network-create --fixed-range-v4 192.168.0.0/24' + ' --multi-host=1 new_network') + body = {'network': {'cidr': '192.168.0.0/24', 'label': 'new_network', + 'multi_host': True}} + self.assert_called('POST', '/os-networks', body) + + self.run_command('network-create --fixed-range-v4 192.168.1.0/24' + ' --multi-host=F new_network') + body = {'network': {'cidr': '192.168.1.0/24', 'label': 'new_network', + 'multi_host': False}} + self.assert_called('POST', '/os-networks', body) + + def test_network_create_vlan(self): + self.run_command('network-create --fixed-range-v4 192.168.0.0/24' + ' --vlan=200 new_network') + body = {'network': {'cidr': '192.168.0.0/24', 'label': 'new_network', + 'vlan_start': '200'}} + self.assert_called('POST', '/os-networks', body) + + def test_add_fixed_ip(self): + self.run_command('add-fixed-ip sample-server 1') + self.assert_called('POST', '/servers/1234/action', + {'addFixedIp': {'networkId': '1'}}) + + def test_remove_fixed_ip(self): + self.run_command('remove-fixed-ip sample-server 10.0.0.10') + self.assert_called('POST', '/servers/1234/action', + {'removeFixedIp': {'address': '10.0.0.10'}}) + + def test_backup(self): + self.run_command('backup sample-server back1 daily 1') + self.assert_called('POST', '/servers/1234/action', + {'createBackup': {'name': 'back1', + 'backup_type': 'daily', + 'rotation': '1'}}) + self.run_command('backup 1234 back1 daily 1') + self.assert_called('POST', '/servers/1234/action', + {'createBackup': {'name': 'back1', + 'backup_type': 'daily', + 'rotation': '1'}}) + + def test_absolute_limits(self): + self.run_command('absolute-limits') + self.assert_called('GET', '/limits') + + self.run_command('absolute-limits --reserved') + self.assert_called('GET', '/limits?reserved=1') + + self.run_command('absolute-limits --tenant 1234') + self.assert_called('GET', '/limits?tenant_id=1234') + + def test_evacuate(self): + self.run_command('evacuate sample-server new_host') + self.assert_called('POST', 
'/servers/1234/action', + {'evacuate': {'host': 'new_host', + 'onSharedStorage': False}}) + self.run_command('evacuate sample-server new_host ' + '--password NewAdminPass') + self.assert_called('POST', '/servers/1234/action', + {'evacuate': {'host': 'new_host', + 'onSharedStorage': False, + 'adminPass': 'NewAdminPass'}}) + self.run_command('evacuate sample-server new_host') + self.assert_called('POST', '/servers/1234/action', + {'evacuate': {'host': 'new_host', + 'onSharedStorage': False}}) + self.run_command('evacuate sample-server new_host ' + '--on-shared-storage') + self.assert_called('POST', '/servers/1234/action', + {'evacuate': {'host': 'new_host', + 'onSharedStorage': True}}) + + def test_get_password(self): + self.run_command('get-password sample-server /foo/id_rsa') + self.assert_called('GET', '/servers/1234/os-server-password') + + def test_clear_password(self): + self.run_command('clear-password sample-server') + self.assert_called('DELETE', '/servers/1234/os-server-password') + + def test_availability_zone_list(self): + self.run_command('availability-zone-list') + self.assert_called('GET', '/os-availability-zone/detail') + + def test_security_group_create(self): + self.run_command('secgroup-create test FAKE_SECURITY_GROUP') + self.assert_called('POST', '/os-security-groups', + {'security_group': + {'name': 'test', + 'description': 'FAKE_SECURITY_GROUP'}}) + + def test_security_group_update(self): + self.run_command('secgroup-update test te FAKE_SECURITY_GROUP') + self.assert_called('PUT', '/os-security-groups/1', + {'security_group': + {'name': 'te', + 'description': 'FAKE_SECURITY_GROUP'}}) + + def test_security_group_list(self): + self.run_command('secgroup-list') + self.assert_called('GET', '/os-security-groups') + + def test_security_group_add_rule(self): + self.run_command('secgroup-add-rule test tcp 22 22 10.0.0.0/8') + self.assert_called('POST', '/os-security-group-rules', + {'security_group_rule': + {'from_port': 22, + 'ip_protocol': 'tcp', + 'to_port': 22, + 'parent_group_id': 1, + 'cidr': '10.0.0.0/8', + 'group_id': None}}) + + def test_security_group_delete_rule(self): + self.run_command('secgroup-delete-rule test TCP 22 22 10.0.0.0/8') + self.assert_called('DELETE', '/os-security-group-rules/11') + + def test_security_group_delete_rule_protocol_case(self): + self.run_command('secgroup-delete-rule test tcp 22 22 10.0.0.0/8') + self.assert_called('DELETE', '/os-security-group-rules/11') + + def test_security_group_add_group_rule(self): + self.run_command('secgroup-add-group-rule test test2 tcp 22 22') + self.assert_called('POST', '/os-security-group-rules', + {'security_group_rule': + {'from_port': 22, + 'ip_protocol': 'TCP', + 'to_port': 22, + 'parent_group_id': 1, + 'cidr': None, + 'group_id': 2}}) + + def test_security_group_delete_group_rule(self): + self.run_command('secgroup-delete-group-rule test test2 TCP 222 222') + self.assert_called('DELETE', '/os-security-group-rules/12') + + def test_security_group_delete_group_rule_protocol_case(self): + self.run_command('secgroup-delete-group-rule test test2 tcp 222 222') + self.assert_called('DELETE', '/os-security-group-rules/12') + + def test_security_group_list_rules(self): + self.run_command('secgroup-list-rules test') + self.assert_called('GET', '/os-security-groups') + + def test_security_group_list_all_tenants(self): + self.run_command('secgroup-list --all-tenants 1') + self.assert_called('GET', '/os-security-groups?all_tenants=1') + + def test_security_group_delete(self): + self.run_command('secgroup-delete 
test') + self.assert_called('DELETE', '/os-security-groups/1') + + def test_server_security_group_add(self): + self.run_command('add-secgroup sample-server testgroup') + self.assert_called('POST', '/servers/1234/action', + {'addSecurityGroup': {'name': 'testgroup'}}) + + def test_server_security_group_remove(self): + self.run_command('remove-secgroup sample-server testgroup') + self.assert_called('POST', '/servers/1234/action', + {'removeSecurityGroup': {'name': 'testgroup'}}) + + def test_server_security_group_list(self): + self.run_command('list-secgroup 1234') + self.assert_called('GET', '/servers/1234/os-security-groups') + + def test_interface_list(self): + self.run_command('interface-list 1234') + self.assert_called('GET', '/servers/1234/os-interface') + + def test_interface_attach(self): + self.run_command('interface-attach --port-id port_id 1234') + self.assert_called('POST', '/servers/1234/os-interface', + {'interfaceAttachment': {'port_id': 'port_id'}}) + + def test_interface_detach(self): + self.run_command('interface-detach 1234 port_id') + self.assert_called('DELETE', '/servers/1234/os-interface/port_id') + + def test_volume_list(self): + self.run_command('volume-list') + self.assert_called('GET', '/volumes/detail') + + def test_volume_show(self): + self.run_command('volume-show Work') + self.assert_called('GET', '/volumes', pos=-2) + self.assert_called( + 'GET', + '/volumes/15e59938-07d5-11e1-90e3-e3dffe0c5983', + pos=-1 + ) + + def test_volume_create(self): + self.run_command('volume-create 2 --display-name Work') + self.assert_called('POST', '/volumes', + {'volume': + {'display_name': 'Work', + 'imageRef': None, + 'availability_zone': None, + 'volume_type': None, + 'display_description': None, + 'snapshot_id': None, + 'size': 2}}) + + def test_volume_delete(self): + self.run_command('volume-delete Work') + self.assert_called('DELETE', + '/volumes/15e59938-07d5-11e1-90e3-e3dffe0c5983') + + def test_volume_attach(self): + self.run_command('volume-attach sample-server Work /dev/vdb') + self.assert_called('POST', '/servers/1234/os-volume_attachments', + {'volumeAttachment': + {'device': '/dev/vdb', + 'volumeId': 'Work'}}) + + def test_volume_update(self): + self.run_command('volume-update sample-server Work Work') + self.assert_called('PUT', '/servers/1234/os-volume_attachments/Work', + {'volumeAttachment': {'volumeId': 'Work'}}) + + def test_volume_detach(self): + self.run_command('volume-detach sample-server Work') + self.assert_called('DELETE', + '/servers/1234/os-volume_attachments/Work') + + def test_instance_action_list(self): + self.run_command('instance-action-list sample-server') + self.assert_called('GET', '/servers/1234/os-instance-actions') + + def test_instance_action_get(self): + self.run_command('instance-action sample-server req-abcde12345') + self.assert_called('GET', + '/servers/1234/os-instance-actions/req-abcde12345') + + def test_cell_show(self): + self.run_command('cell-show child_cell') + self.assert_called('GET', '/os-cells/child_cell') + + def test_cell_capacities_with_cell_name(self): + self.run_command('cell-capacities --cell child_cell') + self.assert_called('GET', '/os-cells/child_cell/capacities') + + def test_cell_capacities_without_cell_name(self): + self.run_command('cell-capacities') + self.assert_called('GET', '/os-cells/capacities') + + def test_migration_list(self): + self.run_command('migration-list') + self.assert_called('GET', '/os-migrations') + + def test_migration_list_with_filters(self): + self.run_command('migration-list --host host1 
--cell_name child1 ' + '--status finished') + self.assert_called('GET', + '/os-migrations?status=finished&host=host1' + '&cell_name=child1') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_usage.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_usage.py new file mode 100644 index 0000000000..49b0baedff --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_usage.py @@ -0,0 +1,35 @@ +import datetime + +from novaclient.v1_1 import usage +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class UsageTest(utils.TestCase): + + def test_usage_list(self, detailed=False): + now = datetime.datetime.now() + usages = cs.usage.list(now, now, detailed) + + cs.assert_called('GET', + "/os-simple-tenant-usage?" + + ("start=%s&" % now.isoformat()) + + ("end=%s&" % now.isoformat()) + + ("detailed=%s" % int(bool(detailed)))) + [self.assertTrue(isinstance(u, usage.Usage)) for u in usages] + + def test_usage_list_detailed(self): + self.test_usage_list(True) + + def test_usage_get(self): + now = datetime.datetime.now() + u = cs.usage.get("tenantfoo", now, now) + + cs.assert_called('GET', + "/os-simple-tenant-usage/tenantfoo?" + + ("start=%s&" % now.isoformat()) + + ("end=%s" % now.isoformat())) + self.assertTrue(isinstance(u, usage.Usage)) diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/test_volumes.py b/awx/lib/site-packages/novaclient/tests/v1_1/test_volumes.py new file mode 100644 index 0000000000..131a032d76 --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/test_volumes.py @@ -0,0 +1,93 @@ +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from novaclient.v1_1 import volumes +from novaclient.tests import utils +from novaclient.tests.v1_1 import fakes + + +cs = fakes.FakeClient() + + +class VolumesTest(utils.TestCase): + + def test_list_servers(self): + vl = cs.volumes.list() + cs.assert_called('GET', '/volumes/detail') + [self.assertTrue(isinstance(v, volumes.Volume)) for v in vl] + + def test_list_volumes_undetailed(self): + vl = cs.volumes.list(detailed=False) + cs.assert_called('GET', '/volumes') + [self.assertTrue(isinstance(v, volumes.Volume)) for v in vl] + + def test_get_volume_details(self): + vol_id = '15e59938-07d5-11e1-90e3-e3dffe0c5983' + v = cs.volumes.get(vol_id) + cs.assert_called('GET', '/volumes/%s' % vol_id) + self.assertTrue(isinstance(v, volumes.Volume)) + self.assertEqual(v.id, vol_id) + + def test_create_volume(self): + v = cs.volumes.create( + size=2, + display_name="My volume", + display_description="My volume desc", + ) + cs.assert_called('POST', '/volumes') + self.assertTrue(isinstance(v, volumes.Volume)) + + def test_delete_volume(self): + vol_id = '15e59938-07d5-11e1-90e3-e3dffe0c5983' + v = cs.volumes.get(vol_id) + v.delete() + cs.assert_called('DELETE', '/volumes/%s' % vol_id) + cs.volumes.delete(vol_id) + cs.assert_called('DELETE', '/volumes/%s' % vol_id) + cs.volumes.delete(v) + cs.assert_called('DELETE', '/volumes/%s' % vol_id) + + def test_create_server_volume(self): + v = cs.volumes.create_server_volume( + server_id=1234, + volume_id='15e59938-07d5-11e1-90e3-e3dffe0c5983', + device='/dev/vdb' + ) + cs.assert_called('POST', '/servers/1234/os-volume_attachments') + self.assertTrue(isinstance(v, volumes.Volume)) + + def test_update_server_volume(self): + vol_id = '15e59938-07d5-11e1-90e3-e3dffe0c5983' + v = cs.volumes.update_server_volume( + server_id=1234, + attachment_id='Work', + new_volume_id=vol_id + ) + cs.assert_called('PUT', '/servers/1234/os-volume_attachments/Work') + self.assertTrue(isinstance(v, volumes.Volume)) + + def test_get_server_volume(self): + v = cs.volumes.get_server_volume(1234, 'Work') + cs.assert_called('GET', '/servers/1234/os-volume_attachments/Work') + self.assertTrue(isinstance(v, volumes.Volume)) + + def test_list_server_volumes(self): + vl = cs.volumes.get_server_volumes(1234) + cs.assert_called('GET', '/servers/1234/os-volume_attachments') + [self.assertTrue(isinstance(v, volumes.Volume)) for v in vl] + + def test_delete_server_volume(self): + cs.volumes.delete_server_volume(1234, 'Work') + cs.assert_called('DELETE', '/servers/1234/os-volume_attachments/Work') diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/testfile.txt b/awx/lib/site-packages/novaclient/tests/v1_1/testfile.txt new file mode 100644 index 0000000000..e4e860f38a --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/testfile.txt @@ -0,0 +1 @@ +BLAH diff --git a/awx/lib/site-packages/novaclient/tests/v1_1/utils.py b/awx/lib/site-packages/novaclient/tests/v1_1/utils.py new file mode 100644 index 0000000000..eaf108f55b --- /dev/null +++ b/awx/lib/site-packages/novaclient/tests/v1_1/utils.py @@ -0,0 +1,29 @@ +from nose.tools import ok_ + + +def fail(msg): + raise AssertionError(msg) + + +def assert_in(thing, seq, msg=None): + msg = msg or "'%s' not found in %s" % (thing, seq) + ok_(thing in seq, msg) + + +def assert_not_in(thing, seq, msg=None): + msg = msg or "unexpected '%s' found in %s" % (thing, seq) + ok_(thing not in seq, msg) + + +def assert_has_keys(dict, required=[], optional=[]): + keys = dict.keys() + for k in required: + assert_in(k, keys, "required key %s missing from 
%s" % (k, dict)) + allowed_keys = set(required) | set(optional) + extra_keys = set(keys).difference(allowed_keys) + if extra_keys: + fail("found unexpected keys: %s" % list(extra_keys)) + + +def assert_isinstance(thing, kls): + ok_(isinstance(thing, kls), "%s is not an instance of %s" % (thing, kls)) diff --git a/awx/lib/site-packages/novaclient/utils.py b/awx/lib/site-packages/novaclient/utils.py new file mode 100644 index 0000000000..5abecb6c74 --- /dev/null +++ b/awx/lib/site-packages/novaclient/utils.py @@ -0,0 +1,368 @@ +import os +import pkg_resources +import re +import sys +import textwrap +import uuid + +import prettytable +import six + +from novaclient import exceptions +from novaclient.openstack.common import strutils + + +def arg(*args, **kwargs): + """Decorator for CLI args.""" + def _decorator(func): + add_arg(func, *args, **kwargs) + return func + return _decorator + + +def env(*args, **kwargs): + """ + returns the first environment variable set + if none are non-empty, defaults to '' or keyword arg default + """ + for arg in args: + value = os.environ.get(arg, None) + if value: + return value + return kwargs.get('default', '') + + +def add_arg(f, *args, **kwargs): + """Bind CLI arguments to a shell.py `do_foo` function.""" + + if not hasattr(f, 'arguments'): + f.arguments = [] + + # NOTE(sirp): avoid dups that can occur when the module is shared across + # tests. + if (args, kwargs) not in f.arguments: + # Because of the sematics of decorator composition if we just append + # to the options list positional options will appear to be backwards. + f.arguments.insert(0, (args, kwargs)) + + +def bool_from_str(val): + """Convert a string representation of a bool into a bool value.""" + + if not val: + return False + try: + return bool(int(val)) + except ValueError: + if val.lower() in ['true', 'yes', 'y']: + return True + if val.lower() in ['false', 'no', 'n']: + return False + raise + + +def add_resource_manager_extra_kwargs_hook(f, hook): + """Add hook to bind CLI arguments to ResourceManager calls. + + The `do_foo` calls in shell.py will receive CLI args and then in turn pass + them through to the ResourceManager. Before passing through the args, the + hooks registered here will be called, giving us a chance to add extra + kwargs (taken from the command-line) to what's passed to the + ResourceManager. + """ + if not hasattr(f, 'resource_manager_kwargs_hooks'): + f.resource_manager_kwargs_hooks = [] + + names = [h.__name__ for h in f.resource_manager_kwargs_hooks] + if hook.__name__ not in names: + f.resource_manager_kwargs_hooks.append(hook) + + +def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False): + """Return extra_kwargs by calling resource manager kwargs hooks.""" + hooks = getattr(f, "resource_manager_kwargs_hooks", []) + extra_kwargs = {} + for hook in hooks: + hook_kwargs = hook(args) + + conflicting_keys = set(hook_kwargs.keys()) & set(extra_kwargs.keys()) + if conflicting_keys and not allow_conflicts: + raise Exception("Hook '%(hook_name)s' is attempting to redefine" + " attributes '%(conflicting_keys)s'" % + {'hook_name': hook_name, + 'conflicting_keys': conflicting_keys}) + + extra_kwargs.update(hook_kwargs) + + return extra_kwargs + + +def unauthenticated(f): + """ + Adds 'unauthenticated' attribute to decorated function. + Usage: + @unauthenticated + def mymethod(f): + ... 
+ """ + f.unauthenticated = True + return f + + +def isunauthenticated(f): + """ + Checks to see if the function is marked as not requiring authentication + with the @unauthenticated decorator. Returns True if decorator is + set to True, False otherwise. + """ + return getattr(f, 'unauthenticated', False) + + +def service_type(stype): + """ + Adds 'service_type' attribute to decorated function. + Usage: + @service_type('volume') + def mymethod(f): + ... + """ + def inner(f): + f.service_type = stype + return f + return inner + + +def get_service_type(f): + """ + Retrieves service type from function + """ + return getattr(f, 'service_type', None) + + +def pretty_choice_list(l): + return ', '.join("'%s'" % i for i in l) + + +def print_list(objs, fields, formatters={}, sortby_index=None): + if sortby_index is None: + sortby = None + else: + sortby = fields[sortby_index] + mixed_case_fields = ['serverId'] + pt = prettytable.PrettyTable([f for f in fields], caching=False) + pt.align = 'l' + + for o in objs: + row = [] + for field in fields: + if field in formatters: + row.append(formatters[field](o)) + else: + if field in mixed_case_fields: + field_name = field.replace(' ', '_') + else: + field_name = field.lower().replace(' ', '_') + data = getattr(o, field_name, '') + row.append(data) + pt.add_row(row) + + if sortby is not None: + print(strutils.safe_encode(pt.get_string(sortby=sortby))) + else: + print(strutils.safe_encode(pt.get_string())) + + +def print_dict(d, dict_property="Property", dict_value="Value", wrap=0): + pt = prettytable.PrettyTable([dict_property, dict_value], caching=False) + pt.align = 'l' + for k, v in six.iteritems(d): + # convert dict to str to check length + if isinstance(v, dict): + v = str(v) + if wrap > 0: + v = textwrap.fill(str(v), wrap) + # if value has a newline, add in multiple rows + # e.g. fault with stacktrace + if v and isinstance(v, six.string_types) and r'\n' in v: + lines = v.strip().split(r'\n') + col1 = k + for line in lines: + pt.add_row([col1, line]) + col1 = '' + else: + pt.add_row([k, v]) + print(strutils.safe_encode(pt.get_string())) + + +def find_resource(manager, name_or_id, **find_args): + """Helper for the _find_* methods.""" + # first try to get entity as integer id + try: + return manager.get(int(name_or_id)) + except (TypeError, ValueError, exceptions.NotFound): + pass + + # now try to get entity as uuid + try: + uuid.UUID(strutils.safe_encode(name_or_id)) + return manager.get(name_or_id) + except (TypeError, ValueError, exceptions.NotFound): + pass + + # for str id which is not uuid (for Flavor search currently) + if getattr(manager, 'is_alphanum_id_allowed', False): + try: + return manager.get(name_or_id) + except exceptions.NotFound: + pass + + try: + try: + return manager.find(human_id=name_or_id, **find_args) + except exceptions.NotFound: + pass + + # finally try to find entity by name + try: + resource = getattr(manager, 'resource_class', None) + name_attr = resource.NAME_ATTR if resource else 'name' + kwargs = {name_attr: name_or_id} + kwargs.update(find_args) + return manager.find(**kwargs) + except exceptions.NotFound: + msg = "No %s with a name or ID of '%s' exists." % \ + (manager.resource_class.__name__.lower(), name_or_id) + raise exceptions.CommandError(msg) + except exceptions.NoUniqueMatch: + msg = ("Multiple %s matches found for '%s', use an ID to be more" + " specific." 
% (manager.resource_class.__name__.lower(), + name_or_id)) + raise exceptions.CommandError(msg) + + +def _format_servers_list_networks(server): + output = [] + for (network, addresses) in server.networks.items(): + if len(addresses) == 0: + continue + addresses_csv = ', '.join(addresses) + group = "%s=%s" % (network, addresses_csv) + output.append(group) + + return '; '.join(output) + + +def _format_security_groups(groups): + return ', '.join(group['name'] for group in groups) + + +def _format_field_name(attr): + """Format an object attribute in a human-friendly way.""" + # Split at ':' and leave the extension name as-is. + parts = attr.rsplit(':', 1) + name = parts[-1].replace('_', ' ') + # Don't title() on mixed case + if name.isupper() or name.islower(): + name = name.title() + parts[-1] = name + return ': '.join(parts) + + +def _make_field_formatter(attr, filters=None): + """ + Given an object attribute, return a formatted field name and a + formatter suitable for passing to print_list. + + Optionally pass a dict mapping attribute names to a function. The function + will be passed the value of the attribute and should return the string to + display. + """ + filter_ = None + if filters: + filter_ = filters.get(attr) + + def get_field(obj): + field = getattr(obj, attr, '') + if field and filter_: + field = filter_(field) + return field + + name = _format_field_name(attr) + formatter = get_field + return name, formatter + + +class HookableMixin(object): + """Mixin so classes can register and run hooks.""" + _hooks_map = {} + + @classmethod + def add_hook(cls, hook_type, hook_func): + if hook_type not in cls._hooks_map: + cls._hooks_map[hook_type] = [] + + cls._hooks_map[hook_type].append(hook_func) + + @classmethod + def run_hooks(cls, hook_type, *args, **kwargs): + hook_funcs = cls._hooks_map.get(hook_type) or [] + for hook_func in hook_funcs: + hook_func(*args, **kwargs) + + +def safe_issubclass(*args): + """Like issubclass, but will just return False if not a class.""" + + try: + if issubclass(*args): + return True + except TypeError: + pass + + return False + + +def import_class(import_str): + """Returns a class from a string including module and class.""" + mod_str, _sep, class_str = import_str.rpartition('.') + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + +_slugify_strip_re = re.compile(r'[^\w\s-]') +_slugify_hyphenate_re = re.compile(r'[-\s]+') + + +# http://code.activestate.com/recipes/ +# 577257-slugify-make-a-string-usable-in-a-url-or-filename/ +def slugify(value): + """ + Normalizes string, converts to lowercase, removes non-alpha characters, + and converts spaces to hyphens. + + From Django's "django/template/defaultfilters.py". 
+    """
+    import unicodedata
+    if not isinstance(value, six.text_type):
+        value = six.text_type(value)
+    value = unicodedata.normalize('NFKD', value).encode('ascii',
+                                                        'ignore').decode("ascii")
+    value = six.text_type(_slugify_strip_re.sub('', value).strip().lower())
+    return _slugify_hyphenate_re.sub('-', value)
+
+
+def _load_entry_point(ep_name, name=None):
+    """Try to load the entry point ep_name that matches name."""
+    for ep in pkg_resources.iter_entry_points(ep_name, name=name):
+        try:
+            return ep.load()
+        except (ImportError, pkg_resources.UnknownExtra, AttributeError):
+            continue
+
+
+def is_integer_like(val):
+    """Returns validation of a value as an integer."""
+    try:
+        int(val)
+        return True
+    except (TypeError, ValueError, AttributeError):
+        return False
diff --git a/awx/lib/site-packages/novaclient/v1_1/__init__.py b/awx/lib/site-packages/novaclient/v1_1/__init__.py
new file mode 100644
index 0000000000..19712285f8
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2012 OpenStack Foundation
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient.v1_1.client import Client  # noqa
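A short sketch of how the helpers in novaclient/utils.py compose (editor's
illustration, not part of the vendored source; `do_boot` and the variable
names are hypothetical):

    from novaclient import utils

    # env() returns the first set, non-empty variable among its arguments.
    user = utils.env('OS_USERNAME', 'NOVA_USERNAME', default='demo')

    # Stacked @utils.arg decorators accumulate (args, kwargs) tuples on the
    # function's `arguments` list, which shell.py later feeds to argparse;
    # add_arg() inserts at position 0 so the top decorator lands first.
    @utils.arg('name', metavar='<name>', help='Server name.')
    @utils.arg('--flavor', metavar='<flavor>', help='Flavor name or ID.')
    def do_boot(cs, args):
        pass

    assert [a[0] for a in do_boot.arguments] == [('name',), ('--flavor',)]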
diff --git a/awx/lib/site-packages/novaclient/v1_1/agents.py b/awx/lib/site-packages/novaclient/v1_1/agents.py
new file mode 100644
index 0000000000..550d47ab6b
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/agents.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+agent interface
+"""
+
+from novaclient import base
+
+
+class Agent(base.Resource):
+    def __repr__(self):
+        return "<Agent: %s>" % self.agent
+
+    def _add_details(self, info):
+        dico = 'resource' in info and info['resource'] or info
+        for (k, v) in dico.items():
+            setattr(self, k, v)
+
+
+class AgentsManager(base.ManagerWithFind):
+    resource_class = Agent
+
+    def list(self, hypervisor=None):
+        """List all agent builds."""
+        url = "/os-agents"
+        if hypervisor:
+            url = "/os-agents?hypervisor=%s" % hypervisor
+        return self._list(url, "agents")
+
+    def update(self, id, version,
+               url, md5hash):
+        """Update an existing agent build."""
+        body = {'para': {
+            'version': version,
+            'url': url,
+            'md5hash': md5hash}}
+        return self._update('/os-agents/%s' % id, body, 'agent')
+
+    def create(self, os, architecture, version,
+               url, md5hash, hypervisor):
+        """Create a new agent build."""
+        body = {'agent': {
+            'hypervisor': hypervisor,
+            'os': os,
+            'architecture': architecture,
+            'version': version,
+            'url': url,
+            'md5hash': md5hash}}
+        return self._create('/os-agents', body, 'agent')
+
+    def delete(self, id):
+        """Deletes an existing agent build."""
+        self._delete('/os-agents/%s' % id)
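For context, a sketch of how this manager is reached from a client (editor's
illustration; all values are placeholders, and the `agent_id` attribute is an
assumption about the API response):

    # Assumes `nova` is an authenticated v1_1 Client (see client.py below).
    nova.agents.create(os='linux', architecture='x86_64', version='1.0',
                       url='http://example.com/agent.tgz',
                       md5hash='d41d8cd98f00b204e9800998ecf8427e',
                       hypervisor='xen')
    for agent in nova.agents.list(hypervisor='xen'):
        print(agent.agent_id, agent.version)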
diff --git a/awx/lib/site-packages/novaclient/v1_1/aggregates.py b/awx/lib/site-packages/novaclient/v1_1/aggregates.py
new file mode 100644
index 0000000000..c2df31526d
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/aggregates.py
@@ -0,0 +1,95 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Aggregate interface."""
+
+from novaclient import base
+
+
+class Aggregate(base.Resource):
+    """An aggregate is a collection of compute hosts."""
+
+    def __repr__(self):
+        return "<Aggregate: %s>" % self.id
+
+    def update(self, values):
+        """Update the name and/or availability zone."""
+        return self.manager.update(self, values)
+
+    def add_host(self, host):
+        return self.manager.add_host(self, host)
+
+    def remove_host(self, host):
+        return self.manager.remove_host(self, host)
+
+    def set_metadata(self, metadata):
+        return self.manager.set_metadata(self, metadata)
+
+    def delete(self):
+        self.manager.delete(self)
+
+
+class AggregateManager(base.ManagerWithFind):
+    resource_class = Aggregate
+
+    def list(self):
+        """Get a list of os-aggregates."""
+        return self._list('/os-aggregates', 'aggregates')
+
+    def create(self, name, availability_zone):
+        """Create a new aggregate."""
+        body = {'aggregate': {'name': name,
+                              'availability_zone': availability_zone}}
+        return self._create('/os-aggregates', body, 'aggregate')
+
+    def get(self, aggregate):
+        """Get details of the specified aggregate."""
+        return self._get('/os-aggregates/%s' % (base.getid(aggregate)),
+                         "aggregate")
+
+    # NOTE:(dtroyer): utils.find_resource() uses manager.get() but we need to
+    # keep the API backward compatible
+    def get_details(self, aggregate):
+        """Get details of the specified aggregate."""
+        return self.get(aggregate)
+
+    def update(self, aggregate, values):
+        """Update the name and/or availability zone."""
+        body = {'aggregate': values}
+        return self._update("/os-aggregates/%s" % base.getid(aggregate),
+                            body,
+                            "aggregate")
+
+    def add_host(self, aggregate, host):
+        """Add a host into the Host Aggregate."""
+        body = {'add_host': {'host': host}}
+        return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
+                            body, "aggregate")
+
+    def remove_host(self, aggregate, host):
+        """Remove a host from the Host Aggregate."""
+        body = {'remove_host': {'host': host}}
+        return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
+                            body, "aggregate")
+
+    def set_metadata(self, aggregate, metadata):
+        """Set an aggregate's metadata, replacing the existing metadata."""
+        body = {'set_metadata': {'metadata': metadata}}
+        return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
+                            body, "aggregate")
+
+    def delete(self, aggregate):
+        """Delete the specified aggregate."""
+        self._delete('/os-aggregates/%s' % (base.getid(aggregate)))
diff --git a/awx/lib/site-packages/novaclient/v1_1/availability_zones.py b/awx/lib/site-packages/novaclient/v1_1/availability_zones.py
new file mode 100644
index 0000000000..41362aa252
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/availability_zones.py
@@ -0,0 +1,50 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Availability Zone interface (1.1 extension).
+""" + +from novaclient import base + + +class AvailabilityZone(base.Resource): + """ + An availability zone object. + """ + NAME_ATTR = 'display_name' + + def __repr__(self): + return "" % self.zoneName + + +class AvailabilityZoneManager(base.ManagerWithFind): + """ + Manage :class:`AvailabilityZone` resources. + """ + resource_class = AvailabilityZone + + def list(self, detailed=True): + """ + Get a list of all availability zones. + + :rtype: list of :class:`AvailabilityZone` + """ + if detailed is True: + return self._list("/os-availability-zone/detail", + "availabilityZoneInfo") + else: + return self._list("/os-availability-zone", "availabilityZoneInfo") diff --git a/awx/lib/site-packages/novaclient/v1_1/certs.py b/awx/lib/site-packages/novaclient/v1_1/certs.py new file mode 100644 index 0000000000..b33d521342 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/certs.py @@ -0,0 +1,48 @@ +# Copyright 2010 Jacob Kaplan-Moss + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Certificate interface. +""" + +from novaclient import base + + +class Certificate(base.Resource): + def __repr__(self): + return "" % \ + (len(self.private_key) if self.private_key else 0, + len(self.data)) + + +class CertificateManager(base.Manager): + """ + Manage :class:`Certificate` resources. + """ + resource_class = Certificate + + def create(self): + """ + Create a x509 certificates for a user in tenant. + """ + return self._create('/os-certificates', {}, 'certificate') + + def get(self): + """ + Get root certificate. + """ + return self._get("/os-certificates/root", 'certificate') diff --git a/awx/lib/site-packages/novaclient/v1_1/client.py b/awx/lib/site-packages/novaclient/v1_1/client.py new file mode 100644 index 0000000000..50ab46cff8 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/client.py @@ -0,0 +1,171 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from novaclient import client +from novaclient.v1_1 import agents +from novaclient.v1_1 import certs +from novaclient.v1_1 import cloudpipe +from novaclient.v1_1 import aggregates +from novaclient.v1_1 import availability_zones +from novaclient.v1_1 import coverage_ext +from novaclient.v1_1 import flavors +from novaclient.v1_1 import flavor_access +from novaclient.v1_1 import floating_ip_dns +from novaclient.v1_1 import floating_ips +from novaclient.v1_1 import floating_ip_pools +from novaclient.v1_1 import fping +from novaclient.v1_1 import hosts +from novaclient.v1_1 import hypervisors +from novaclient.v1_1 import images +from novaclient.v1_1 import keypairs +from novaclient.v1_1 import limits +from novaclient.v1_1 import networks +from novaclient.v1_1 import quota_classes +from novaclient.v1_1 import quotas +from novaclient.v1_1 import security_group_rules +from novaclient.v1_1 import security_groups +from novaclient.v1_1 import servers +from novaclient.v1_1 import usage +from novaclient.v1_1 import virtual_interfaces +from novaclient.v1_1 import volumes +from novaclient.v1_1 import volume_snapshots +from novaclient.v1_1 import volume_types +from novaclient.v1_1 import services +from novaclient.v1_1 import fixed_ips +from novaclient.v1_1 import floating_ips_bulk + + +class Client(object): + """ + Top-level object to access the OpenStack Compute API. + + Create an instance with your creds:: + + >>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL) + + Then call methods on its managers:: + + >>> client.servers.list() + ... + >>> client.flavors.list() + ... + + """ + + # FIXME(jesse): project_id isn't required to authenticate + def __init__(self, username, api_key, project_id, auth_url=None, + insecure=False, timeout=None, proxy_tenant_id=None, + proxy_token=None, region_name=None, + endpoint_type='publicURL', extensions=None, + service_type='compute', service_name=None, + volume_service_name=None, timings=False, + bypass_url=None, os_cache=False, no_cache=True, + http_log_debug=False, auth_system='keystone', + auth_plugin=None, + cacert=None, tenant_id=None): + # FIXME(comstud): Rename the api_key argument above when we + # know it's not being used as keyword argument + password = api_key + self.projectid = project_id + self.tenant_id = tenant_id + self.flavors = flavors.FlavorManager(self) + self.flavor_access = flavor_access.FlavorAccessManager(self) + self.images = images.ImageManager(self) + self.limits = limits.LimitsManager(self) + self.servers = servers.ServerManager(self) + + # extensions + self.agents = agents.AgentsManager(self) + self.dns_domains = floating_ip_dns.FloatingIPDNSDomainManager(self) + self.dns_entries = floating_ip_dns.FloatingIPDNSEntryManager(self) + self.cloudpipe = cloudpipe.CloudpipeManager(self) + self.certs = certs.CertificateManager(self) + self.floating_ips = floating_ips.FloatingIPManager(self) + self.floating_ip_pools = floating_ip_pools.FloatingIPPoolManager(self) + self.fping = fping.FpingManager(self) + self.volumes = volumes.VolumeManager(self) + self.volume_snapshots = volume_snapshots.SnapshotManager(self) + self.volume_types = volume_types.VolumeTypeManager(self) + self.keypairs = keypairs.KeypairManager(self) + self.networks = networks.NetworkManager(self) + self.quota_classes = quota_classes.QuotaClassSetManager(self) + self.quotas = quotas.QuotaSetManager(self) + self.security_groups = security_groups.SecurityGroupManager(self) + self.security_group_rules = \ + security_group_rules.SecurityGroupRuleManager(self) + self.usage = 
usage.UsageManager(self)
+        self.virtual_interfaces = \
+            virtual_interfaces.VirtualInterfaceManager(self)
+        self.aggregates = aggregates.AggregateManager(self)
+        self.hosts = hosts.HostManager(self)
+        self.hypervisors = hypervisors.HypervisorManager(self)
+        self.services = services.ServiceManager(self)
+        self.fixed_ips = fixed_ips.FixedIPsManager(self)
+        self.floating_ips_bulk = floating_ips_bulk.FloatingIPBulkManager(self)
+        self.os_cache = os_cache or not no_cache
+        self.coverage = coverage_ext.CoverageManager(self)
+        self.availability_zones = \
+            availability_zones.AvailabilityZoneManager(self)
+
+        # Add in any extensions...
+        if extensions:
+            for extension in extensions:
+                if extension.manager_class:
+                    setattr(self, extension.name,
+                            extension.manager_class(self))
+
+        self.client = client.HTTPClient(username,
+                                        password,
+                                        projectid=project_id,
+                                        tenant_id=tenant_id,
+                                        auth_url=auth_url,
+                                        insecure=insecure,
+                                        timeout=timeout,
+                                        auth_system=auth_system,
+                                        auth_plugin=auth_plugin,
+                                        proxy_token=proxy_token,
+                                        proxy_tenant_id=proxy_tenant_id,
+                                        region_name=region_name,
+                                        endpoint_type=endpoint_type,
+                                        service_type=service_type,
+                                        service_name=service_name,
+                                        volume_service_name=volume_service_name,
+                                        timings=timings,
+                                        bypass_url=bypass_url,
+                                        os_cache=self.os_cache,
+                                        http_log_debug=http_log_debug,
+                                        cacert=cacert)
+
+    def set_management_url(self, url):
+        self.client.set_management_url(url)
+
+    def get_timings(self):
+        return self.client.get_timings()
+
+    def reset_timings(self):
+        self.client.reset_timings()
+
+    def authenticate(self):
+        """
+        Authenticate against the server.
+
+        Normally this is called automatically when you first access the API,
+        but you can call this method to force authentication right now.
+
+        Returns on success; raises :exc:`exceptions.Unauthorized` if the
+        credentials are wrong.
+        """
+        self.client.authenticate()
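For orientation, a minimal sketch of how this Client is typically wired up,
expanding the docstring above (editor's example, not part of the patch; the
credential values and URL are placeholders):

    from novaclient.v1_1 import Client

    # Positional arguments mirror __init__: username, api_key (password),
    # project_id, then auth_url as a keyword.
    nova = Client('demo', 's3cr3t', 'demo-project',
                  auth_url='http://keystone.example.com:5000/v2.0')

    # The first API call authenticates implicitly, or force it up front:
    nova.authenticate()

    for server in nova.servers.list():
        print(server.name)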
diff --git a/awx/lib/site-packages/novaclient/v1_1/cloudpipe.py b/awx/lib/site-packages/novaclient/v1_1/cloudpipe.py
new file mode 100644
index 0000000000..19a87d1727
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/cloudpipe.py
@@ -0,0 +1,61 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Cloudpipe interface."""
+
+from novaclient import base
+
+
+class Cloudpipe(base.Resource):
+    """A cloudpipe instance is a VPN attached to a project's VLAN."""
+
+    def __repr__(self):
+        return "<Cloudpipe: %s>" % self.project_id
+
+    def delete(self):
+        self.manager.delete(self)
+
+
+class CloudpipeManager(base.ManagerWithFind):
+    resource_class = Cloudpipe
+
+    def create(self, project):
+        """
+        Launch a cloudpipe instance.
+
+        :param project: name of the project for the cloudpipe
+        """
+        body = {'cloudpipe': {'project_id': project}}
+        return self._create('/os-cloudpipe', body, 'instance_id',
+                            return_raw=True)
+
+    def list(self):
+        """
+        Get a list of cloudpipe instances.
+        """
+        return self._list('/os-cloudpipe', 'cloudpipes')
+
+    def update(self, address, port):
+        """
+        Update the VPN address and port for all networks associated
+        with the project defined by authentication.
+
+        :param address: IP address
+        :param port: Port number
+        """
+
+        body = {'configure_project': {'vpn_ip': address,
+                                      'vpn_port': port}}
+        self._update("/os-cloudpipe/configure-project", body)
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/__init__.py b/awx/lib/site-packages/novaclient/v1_1/contrib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/assisted_volume_snapshots.py b/awx/lib/site-packages/novaclient/v1_1/contrib/assisted_volume_snapshots.py
new file mode 100644
index 0000000000..0f1773b497
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/assisted_volume_snapshots.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2013, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Assisted volume snapshots - to be used by Cinder and not end users.
+"""
+
+import json
+
+from novaclient import base
+
+
+class Snapshot(base.Resource):
+    def __repr__(self):
+        return "<Snapshot: %s>" % self.id
+
+    def delete(self):
+        """
+        Delete this snapshot.
+        """
+        self.manager.delete(self)
+
+
+class AssistedSnapshotManager(base.Manager):
+    resource_class = Snapshot
+
+    def create(self, volume_id, create_info):
+        body = {'snapshot': {'volume_id': volume_id,
+                             'create_info': create_info}}
+        return self._create('/os-assisted-volume-snapshots', body, 'snapshot')
+
+    def delete(self, snapshot, delete_info):
+        self._delete("/os-assisted-volume-snapshots/%s?delete_info=%s" % (
+            base.getid(snapshot), json.dumps(delete_info)))
+
+manager_class = AssistedSnapshotManager
+name = 'assisted_volume_snapshots'
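A brief sketch of how Cinder-side code would drive this extension (editor's
illustration; the volume ID and the create_info/delete_info keys are
placeholders -- both dicts are opaque payloads defined by the Nova API, and
delete_info is JSON-encoded into the query string on delete):

    from novaclient.v1_1 import client
    from novaclient.v1_1.contrib import assisted_volume_snapshots as avs

    nova = client.Client('user', 'password', 'project',
                         auth_url='http://keystone.example.com:5000/v2.0')
    manager = avs.AssistedSnapshotManager(nova)

    snap = manager.create('11111111-2222-3333-4444-555555555555',
                          {'type': 'qcow2', 'new_file': 'snap.qcow2'})
    manager.delete(snap, {'type': 'qcow2', 'volume_id': snap.id})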
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/baremetal.py b/awx/lib/site-packages/novaclient/v1_1/contrib/baremetal.py
new file mode 100644
index 0000000000..db1f54977c
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/baremetal.py
@@ -0,0 +1,303 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Baremetal interface (v2 extension).
+"""
+from novaclient import base
+from novaclient import utils
+
+
+class BareMetalNode(base.Resource):
+    """
+    A baremetal node (typically a physical server or an empty VM).
+    """
+
+    def __repr__(self):
+        return "<BareMetalNode: %s>" % self.id
+
+
+class BareMetalNodeInterface(base.Resource):
+    """
+    An interface belonging to a baremetal node.
+    """
+
+    def __repr__(self):
+        return "<BareMetalNodeInterface: %s>" % self.id
+
+
+class BareMetalNodeManager(base.ManagerWithFind):
+    """
+    Manage :class:`BareMetalNode` resources.
+    """
+    resource_class = BareMetalNode
+
+    def create(self,
+               service_host,
+               cpus,
+               memory_mb,
+               local_gb,
+               prov_mac_address,
+               pm_address=None,
+               pm_user=None,
+               pm_password=None,
+               terminal_port=None):
+        """
+        Create a baremetal node.
+
+        :param service_host: Name of controlling compute host
+        :param cpus: Number of CPUs in the node
+        :param memory_mb: Megabytes of RAM in the node
+        :param local_gb: Gigabytes of local storage in the node
+        :param pm_address: Power management IP for the node
+        :param pm_user: Username for the node's power management
+        :param pm_password: Password for the node's power management
+        :param prov_mac_address: MAC address to provision the node
+        :param terminal_port: ShellInABox port
+        :rtype: :class:`BareMetalNode`
+        """
+        body = {'node': {'service_host': service_host,
+                         'cpus': cpus,
+                         'memory_mb': memory_mb,
+                         'local_gb': local_gb,
+                         'pm_address': pm_address,
+                         'pm_user': pm_user,
+                         'pm_password': pm_password,
+                         'prov_mac_address': prov_mac_address,
+                         'terminal_port': terminal_port}}
+
+        return self._create('/os-baremetal-nodes', body, 'node')
+
+    def delete(self, node):
+        """
+        Delete a baremetal node.
+
+        :param node: The :class:`BareMetalNode` to delete.
+        """
+        self._delete('/os-baremetal-nodes/%s' % base.getid(node))
+
+    def get(self, node_id):
+        """
+        Get a baremetal node.
+
+        :param node_id: The ID of the node to get.
+        :rtype: :class:`BareMetalNode`
+        """
+        return self._get("/os-baremetal-nodes/%s" % node_id, 'node')
+
+    def list(self):
+        """
+        Get a list of all baremetal nodes.
+
+        :rtype: list of :class:`BareMetalNode`
+        """
+        return self._list('/os-baremetal-nodes', 'nodes')
+
+    def add_interface(self, node_id, address, datapath_id=0, port_no=0):
+        """
+        Add an interface to a baremetal node.
+
+        :param node_id: The ID of the node to modify.
+        :param address: The MAC address to add.
+        :param datapath_id: Datapath ID of OpenFlow switch for the interface
+        :param port_no: OpenFlow port number for the interface
+        :rtype: :class:`BareMetalNodeInterface`
+        """
+        body = {'add_interface': {'address': address,
+                                  'datapath_id': datapath_id,
+                                  'port_no': port_no}}
+        url = '/os-baremetal-nodes/%s/action' % node_id
+        _resp, body = self.api.client.post(url, body=body)
+        return BareMetalNodeInterface(self, body['interface'])
+
+    def remove_interface(self, node_id, address):
+        """
+        Remove an interface from a baremetal node.
+
+        :param node_id: The ID of the node to modify.
+        :param address: The MAC address to remove.
+        :rtype: bool
+        """
+        req_body = {'remove_interface': {'address': address}}
+        url = '/os-baremetal-nodes/%s/action' % node_id
+        self.api.client.post(url, body=req_body)
+
+    def list_interfaces(self, node_id):
+        """
+        List the interfaces on a baremetal node.
+
+        :param node_id: The ID of the node to list.
+        :rtype: list
+        """
+        interfaces = []
+        node = self._get("/os-baremetal-nodes/%s" % node_id, 'node')
+        for interface in node.interfaces:
+            interface_object = BareMetalNodeInterface(self, interface)
+            interfaces.append(interface_object)
+        return interfaces
+
+
+@utils.arg('service_host',
+           metavar='<service_host>',
+           help='Name of nova compute host which will control this baremetal node')
+@utils.arg('cpus',
+           metavar='<cpus>',
+           type=int,
+           help='Number of CPUs in the node')
+@utils.arg('memory_mb',
+           metavar='<memory_mb>',
+           type=int,
+           help='Megabytes of RAM in the node')
+@utils.arg('local_gb',
+           metavar='<local_gb>',
+           type=int,
+           help='Gigabytes of local storage in the node')
+@utils.arg('prov_mac_address',
+           metavar='<prov_mac_address>',
+           help='MAC address to provision the node')
+@utils.arg('--pm_address', default=None,
+           metavar='<pm_address>',
+           help='Power management IP for the node')
+@utils.arg('--pm_user', default=None,
+           metavar='<pm_user>',
+           help="Username for the node's power management")
+@utils.arg('--pm_password', default=None,
+           metavar='<pm_password>',
+           help="Password for the node's power management")
+@utils.arg('--terminal_port', default=None,
+           metavar='<terminal_port>',
+           type=int,
+           help='ShellInABox port')
+def do_baremetal_node_create(cs, args):
+    """Create a baremetal node."""
+    node = cs.baremetal.create(args.service_host, args.cpus,
+                               args.memory_mb, args.local_gb, args.prov_mac_address,
+                               pm_address=args.pm_address, pm_user=args.pm_user,
+                               pm_password=args.pm_password,
+                               terminal_port=args.terminal_port)
+    _print_baremetal_resource(node)
+
+
+@utils.arg('node',
+           metavar='<node>',
+           help='ID of the node to delete.')
+def do_baremetal_node_delete(cs, args):
+    """Remove a baremetal node and any associated interfaces."""
+    node = _find_baremetal_node(cs, args.node)
+    cs.baremetal.delete(node)
+
+
+def _translate_baremetal_node_keys(collection):
+    convert = [('service_host', 'host'),
+               ('local_gb', 'disk_gb'),
+               ('prov_mac_address', 'mac_address'),
+               ('pm_address', 'pm_address'),
+               ('pm_user', 'pm_username'),
+               ('pm_password', 'pm_password'),
+               ('terminal_port', 'terminal_port'),
+               ]
+    for item in collection:
+        keys = item.__dict__.keys()
+        for from_key, to_key in convert:
+            if from_key in keys and to_key not in keys:
+                setattr(item, to_key, item._info[from_key])
+
+
+def _print_baremetal_nodes_list(nodes):
+    """Print the list of baremetal nodes."""
+    _translate_baremetal_node_keys(nodes)
+    utils.print_list(nodes, [
+        'ID',
+        'Host',
+        'CPUs',
+        'Memory_MB',
+        'Disk_GB',
+        'MAC Address',
+        'PM Address',
+        'PM Username',
+        'PM Password',
+        'Terminal Port',
+        ])
+
+
+def do_baremetal_node_list(cs, _args):
+    """Print list of available baremetal nodes."""
+    nodes = cs.baremetal.list()
+    _print_baremetal_nodes_list(nodes)
+
+
+def _find_baremetal_node(cs, node):
+    """Get a node by ID."""
+    return utils.find_resource(cs.baremetal, node)
+
+
+def _print_baremetal_resource(resource):
+    """Print details of a baremetal resource."""
+    info = resource._info.copy()
+    utils.print_dict(info)
+
+
+def _print_baremetal_node_interfaces(interfaces):
+    """Print interfaces of a baremetal node."""
+    utils.print_list(interfaces, [
+        'ID',
+        'Datapath_ID',
+        'Port_No',
+        'Address',
+        ])
+
+
+@utils.arg('node',
+           metavar='<node>',
+           help="ID of node")
+def do_baremetal_node_show(cs, args):
+    """Show information about a baremetal node."""
+    node = _find_baremetal_node(cs, args.node)
+    _print_baremetal_resource(node)
+
+
+@utils.arg('node',
+           metavar='<node>',
+           help="ID of node")
+@utils.arg('address',
+           metavar='<address>',
+           help="MAC address of interface")
+@utils.arg('--datapath_id',
+           default=0,
+           metavar='<datapath_id>',
+           help="OpenFlow Datapath ID of interface")
+@utils.arg('--port_no',
+           default=0,
+           metavar='<port_no>',
+           help="OpenFlow port number of interface")
+def do_baremetal_interface_add(cs, args):
+    """Add a network interface to a baremetal node."""
+    bmif = cs.baremetal.add_interface(args.node, args.address,
+                                      args.datapath_id, args.port_no)
+    _print_baremetal_resource(bmif)
+
+
+@utils.arg('node', metavar='<node>', help="ID of node")
+@utils.arg('address', metavar='<address>', help="MAC address of interface")
+def do_baremetal_interface_remove(cs, args):
+    """Remove a network interface from a baremetal node."""
+    cs.baremetal.remove_interface(args.node, args.address)
+
+
+@utils.arg('node', metavar='<node>', help="ID of node")
+def do_baremetal_interface_list(cs, args):
+    """List network interfaces associated with a baremetal node."""
+    interfaces = cs.baremetal.list_interfaces(args.node)
+    _print_baremetal_node_interfaces(interfaces)
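The manager and the shell commands above mirror each other; a minimal sketch
of driving the manager directly (editor's illustration, all values are
placeholders):

    # Assumes `nova` is an authenticated v1_1 Client with the baremetal
    # extension loaded as `nova.baremetal`.
    node = nova.baremetal.create('compute-1', cpus=8, memory_mb=16384,
                                 local_gb=500,
                                 prov_mac_address='aa:bb:cc:dd:ee:ff')
    nova.baremetal.add_interface(node.id, '11:22:33:44:55:66')
    for iface in nova.baremetal.list_interfaces(node.id):
        print(iface.address)
    nova.baremetal.delete(node)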
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/cells.py b/awx/lib/site-packages/novaclient/v1_1/contrib/cells.py
new file mode 100644
index 0000000000..a6cd78a4ea
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/cells.py
@@ -0,0 +1,70 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+from novaclient import utils
+
+
+class Cell(base.Resource):
+    def __repr__(self):
+        return "<Cell: %s>" % self.name
+
+
+class CellsManager(base.Manager):
+    resource_class = Cell
+
+    def get(self, cell_name):
+        """
+        Get a cell.
+
+        :param cell_name: Name of the :class:`Cell` to get.
+        :rtype: :class:`Cell`
+        """
+        return self._get("/os-cells/%s" % cell_name, "cell")
+
+    def capacities(self, cell_name=None):
+        """
+        Get capacities for a cell.
+
+        :param cell_name: Name of the :class:`Cell` to get capacities for.
+        :rtype: :class:`Cell`
+        """
+        # With no cell_name the boolean index selects the global path.
+        path = ["%s/capacities" % cell_name, "capacities"][cell_name is None]
+        return self._get("/os-cells/%s" % path, "cell")
+
+
+@utils.arg('cell',
+           metavar='<cell>',
+           help='Name of the cell.')
+def do_cell_show(cs, args):
+    """Show details of a given cell."""
+    cell = cs.cells.get(args.cell)
+    utils.print_dict(cell._info)
+
+
+@utils.arg('--cell',
+           metavar='<cell>',
+           help="Name of the cell to get the capacities.",
+           default=None)
+def do_cell_capacities(cs, args):
+    """Get cell capacities for all cells or a given cell."""
+    cell = cs.cells.capacities(args.cell)
+    print("Ram Available: %s MB" % cell.capacities['ram_free']['total_mb'])
+    utils.print_dict(cell.capacities['ram_free']['units_by_mb'],
+                     dict_property='Ram(MB)', dict_value="Units")
+    print("\nDisk Available: %s MB" % cell.capacities['disk_free']['total_mb'])
+    utils.print_dict(cell.capacities['disk_free']['units_by_mb'],
+                     dict_property='Disk(MB)', dict_value="Units")
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/deferred_delete.py b/awx/lib/site-packages/novaclient/v1_1/contrib/deferred_delete.py
new file mode 100644
index 0000000000..1412702d57
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/deferred_delete.py
@@ -0,0 +1,27 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from novaclient import utils
+
+
+@utils.arg('server', metavar='<server>', help='Name or ID of server.')
+def do_force_delete(cs, args):
+    """Force delete a server."""
+    utils.find_resource(cs.servers, args.server).force_delete()
+
+
+@utils.arg('server', metavar='<server>', help='Name or ID of server.')
+def do_restore(cs, args):
+    """Restore a soft-deleted server."""
+    utils.find_resource(cs.servers, args.server).restore()
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/host_evacuate.py b/awx/lib/site-packages/novaclient/v1_1/contrib/host_evacuate.py
new file mode 100644
index 0000000000..c8acef8b51
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/host_evacuate.py
@@ -0,0 +1,59 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+from novaclient import utils
+
+
+class EvacuateHostResponse(base.Resource):
+    pass
+
+
+def _server_evacuate(cs, server, args):
+    success = True
+    error_message = ""
+    try:
+        cs.servers.evacuate(server['uuid'], args.target_host,
+                            args.on_shared_storage)
+    except Exception as e:
+        success = False
+        error_message = "Error while evacuating instance: %s" % e
+    return EvacuateHostResponse(base.Manager,
+                                {"server_uuid": server['uuid'],
+                                 "evacuate_accepted": success,
+                                 "error_message": error_message})
+
+
+@utils.arg('host', metavar='<host>', help='Name of host.')
+@utils.arg('--target_host',
+           metavar='<target_host>',
+           default=None,
+           help='Name of target host.')
+@utils.arg('--on-shared-storage',
+           dest='on_shared_storage',
+           action="store_true",
+           default=False,
+           help='Specifies whether all instance files are on shared storage')
+def do_host_evacuate(cs, args):
+    """Evacuate all instances from a failed host to a specified one."""
+    hypervisors = cs.hypervisors.search(args.host, servers=True)
+    response = []
+    for hyper in hypervisors:
+        if hasattr(hyper, 'servers'):
+            for server in hyper.servers:
+                response.append(_server_evacuate(cs, server, args))
+
+    utils.print_list(response,
+                     ["Server UUID", "Evacuate Accepted", "Error Message"])
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/host_servers_migrate.py b/awx/lib/site-packages/novaclient/v1_1/contrib/host_servers_migrate.py
new file mode 100644
index 0000000000..3076dc51dd
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/host_servers_migrate.py
@@ -0,0 +1,49 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+from novaclient import utils
+
+
+class HostServersMigrateResponse(base.Resource):
+    pass
+
+
+def _server_migrate(cs, server):
+    success = True
+    error_message = ""
+    try:
+        cs.servers.migrate(server['uuid'])
+    except Exception as e:
+        success = False
+        error_message = "Error while migrating instance: %s" % e
+    return HostServersMigrateResponse(base.Manager,
+                                      {"server_uuid": server['uuid'],
+                                       "migration_accepted": success,
+                                       "error_message": error_message})
+
+
+@utils.arg('host', metavar='<host>', help='Name of host.')
+def do_host_servers_migrate(cs, args):
+    """Migrate all instances of the specified host to other available hosts."""
+    hypervisors = cs.hypervisors.search(args.host, servers=True)
+    response = []
+    for hyper in hypervisors:
+        if hasattr(hyper, 'servers'):
+            for server in hyper.servers:
+                response.append(_server_migrate(cs, server))
+
+    utils.print_list(response,
+                     ["Server UUID", "Migration Accepted", "Error Message"])
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/instance_action.py b/awx/lib/site-packages/novaclient/v1_1/contrib/instance_action.py
new file mode 100644
index 0000000000..6e64d0b50e
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/instance_action.py
@@ -0,0 +1,66 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pprint
+
+from novaclient import base
+from novaclient import utils
+
+
+class InstanceActionManager(base.ManagerWithFind):
+    resource_class = base.Resource
+
+    def get(self, server, request_id):
+        """
+        Get details of an action performed on an instance.
+
+        :param request_id: The request_id of the action to get.
+        """
+        return self._get("/servers/%s/os-instance-actions/%s" %
+                         (base.getid(server), request_id), 'instanceAction')
+
+    def list(self, server):
+        """
+        Get a list of actions performed on a server.
+        """
+        return self._list('/servers/%s/os-instance-actions' %
+                          base.getid(server), 'instanceActions')
+
+
+@utils.arg('server',
+           metavar='<server>',
+           help='Name or UUID of the server to show an action for.')
+@utils.arg('request_id',
+           metavar='<request_id>',
+           help='Request ID of the action to get.')
+def do_instance_action(cs, args):
+    """Show an action."""
+    server = utils.find_resource(cs.servers, args.server)
+    action_resource = cs.instance_action.get(server, args.request_id)
+    action = action_resource._info
+    if 'events' in action:
+        action['events'] = pprint.pformat(action['events'])
+    utils.print_dict(action)
+
+
+@utils.arg('server',
+           metavar='<server>',
+           help='Name or UUID of the server to list actions for.')
+def do_instance_action_list(cs, args):
+    """List actions on a server."""
+    server = utils.find_resource(cs.servers, args.server)
+    actions = cs.instance_action.list(server)
+    utils.print_list(actions,
+                     ['Action', 'Request_ID', 'Message', 'Start_Time'], sortby_index=3)
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/list_extensions.py b/awx/lib/site-packages/novaclient/v1_1/contrib/list_extensions.py
new file mode 100644
index 0000000000..7eb9f16c8a
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/list_extensions.py
@@ -0,0 +1,46 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+from novaclient import utils
+
+
+class ListExtResource(base.Resource):
+    @property
+    def summary(self):
+        descr = self.description.strip()
+        if not descr:
+            return '??'
+        lines = descr.split("\n")
+        if len(lines) == 1:
+            return lines[0]
+        else:
+            return lines[0] + "..."
+
+
+class ListExtManager(base.Manager):
+    resource_class = ListExtResource
+
+    def show_all(self):
+        return self._list("/extensions", 'extensions')
+
+
+def do_list_extensions(client, _args):
+    """
+    List all the os-api extensions that are available.
+    """
+    extensions = client.list_extensions.show_all()
+    fields = ["Name", "Summary", "Alias", "Updated"]
+    utils.print_list(extensions, fields)
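As a quick illustration of the discovery call above (editor's sketch; assumes
`nova` is an authenticated v1_1 Client with this manager attached as
`nova.list_extensions`):

    for ext in nova.list_extensions.show_all():
        # `summary` collapses multi-line descriptions to their first line.
        print(ext.name, '-', ext.summary)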
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/metadata_extensions.py b/awx/lib/site-packages/novaclient/v1_1/contrib/metadata_extensions.py
new file mode 100644
index 0000000000..16e4ad13de
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/metadata_extensions.py
@@ -0,0 +1,43 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import utils
+from novaclient.v1_1 import shell
+
+
+@utils.arg('host',
+           metavar='<host>',
+           help='Name of host.')
+@utils.arg('action',
+           metavar='<action>',
+           choices=['set', 'delete'],
+           help="Actions: 'set' or 'delete'")
+@utils.arg('metadata',
+           metavar='<key=value>',
+           nargs='+',
+           action='append',
+           default=[],
+           help='Metadata to set or delete (only key is necessary on delete)')
+def do_host_meta(cs, args):
+    """Set or Delete metadata on all instances of a host."""
+    hypervisors = cs.hypervisors.search(args.host, servers=True)
+    for hyper in hypervisors:
+        metadata = shell._extract_metadata(args)
+        if hasattr(hyper, 'servers'):
+            for server in hyper.servers:
+                if args.action == 'set':
+                    cs.servers.set_meta(server['uuid'], metadata)
+                elif args.action == 'delete':
+                    cs.servers.delete_meta(server['uuid'], metadata.keys())
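What do_host_meta amounts to programmatically (editor's sketch; the host name
and metadata are placeholders, and `nova` is assumed to be an authenticated
v1_1 Client):

    metadata = {'environment': 'production'}
    for hyper in nova.hypervisors.search('compute-1', servers=True):
        for server in getattr(hyper, 'servers', []):
            nova.servers.set_meta(server['uuid'], metadata)           # 'set'
            nova.servers.delete_meta(server['uuid'], list(metadata))  # 'delete'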
diff --git a/awx/lib/site-packages/novaclient/v1_1/contrib/migrations.py b/awx/lib/site-packages/novaclient/v1_1/contrib/migrations.py
new file mode 100644
index 0000000000..ee2f49963b
--- /dev/null
+++ b/awx/lib/site-packages/novaclient/v1_1/contrib/migrations.py
@@ -0,0 +1,85 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+migration interface
+"""
+
+import urllib
+
+from novaclient import base
+from novaclient import utils
+
+
+class Migration(base.Resource):
+    def __repr__(self):
+        return "<Migration: %s>" % self.id
+
+
+class MigrationManager(base.ManagerWithFind):
+    resource_class = Migration
+
+    def list(self, host=None, status=None, cell_name=None):
+        """
+        Get a list of migrations.
+
+        :param host: (optional) filter migrations by host name.
+        :param status: (optional) filter migrations by status.
+        :param cell_name: (optional) filter migrations for a cell.
+        """
+        opts = {}
+        if host:
+            opts['host'] = host
+        if status:
+            opts['status'] = status
+        if cell_name:
+            opts['cell_name'] = cell_name
+
+        query_string = "?%s" % urllib.urlencode(opts) if opts else ""
+
+        return self._list("/os-migrations%s" % query_string, "migrations")
+
+
+@utils.arg('--host',
+           dest='host',
+           metavar='<host>',
+           help='Fetch migrations for the given host.')
+@utils.arg('--status',
+           dest='status',
+           metavar='<status>',
+           help='Fetch migrations for the given status.')
+@utils.arg('--cell_name',
+           dest='cell_name',
+           metavar='<cell_name>',
+           help='Fetch migrations for the given cell_name.')
+def do_migration_list(cs, args):
+    """Print a list of migrations."""
+    _print_migrations(cs.migrations.list(args.host, args.status,
+                                         args.cell_name))
+
+
+def _print_migrations(migrations):
+    fields = ['Source Node', 'Dest Node', 'Source Compute', 'Dest Compute',
+              'Dest Host', 'Status', 'Instance UUID', 'Old Flavor',
+              'New Flavor', 'Created At', 'Updated At']
+
+    def old_flavor(migration):
+        return migration.old_instance_type_id
+
+    def new_flavor(migration):
+        return migration.new_instance_type_id
+
+    formatters = {'Old Flavor': old_flavor, 'New Flavor': new_flavor}
+
+    utils.print_list(migrations, fields, formatters)
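A minimal sketch of using the manager above (editor's illustration; the
host/status values are placeholders and `nova` is assumed to be an
authenticated v1_1 Client with `nova.migrations` attached):

    # Filters combine into a query string, e.g.
    # GET /os-migrations?host=compute-1&status=finished
    for m in nova.migrations.list(host='compute-1', status='finished'):
        print(m.instance_uuid, m.status)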
diff --git a/awx/lib/site-packages/novaclient/v1_1/coverage_ext.py b/awx/lib/site-packages/novaclient/v1_1/coverage_ext.py new file mode 100644 index 0000000000..92fc5a880a --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/coverage_ext.py @@ -0,0 +1,60 @@
+# Copyright 2012 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+
+
+class Coverage(base.Resource):
+    def __repr__(self):
+        return "<Coverage: %s>" % self.name
+
+
+class CoverageManager(base.Manager):
+
+    resource_class = Coverage
+
+    def start(self, combine=False, **kwargs):
+        body = {'start': {}}
+        if combine:
+            body['start'] = {'combine': True}
+        self.run_hooks('modify_body_for_action', body)
+        url = '/os-coverage/action'
+        return self.api.client.post(url, body=body)
+
+    def stop(self):
+        body = {'stop': {}}
+        self.run_hooks('modify_body_for_action', body)
+        url = '/os-coverage/action'
+        return self.api.client.post(url, body=body)
+
+    def report(self, filename, xml=False, html=False):
+        body = {
+            'report': {
+                'file': filename,
+            }
+        }
+        if xml:
+            body['report']['xml'] = True
+        elif html:
+            body['report']['html'] = True
+        self.run_hooks('modify_body_for_action', body)
+        url = '/os-coverage/action'
+        return self.api.client.post(url, body=body)
+
+    def reset(self):
+        body = {'reset': {}}
+        self.run_hooks('modify_body_for_action', body)
+        url = '/os-coverage/action'
+        return self.api.client.post(url, body=body)
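The coverage manager above maps one-to-one onto the os-coverage actions. A sketch, assuming the manager is exposed on the client as `cs.coverage` (the attribute name is not shown in this diff) and that the deployment enables the extension:

    # Sketch: collect and report code coverage for a test deployment.
    cs.coverage.start(combine=True)          # begin gathering coverage data
    # ... exercise the cloud under test ...
    cs.coverage.report('/tmp/nova-coverage', xml=True)  # or html=True
    cs.coverage.stop()
    cs.coverage.reset()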
+""" + +from novaclient import base + + +class FixedIP(base.Resource): + def __repr__(self): + return "" % self.address + + +class FixedIPsManager(base.Manager): + resource_class = FixedIP + + def get(self, fixed_ip): + """ + Show information for a Fixed IP + + :param fixed_ip: Fixed IP address to get info for + """ + return self._get('/os-fixed-ips/%s' % base.getid(fixed_ip), + "fixed_ip") + + def reserve(self, fixed_ip): + """Reserve a Fixed IP + + :param fixed_ip: Fixed IP address to reserve + """ + body = {"reserve": None} + self.api.client.post('/os-fixed-ips/%s/action' % base.getid(fixed_ip), + body=body) + + def unreserve(self, fixed_ip): + """Unreserve a Fixed IP + + :param fixed_ip: Fixed IP address to unreserve + """ + body = {"unreserve": None} + self.api.client.post('/os-fixed-ips/%s/action' % base.getid(fixed_ip), + body=body) diff --git a/awx/lib/site-packages/novaclient/v1_1/flavor_access.py b/awx/lib/site-packages/novaclient/v1_1/flavor_access.py new file mode 100644 index 0000000000..b314040fe1 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/flavor_access.py @@ -0,0 +1,68 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Flavor access interface.""" + +from novaclient import base + + +class FlavorAccess(base.Resource): + def __repr__(self): + return "" % self.name + + +class FlavorAccessManager(base.ManagerWithFind): + """ + Manage :class:`FlavorAccess` resources. 
+ """ + resource_class = FlavorAccess + + def list(self, **kwargs): + if kwargs.get('flavor', None): + return self._list_by_flavor(kwargs['flavor']) + elif kwargs.get('tenant', None): + return self._list_by_tenant(kwargs['tenant']) + else: + raise NotImplementedError('Unknown list options.') + + def _list_by_flavor(self, flavor): + return self._list('/flavors/%s/os-flavor-access' % base.getid(flavor), + 'flavor_access') + + def _list_by_tenant(self, tenant): + """Print flavor list shared with the given tenant.""" + # TODO(uni): need to figure out a proper URI for list_by_tenant + # since current API already provided current tenant_id information + raise NotImplementedError('Sorry, query by tenant not supported.') + + def add_tenant_access(self, flavor, tenant): + """Add a tenant to the given flavor access list.""" + info = {'tenant': tenant} + return self._action('addTenantAccess', flavor, info) + + def remove_tenant_access(self, flavor, tenant): + """Remove a tenant from the given flavor access list.""" + info = {'tenant': tenant} + return self._action('removeTenantAccess', flavor, info) + + def _action(self, action, flavor, info, **kwargs): + """Perform a flavor action.""" + body = {action: info} + self.run_hooks('modify_body_for_action', body, **kwargs) + url = '/flavors/%s/action' % base.getid(flavor) + _resp, body = self.api.client.post(url, body=body) + + return [self.resource_class(self, res) + for res in body['flavor_access']] diff --git a/awx/lib/site-packages/novaclient/v1_1/flavors.py b/awx/lib/site-packages/novaclient/v1_1/flavors.py new file mode 100644 index 0000000000..61f615c0f2 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/flavors.py @@ -0,0 +1,188 @@ +# Copyright 2010 Jacob Kaplan-Moss +""" +Flavor interface. +""" +from novaclient import base +from novaclient import exceptions +from novaclient import utils +from novaclient.openstack.common.py3kcompat import urlutils + + +class Flavor(base.Resource): + """ + A flavor is an available hardware configuration for a server. + """ + HUMAN_ID = True + + def __repr__(self): + return "" % self.name + + @property + def ephemeral(self): + """ + Provide a user-friendly accessor to OS-FLV-EXT-DATA:ephemeral + """ + return self._info.get("OS-FLV-EXT-DATA:ephemeral", 'N/A') + + @property + def is_public(self): + """ + Provide a user-friendly accessor to os-flavor-access:is_public + """ + return self._info.get("os-flavor-access:is_public", 'N/A') + + def get_keys(self): + """ + Get extra specs from a flavor. + + :param flavor: The :class:`Flavor` to get extra specs from + """ + _resp, body = self.manager.api.client.get( + "/flavors/%s/os-extra_specs" % + base.getid(self)) + return body["extra_specs"] + + def set_keys(self, metadata): + """ + Set extra specs on a flavor. + + :param flavor: The :class:`Flavor` to set extra spec on + :param metadata: A dict of key/value pairs to be set + """ + body = {'extra_specs': metadata} + return self.manager._create( + "/flavors/%s/os-extra_specs" % base.getid(self), + body, + "extra_specs", + return_raw=True) + + def unset_keys(self, keys): + """ + Unset extra specs on a flavor. + + :param flavor: The :class:`Flavor` to unset extra spec on + :param keys: A list of keys to be unset + """ + for k in keys: + return self.manager._delete( + "/flavors/%s/os-extra_specs/%s" % ( + base.getid(self), k)) + + def delete(self): + """ + Delete this flavor. + """ + self.manager.delete(self) + + +class FlavorManager(base.ManagerWithFind): + """ + Manage :class:`Flavor` resources. 
+ """ + resource_class = Flavor + is_alphanum_id_allowed = True + + def list(self, detailed=True, is_public=True): + """ + Get a list of all flavors. + + :rtype: list of :class:`Flavor`. + """ + qparams = {} + # is_public is ternary - None means give all flavors. + # By default Nova assumes True and gives admins public flavors + # and flavors from their own projects only. + if not is_public: + qparams['is_public'] = is_public + query_string = "?%s" % urlutils.urlencode(qparams) if qparams else "" + + detail = "" + if detailed: + detail = "/detail" + + return self._list("/flavors%s%s" % (detail, query_string), "flavors") + + def get(self, flavor): + """ + Get a specific flavor. + + :param flavor: The ID of the :class:`Flavor` to get. + :rtype: :class:`Flavor` + """ + return self._get("/flavors/%s" % base.getid(flavor), "flavor") + + def delete(self, flavor): + """ + Delete a specific flavor. + + :param flavor: The ID of the :class:`Flavor` to get. + :param purge: Whether to purge record from the database + """ + self._delete("/flavors/%s" % base.getid(flavor)) + + def create(self, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """ + Create (allocate) a floating ip for a tenant + + :param name: Descriptive name of the flavor + :param ram: Memory in MB for the flavor + :param vcpu: Number of VCPUs for the flavor + :param disk: Size of local disk in GB + :param flavorid: ID for the flavor (optional). You can use the reserved + value ``"auto"`` to have Nova generate a UUID for the + flavor in cases where you cannot simply pass ``None``. + :param swap: Swap space in MB + :param rxtx_factor: RX/TX factor + :rtype: :class:`Flavor` + """ + + try: + ram = int(ram) + except (TypeError, ValueError): + raise exceptions.CommandError("Ram must be an integer.") + try: + vcpus = int(vcpus) + except (TypeError, ValueError): + raise exceptions.CommandError("VCPUs must be an integer.") + try: + disk = int(disk) + except (TypeError, ValueError): + raise exceptions.CommandError("Disk must be an integer.") + + if flavorid == "auto": + flavorid = None + + try: + swap = int(swap) + except (TypeError, ValueError): + raise exceptions.CommandError("Swap must be an integer.") + try: + ephemeral = int(ephemeral) + except (TypeError, ValueError): + raise exceptions.CommandError("Ephemeral must be an integer.") + try: + rxtx_factor = float(rxtx_factor) + except (TypeError, ValueError): + raise exceptions.CommandError("rxtx_factor must be a float.") + + try: + is_public = utils.bool_from_str(is_public) + except Exception: + raise exceptions.CommandError("is_public must be a boolean.") + + body = { + "flavor": { + "name": name, + "ram": ram, + "vcpus": vcpus, + "disk": disk, + "id": flavorid, + "swap": swap, + "OS-FLV-EXT-DATA:ephemeral": ephemeral, + "rxtx_factor": rxtx_factor, + "os-flavor-access:is_public": is_public, + } + } + + return self._create("/flavors", body, "flavor") diff --git a/awx/lib/site-packages/novaclient/v1_1/floating_ip_dns.py b/awx/lib/site-packages/novaclient/v1_1/floating_ip_dns.py new file mode 100644 index 0000000000..a5dfcb8d96 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/floating_ip_dns.py @@ -0,0 +1,135 @@ +# Copyright 2011 Andrew Bogott for The Wikimedia Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+
+
+class FloatingIPDNSDomainManager(base.Manager):
+    resource_class = FloatingIPDNSDomain
+
+    def domains(self):
+        """Return the list of available dns domains."""
+        return self._list("/os-floating-ip-dns", "domain_entries")
+
+    def create_private(self, fqdomain, availability_zone):
+        """Add or modify a private DNS domain."""
+        body = {'domain_entry':
+                {'scope': 'private',
+                 'availability_zone': availability_zone}}
+        return self._update('/os-floating-ip-dns/%s' % _quote_domain(fqdomain),
+                            body,
+                            'domain_entry')
+
+    def create_public(self, fqdomain, project):
+        """Add or modify a public DNS domain."""
+        body = {'domain_entry':
+                {'scope': 'public',
+                 'project': project}}
+
+        return self._update('/os-floating-ip-dns/%s' % _quote_domain(fqdomain),
+                            body,
+                            'domain_entry')
+
+    def delete(self, fqdomain):
+        """Delete the specified domain."""
+        self._delete("/os-floating-ip-dns/%s" % _quote_domain(fqdomain))
+
+
+class FloatingIPDNSEntry(base.Resource):
+    def delete(self):
+        self.manager.delete(self.name, self.domain)
+
+    def create(self):
+        self.manager.create(self.domain, self.name,
+                            self.ip, self.dns_type)
+
+    def get(self):
+        return self.manager.get(self.domain, self.name)
+
+
+class FloatingIPDNSEntryManager(base.Manager):
+    resource_class = FloatingIPDNSEntry
+
+    def get(self, domain, name):
+        """Return the entry for the given domain and name."""
+        return self._get("/os-floating-ip-dns/%s/entries/%s" %
+                         (_quote_domain(domain), name),
+                         "dns_entry")
+
+    def get_for_ip(self, domain, ip):
+        """Return a list of entries for the given domain and ip."""
+        qparams = {'ip': ip}
+        params = "?%s" % urlutils.urlencode(qparams)
+
+        return self._list("/os-floating-ip-dns/%s/entries%s" %
+                          (_quote_domain(domain), params),
+                          "dns_entries")
+
+    def create(self, domain, name, ip, dns_type):
+        """Add a new DNS entry."""
+        body = {'dns_entry':
+                {'ip': ip,
+                 'dns_type': dns_type}}
+
+        return self._update("/os-floating-ip-dns/%s/entries/%s" %
+                            (_quote_domain(domain), name),
+                            body,
+                            "dns_entry")
+
+    def modify_ip(self, domain, name, ip):
+        """Modify an existing DNS entry."""
+        body = {'dns_entry':
+                {'ip': ip,
+                 'dns_type': 'A'}}
+
+        return self._update("/os-floating-ip-dns/%s/entries/%s" %
+                            (_quote_domain(domain), name),
+                            body,
+                            "dns_entry")
+
+    def delete(self, domain, name):
+        """Delete entry specified by name and domain."""
+        self._delete("/os-floating-ip-dns/%s/entries/%s" %
+                     (_quote_domain(domain), name))
diff --git a/awx/lib/site-packages/novaclient/v1_1/floating_ip_pools.py b/awx/lib/site-packages/novaclient/v1_1/floating_ip_pools.py new file mode 100644 index 0000000000..7666bd57f8 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/floating_ip_pools.py @@ -0,0 +1,32 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+
+
+class FloatingIPPool(base.Resource):
+    def __repr__(self):
+        return "<FloatingIPPool: %s>" % self.name
+
+
+class FloatingIPPoolManager(base.ManagerWithFind):
+    resource_class = FloatingIPPool
+
+    def list(self):
+        """
+        Retrieve a list of all floating ip pools.
+        """
+        return self._list('/os-floating-ip-pools', 'floating_ip_pools')
diff --git a/awx/lib/site-packages/novaclient/v1_1/floating_ips.py b/awx/lib/site-packages/novaclient/v1_1/floating_ips.py new file mode 100644 index 0000000000..503e6269c0 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/floating_ips.py @@ -0,0 +1,56 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+
+
+class FloatingIP(base.Resource):
+    def delete(self):
+        """
+        Delete this floating ip
+        """
+        self.manager.delete(self)
+
+
+class FloatingIPManager(base.ManagerWithFind):
+    resource_class = FloatingIP
+
+    def list(self):
+        """
+        List floating ips for a tenant
+        """
+        return self._list("/os-floating-ips", "floating_ips")
+
+    def create(self, pool=None):
+        """
+        Create (allocate) a floating ip for a tenant
+        """
+        return self._create("/os-floating-ips", {'pool': pool}, "floating_ip")
+
+    def delete(self, floating_ip):
+        """
+        Delete (deallocate) a floating ip for a tenant
+
+        :param floating_ip: The floating ip address to delete.
+        """
+        self._delete("/os-floating-ips/%s" % base.getid(floating_ip))
+
+    def get(self, floating_ip):
+        """
+        Retrieve a floating ip
+        """
+        return self._get("/os-floating-ips/%s" % base.getid(floating_ip),
+                         "floating_ip")
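A sketch tying together the pool and floating-IP managers above; `cs.floating_ips` and `cs.floating_ip_pools` are the conventional client attribute names (not shown in this diff):

    # Sketch: allocate a floating IP from the first available pool.
    pools = cs.floating_ip_pools.list()
    fip = cs.floating_ips.create(pool=pools[0].name if pools else None)
    print(fip.ip)        # address field name follows the API response
    fip.delete()         # equivalent to cs.floating_ips.delete(fip)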
+ """ + self._delete("/os-floating-ips/%s" % base.getid(floating_ip)) + + def get(self, floating_ip): + """ + Retrieve a floating ip + """ + return self._get("/os-floating-ips/%s" % base.getid(floating_ip), + "floating_ip") diff --git a/awx/lib/site-packages/novaclient/v1_1/floating_ips_bulk.py b/awx/lib/site-packages/novaclient/v1_1/floating_ips_bulk.py new file mode 100644 index 0000000000..1eeaaaa230 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/floating_ips_bulk.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Bulk Floating IPs interface +""" +from novaclient import base + + +class FloatingIP(base.Resource): + def __repr__(self): + return "" % self.address + + +class FloatingIPBulkManager(base.ManagerWithFind): + resource_class = FloatingIP + + def list(self, host=None): + """ + List all floating IPs + """ + if host is None: + return self._list('/os-floating-ips-bulk', 'floating_ip_info') + else: + return self._list('/os-floating-ips-bulk/%s' % host, + 'floating_ip_info') + + def create(self, ip_range, pool=None, interface=None): + """ + Create floating IPs by range + """ + body = {"floating_ips_bulk_create": {'ip_range': ip_range}} + if pool is not None: + body['floating_ips_bulk_create']['pool'] = pool + if interface is not None: + body['floating_ips_bulk_create']['interface'] = interface + + return self._create('/os-floating-ips-bulk', body, + 'floating_ips_bulk_create') + + def delete(self, ip_range): + """ + Delete floating IPs by range + """ + body = {"ip_range": ip_range} + return self._update('/os-floating-ips-bulk/delete', body) diff --git a/awx/lib/site-packages/novaclient/v1_1/fping.py b/awx/lib/site-packages/novaclient/v1_1/fping.py new file mode 100644 index 0000000000..36ecac63a5 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/fping.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Fping interface. +""" + +from novaclient import base + + +class Fping(base.Resource): + """ + A server to fping. + """ + HUMAN_ID = True + + def __repr__(self): + return "" % self.id + + +class FpingManager(base.ManagerWithFind): + """ + Manage :class:`Fping` resources. + """ + resource_class = Fping + + def list(self, all_tenants=False, include=[], exclude=[]): + """ + Fping all servers. + + :rtype: list of :class:`Fping`. 
+ """ + params = [] + if all_tenants: + params.append("all_tenants=1") + if include: + params.append("include=%s" % ",".join(include)) + elif exclude: + params.append("exclude=%s" % ",".join(exclude)) + uri = "/os-fping" + if params: + uri = "%s?%s" % (uri, "&".join(params)) + return self._list(uri, "servers") + + def get(self, server): + """ + Fping a specific server. + + :param network: ID of the server to fping. + :rtype: :class:`Fping` + """ + return self._get("/os-fping/%s" % base.getid(server), "server") diff --git a/awx/lib/site-packages/novaclient/v1_1/hosts.py b/awx/lib/site-packages/novaclient/v1_1/hosts.py new file mode 100644 index 0000000000..0b641d44c9 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/hosts.py @@ -0,0 +1,70 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +host interface (1.1 extension). +""" +from novaclient import base + + +class Host(base.Resource): + def __repr__(self): + return "" % self.host_name + + def _add_details(self, info): + dico = 'resource' in info and info['resource'] or info + for (k, v) in dico.items(): + setattr(self, k, v) + + def update(self, values): + return self.manager.update(self.host, values) + + def startup(self): + return self.manager.host_action(self.host, 'startup') + + def shutdown(self): + return self.manager.host_action(self.host, 'shutdown') + + def reboot(self): + return self.manager.host_action(self.host, 'reboot') + + +class HostManager(base.ManagerWithFind): + resource_class = Host + + def get(self, host): + """ + Describes cpu/memory/hdd info for host. + + :param host: destination host name. + """ + return self._list("/os-hosts/%s" % host, "host") + + def update(self, host, values): + """Update status or maintenance mode for the host.""" + return self._update("/os-hosts/%s" % host, values) + + def host_action(self, host, action): + """Perform an action on a host.""" + url = '/os-hosts/{0}/{1}'.format(host, action) + return self.api.client.get(url) + + def list(self, zone=None): + url = '/os-hosts' + if zone: + url = '/os-hosts?zone=%s' % zone + return self._list(url, "hosts") + + list_all = list diff --git a/awx/lib/site-packages/novaclient/v1_1/hypervisors.py b/awx/lib/site-packages/novaclient/v1_1/hypervisors.py new file mode 100644 index 0000000000..fae04026a0 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/hypervisors.py @@ -0,0 +1,72 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/awx/lib/site-packages/novaclient/v1_1/hypervisors.py b/awx/lib/site-packages/novaclient/v1_1/hypervisors.py new file mode 100644 index 0000000000..fae04026a0 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/hypervisors.py @@ -0,0 +1,72 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Hypervisors interface (1.1 extension).
+"""
+
+from novaclient import base
+from novaclient.openstack.common.py3kcompat import urlutils
+
+
+class Hypervisor(base.Resource):
+    NAME_ATTR = 'hypervisor_hostname'
+
+    def __repr__(self):
+        return "<Hypervisor: %s>" % self.id
+
+
+class HypervisorManager(base.ManagerWithFind):
+    resource_class = Hypervisor
+
+    def list(self, detailed=True):
+        """
+        Get a list of hypervisors.
+        """
+        detail = ""
+        if detailed:
+            detail = "/detail"
+        return self._list('/os-hypervisors%s' % detail, 'hypervisors')
+
+    def search(self, hypervisor_match, servers=False):
+        """
+        Get a list of matching hypervisors.
+
+        :param servers: If True, server information is also retrieved.
+        """
+        target = 'servers' if servers else 'search'
+        url = ('/os-hypervisors/%s/%s' %
+               (urlutils.quote(hypervisor_match, safe=''), target))
+        return self._list(url, 'hypervisors')
+
+    def get(self, hypervisor):
+        """
+        Get a specific hypervisor.
+        """
+        return self._get("/os-hypervisors/%s" % base.getid(hypervisor),
+                         "hypervisor")
+
+    def uptime(self, hypervisor):
+        """
+        Get the uptime for a specific hypervisor.
+        """
+        return self._get("/os-hypervisors/%s/uptime" % base.getid(hypervisor),
+                         "hypervisor")
+
+    def statistics(self):
+        """
+        Get hypervisor statistics over all compute nodes.
+        """
+        return self._get("/os-hypervisors/statistics", "hypervisor_statistics")
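The hypervisor calls compose as below; `cs.hypervisors` is the same attribute the contrib host-meta command earlier in this diff relies on, and the 'compute' match string is illustrative:

    # Sketch: cloud-wide capacity snapshot plus a per-node drill-down.
    stats = cs.hypervisors.statistics()
    print(stats.count, stats.vcpus, stats.memory_mb)  # fields per the API
    for hyper in cs.hypervisors.search('compute', servers=True):
        print(hyper.hypervisor_hostname, getattr(hyper, 'servers', []))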
+ """ + self._delete("/images/%s" % base.getid(image)) + + def set_meta(self, image, metadata): + """ + Set an images metadata + + :param image: The :class:`Image` to add metadata to + :param metadata: A dict of metadata to add to the image + """ + body = {'metadata': metadata} + return self._create("/images/%s/metadata" % base.getid(image), body, + "metadata") + + def delete_meta(self, image, keys): + """ + Delete metadata from an image + + :param image: The :class:`Image` to add metadata to + :param keys: A list of metadata keys to delete from the image + """ + for k in keys: + self._delete("/images/%s/metadata/%s" % (base.getid(image), k)) diff --git a/awx/lib/site-packages/novaclient/v1_1/keypairs.py b/awx/lib/site-packages/novaclient/v1_1/keypairs.py new file mode 100644 index 0000000000..28bd760c10 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/keypairs.py @@ -0,0 +1,81 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Keypair interface (1.1 extension). +""" + +from novaclient import base + + +class Keypair(base.Resource): + """ + A keypair is a ssh key that can be injected into a server on launch. + """ + + def __repr__(self): + return "" % self.id + + def _add_details(self, info): + dico = 'keypair' in info and \ + info['keypair'] or info + for (k, v) in dico.items(): + setattr(self, k, v) + + @property + def id(self): + return self.name + + def delete(self): + self.manager.delete(self) + + +class KeypairManager(base.ManagerWithFind): + resource_class = Keypair + + def get(self, keypair): + """ + Get a keypair. + + :param keypair: The ID of the keypair to get. + :rtype: :class:`Keypair` + """ + return self._get("/os-keypairs/%s" % base.getid(keypair), "keypair") + + def create(self, name, public_key=None): + """ + Create a keypair + + :param name: name for the keypair to create + :param public_key: existing public key to import + """ + body = {'keypair': {'name': name}} + if public_key: + body['keypair']['public_key'] = public_key + return self._create('/os-keypairs', body, 'keypair') + + def delete(self, key): + """ + Delete a keypair + + :param key: The :class:`Keypair` (or its ID) to delete. + """ + self._delete('/os-keypairs/%s' % (base.getid(key))) + + def list(self): + """ + Get a list of keypairs. 
+ """ + return self._list('/os-keypairs', 'keypairs') diff --git a/awx/lib/site-packages/novaclient/v1_1/limits.py b/awx/lib/site-packages/novaclient/v1_1/limits.py new file mode 100644 index 0000000000..8394bdd6b3 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/limits.py @@ -0,0 +1,87 @@ +# Copyright 2011 OpenStack Foundation + +from novaclient import base +from novaclient.openstack.common.py3kcompat import urlutils + + +class Limits(base.Resource): + """A collection of RateLimit and AbsoluteLimit objects.""" + + def __repr__(self): + return "" + + @property + def absolute(self): + for (name, value) in self._info['absolute'].items(): + yield AbsoluteLimit(name, value) + + @property + def rate(self): + for group in self._info['rate']: + uri = group['uri'] + regex = group['regex'] + for rate in group['limit']: + yield RateLimit(rate['verb'], uri, regex, rate['value'], + rate['remaining'], rate['unit'], + rate['next-available']) + + +class RateLimit(object): + """Data model that represents a flattened view of a single rate limit.""" + + def __init__(self, verb, uri, regex, value, remain, + unit, next_available): + self.verb = verb + self.uri = uri + self.regex = regex + self.value = value + self.remain = remain + self.unit = unit + self.next_available = next_available + + def __eq__(self, other): + return self.uri == other.uri \ + and self.regex == other.regex \ + and self.value == other.value \ + and self.verb == other.verb \ + and self.remain == other.remain \ + and self.unit == other.unit \ + and self.next_available == other.next_available + + def __repr__(self): + return "" % (self.verb, self.uri) + + +class AbsoluteLimit(object): + """Data model that represents a single absolute limit.""" + + def __init__(self, name, value): + self.name = name + self.value = value + + def __eq__(self, other): + return self.value == other.value and self.name == other.name + + def __repr__(self): + return "" % (self.name) + + +class LimitsManager(base.Manager): + """Manager object used to interact with limits resource.""" + + resource_class = Limits + + def get(self, reserved=False, tenant_id=None): + """ + Get a specific extension. + + :rtype: :class:`Limits` + """ + opts = {} + if reserved: + opts['reserved'] = 1 + if tenant_id: + opts['tenant_id'] = tenant_id + query_string = "?%s" % urlutils.urlencode(opts) if opts else "" + + return self._get("/limits%s" % query_string, "limits") diff --git a/awx/lib/site-packages/novaclient/v1_1/networks.py b/awx/lib/site-packages/novaclient/v1_1/networks.py new file mode 100644 index 0000000000..18c5bd53e9 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/networks.py @@ -0,0 +1,148 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Network interface. +""" + +from novaclient import base +from novaclient import exceptions + + +class Network(base.Resource): + """ + A network. 
+ """ + HUMAN_ID = False + NAME_ATTR = "label" + + def __repr__(self): + return "" % self.label + + def delete(self): + self.manager.delete(self) + + +class NetworkManager(base.ManagerWithFind): + """ + Manage :class:`Network` resources. + """ + resource_class = Network + + def list(self): + """ + Get a list of all networks. + + :rtype: list of :class:`Network`. + """ + return self._list("/os-networks", "networks") + + def get(self, network): + """ + Get a specific network. + + :param network: The ID of the :class:`Network` to get. + :rtype: :class:`Network` + """ + return self._get("/os-networks/%s" % base.getid(network), + "network") + + def delete(self, network): + """ + Delete a specific network. + + :param network: The ID of the :class:`Network` to delete. + """ + self._delete("/os-networks/%s" % base.getid(network)) + + def create(self, **kwargs): + """ + Create (allocate) a network. The following parameters are + optional except for label; cidr or cidr_v6 must be specified, too. + + :param label: str + :param bridge: str + :param bridge_interface: str + :param cidr: str + :param cidr_v6: str + :param dns1: str + :param dns2: str + :param fixed_cidr: str + :param gateway: str + :param gateway_v6: str + :param multi_host: str + :param priority: str + :param project_id: str + :param vlan_start: int + :param vpn_start: int + + :rtype: list of :class:`Network` + """ + body = {"network": kwargs} + return self._create('/os-networks', body, 'network') + + def disassociate(self, network, disassociate_host=True, + disassociate_project=True): + """ + Disassociate a specific network from project and/or host. + + :param network: The ID of the :class:`Network`. + :param disassociate_host: Whether to disassociate the host + :param disassociate_project: Whether to disassociate the project + """ + if disassociate_host and disassociate_project: + body = {"disassociate": None} + elif disassociate_project: + body = {"disassociate_project": None} + elif disassociate_host: + body = {"disassociate_host": None} + else: + raise exceptions.CommandError( + "Must disassociate either host or project or both") + + self.api.client.post("/os-networks/%s/action" % + base.getid(network), body=body) + + def associate_host(self, network, host): + """ + Associate a specific network with a host. + + :param network: The ID of the :class:`Network`. + :param host: The name of the host to associate the network with + """ + self.api.client.post("/os-networks/%s/action" % + base.getid(network), + body={"associate_host": host}) + + def associate_project(self, network): + """ + Associate a specific network with a project. + + The project is defined by the project authenticated against + + :param network: The ID of the :class:`Network`. + """ + self.api.client.post("/os-networks/add", body={"id": network}) + + def add(self, network=None): + """ + Associates the current project with a network. Network can be chosen + automatically or provided explicitly. + + :param network: The ID of the :class:`Network` to associate (optional). + """ + self.api.client.post( + "/os-networks/add", + body={"id": base.getid(network) if network else None}) diff --git a/awx/lib/site-packages/novaclient/v1_1/quota_classes.py b/awx/lib/site-packages/novaclient/v1_1/quota_classes.py new file mode 100644 index 0000000000..0b669bc2cb --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/quota_classes.py @@ -0,0 +1,67 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. 
diff --git a/awx/lib/site-packages/novaclient/v1_1/quota_classes.py b/awx/lib/site-packages/novaclient/v1_1/quota_classes.py new file mode 100644 index 0000000000..0b669bc2cb --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/quota_classes.py @@ -0,0 +1,67 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+
+
+class QuotaClassSet(base.Resource):
+
+    @property
+    def id(self):
+        """QuotaClassSet does not have an 'id' attribute, but base.Resource
+        needs it to self-refresh, and QuotaClassSet is indexed by class_name"""
+        return self.class_name
+
+    def update(self, *args, **kwargs):
+        return self.manager.update(self.class_name, *args, **kwargs)
+
+
+class QuotaClassSetManager(base.Manager):
+    resource_class = QuotaClassSet
+
+    def get(self, class_name):
+        return self._get("/os-quota-class-sets/%s" % (class_name),
+                         "quota_class_set")
+
+    def update(self, class_name, metadata_items=None,
+               injected_file_content_bytes=None, injected_file_path_bytes=None,
+               volumes=None, gigabytes=None,
+               ram=None, floating_ips=None, instances=None,
+               injected_files=None, cores=None, key_pairs=None,
+               security_groups=None, security_group_rules=None):
+
+        body = {'quota_class_set': {
+            'class_name': class_name,
+            'metadata_items': metadata_items,
+            'key_pairs': key_pairs,
+            'injected_file_content_bytes': injected_file_content_bytes,
+            'injected_file_path_bytes': injected_file_path_bytes,
+            'volumes': volumes,
+            'gigabytes': gigabytes,
+            'ram': ram,
+            'floating_ips': floating_ips,
+            'instances': instances,
+            'injected_files': injected_files,
+            'cores': cores,
+            'security_groups': security_groups,
+            'security_group_rules': security_group_rules}}
+
+        for key in body['quota_class_set'].keys():
+            if body['quota_class_set'][key] is None:
+                body['quota_class_set'].pop(key)
+
+        return self._update('/os-quota-class-sets/%s' % (class_name),
+                            body,
+                            'quota_class_set')
diff --git a/awx/lib/site-packages/novaclient/v1_1/quotas.py b/awx/lib/site-packages/novaclient/v1_1/quotas.py new file mode 100644 index 0000000000..7c7ce54e34 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/quotas.py @@ -0,0 +1,88 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from novaclient import base
+
+
+class QuotaSet(base.Resource):
+
+    @property
+    def id(self):
+        """QuotaSet does not have an 'id' attribute, but base.Resource needs
+        it to self-refresh, and QuotaSet is indexed by tenant_id"""
+        return self.tenant_id
+
+    def update(self, *args, **kwargs):
+        return self.manager.update(self.tenant_id, *args, **kwargs)
+
+
+class QuotaSetManager(base.Manager):
+    resource_class = QuotaSet
+
+    def get(self, tenant_id, user_id=None):
+        if hasattr(tenant_id, 'tenant_id'):
+            tenant_id = tenant_id.tenant_id
+        if user_id:
+            url = '/os-quota-sets/%s?user_id=%s' % (tenant_id, user_id)
+        else:
+            url = '/os-quota-sets/%s' % tenant_id
+        return self._get(url, "quota_set")
+
+    def update(self, tenant_id, metadata_items=None,
+               injected_file_content_bytes=None, injected_file_path_bytes=None,
+               volumes=None, gigabytes=None,
+               ram=None, floating_ips=None, fixed_ips=None, instances=None,
+               injected_files=None, cores=None, key_pairs=None,
+               security_groups=None, security_group_rules=None, force=None,
+               user_id=None):
+
+        body = {'quota_set': {
+            'tenant_id': tenant_id,
+            'metadata_items': metadata_items,
+            'key_pairs': key_pairs,
+            'injected_file_content_bytes': injected_file_content_bytes,
+            'injected_file_path_bytes': injected_file_path_bytes,
+            'volumes': volumes,
+            'gigabytes': gigabytes,
+            'ram': ram,
+            'floating_ips': floating_ips,
+            'fixed_ips': fixed_ips,
+            'instances': instances,
+            'injected_files': injected_files,
+            'cores': cores,
+            'security_groups': security_groups,
+            'security_group_rules': security_group_rules,
+            'force': force}}
+
+        for key in body['quota_set'].keys():
+            if body['quota_set'][key] is None:
+                body['quota_set'].pop(key)
+
+        if user_id:
+            url = '/os-quota-sets/%s?user_id=%s' % (tenant_id, user_id)
+        else:
+            url = '/os-quota-sets/%s' % tenant_id
+        return self._update(url, body, 'quota_set')
+
+    def defaults(self, tenant_id):
+        return self._get('/os-quota-sets/%s/defaults' % tenant_id,
+                         'quota_set')
+
+    def delete(self, tenant_id, user_id=None):
+        if user_id:
+            url = '/os-quota-sets/%s?user_id=%s' % (tenant_id, user_id)
+        else:
+            url = '/os-quota-sets/%s' % tenant_id
+        self._delete(url)
diff --git a/awx/lib/site-packages/novaclient/v1_1/security_group_rules.py b/awx/lib/site-packages/novaclient/v1_1/security_group_rules.py new file mode 100644 index 0000000000..1ebf809edd --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/security_group_rules.py @@ -0,0 +1,77 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Security group rules interface (1.1 extension).
+""" + +from novaclient import base +from novaclient import exceptions + + +class SecurityGroupRule(base.Resource): + def __str__(self): + return str(self.id) + + def delete(self): + self.manager.delete(self) + + +class SecurityGroupRuleManager(base.Manager): + resource_class = SecurityGroupRule + + def create(self, parent_group_id, ip_protocol=None, from_port=None, + to_port=None, cidr=None, group_id=None): + """ + Create a security group rule + + :param ip_protocol: IP protocol, one of 'tcp', 'udp' or 'icmp' + :param from_port: Source port + :param to_port: Destination port + :param cidr: Destination IP address(es) in CIDR notation + :param group_id: Security group id (int) + :param parent_group_id: Parent security group id (int) + """ + + try: + from_port = int(from_port) + except (TypeError, ValueError): + raise exceptions.CommandError("From port must be an integer.") + try: + to_port = int(to_port) + except (TypeError, ValueError): + raise exceptions.CommandError("To port must be an integer.") + if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: + raise exceptions.CommandError("Ip protocol must be 'tcp', 'udp', " + "or 'icmp'.") + + body = {"security_group_rule": { + "ip_protocol": ip_protocol, + "from_port": from_port, + "to_port": to_port, + "cidr": cidr, + "group_id": group_id, + "parent_group_id": parent_group_id}} + + return self._create('/os-security-group-rules', body, + 'security_group_rule') + + def delete(self, rule): + """ + Delete a security group rule + + :param rule: The security group rule to delete (ID or Class) + """ + self._delete('/os-security-group-rules/%s' % base.getid(rule)) diff --git a/awx/lib/site-packages/novaclient/v1_1/security_groups.py b/awx/lib/site-packages/novaclient/v1_1/security_groups.py new file mode 100644 index 0000000000..b929b4e20d --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/security_groups.py @@ -0,0 +1,96 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Security group interface (1.1 extension). 
+""" + +import six + +from novaclient import base +from novaclient.openstack.common.py3kcompat import urlutils + + +class SecurityGroup(base.Resource): + def __str__(self): + return str(self.id) + + def delete(self): + self.manager.delete(self) + + def update(self): + self.manager.update(self) + + +class SecurityGroupManager(base.ManagerWithFind): + resource_class = SecurityGroup + + def create(self, name, description): + """ + Create a security group + + :param name: name for the security group to create + :param description: description of the security group + :rtype: the security group object + """ + body = {"security_group": {"name": name, 'description': description}} + return self._create('/os-security-groups', body, 'security_group') + + def update(self, group, name, description): + """ + Update a security group + + :param group: The security group to delete (group or ID) + :param name: name for the security group to update + :param description: description for the security group to update + :rtype: the security group object + """ + body = {"security_group": {"name": name, 'description': description}} + return self._update('/os-security-groups/%s' % base.getid(group), + body, 'security_group') + + def delete(self, group): + """ + Delete a security group + + :param group: The security group to delete (group or ID) + :rtype: None + """ + self._delete('/os-security-groups/%s' % base.getid(group)) + + def get(self, group_id): + """ + Get a security group + + :param group_id: The security group to get by ID + :rtype: :class:`SecurityGroup` + """ + return self._get('/os-security-groups/%s' % group_id, + 'security_group') + + def list(self, search_opts=None): + """ + Get a list of all security_groups + + :rtype: list of :class:`SecurityGroup` + """ + search_opts = search_opts or {} + + qparams = dict((k, v) for (k, v) in six.iteritems(search_opts) if v) + + query_string = '?%s' % urlutils.urlencode(qparams) if qparams else '' + + return self._list('/os-security-groups%s' % query_string, + 'security_groups') diff --git a/awx/lib/site-packages/novaclient/v1_1/servers.py b/awx/lib/site-packages/novaclient/v1_1/servers.py new file mode 100644 index 0000000000..8d2df4fdc9 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/servers.py @@ -0,0 +1,951 @@ +# Copyright 2010 Jacob Kaplan-Moss + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Server interface. +""" + +import six + +from novaclient import base +from novaclient import crypto +from novaclient.openstack.common.py3kcompat import urlutils +from novaclient.v1_1.security_groups import SecurityGroup + +REBOOT_SOFT, REBOOT_HARD = 'SOFT', 'HARD' + + +class Server(base.Resource): + HUMAN_ID = True + + def __repr__(self): + return "" % self.name + + def delete(self): + """ + Delete (i.e. shut down and delete the image) this server. + """ + self.manager.delete(self) + + def update(self, name=None): + """ + Update the name or the password for this server. 
+
+        :param name: Update the server's name.
+        """
+        self.manager.update(self, name=name)
+
+    def get_console_output(self, length=None):
+        """
+        Get text console log output from Server.
+
+        :param length: The number of lines you would like to retrieve (as int)
+        """
+        return self.manager.get_console_output(self, length)
+
+    def get_vnc_console(self, console_type):
+        """
+        Get vnc console for a Server.
+
+        :param console_type: Type of console ('novnc' or 'xvpvnc')
+        """
+        return self.manager.get_vnc_console(self, console_type)
+
+    def get_spice_console(self, console_type):
+        """
+        Get spice console for a Server.
+
+        :param console_type: Type of console ('spice-html5')
+        """
+        return self.manager.get_spice_console(self, console_type)
+
+    def get_password(self, private_key):
+        """
+        Get password for a Server.
+
+        :param private_key: Path to private key file for decryption
+        """
+        return self.manager.get_password(self, private_key)
+
+    def clear_password(self):
+        """
+        Clear password for a Server.
+        """
+        return self.manager.clear_password(self)
+
+    def add_fixed_ip(self, network_id):
+        """
+        Add an IP address on a network.
+
+        :param network_id: The ID of the network the IP should be on.
+        """
+        self.manager.add_fixed_ip(self, network_id)
+
+    def add_floating_ip(self, address, fixed_address=None):
+        """
+        Add floating IP to an instance
+
+        :param address: The ip address or FloatingIP to add to the instance
+        :param fixed_address: The fixedIP address the FloatingIP is to be
+               associated with (optional)
+        """
+        self.manager.add_floating_ip(self, address, fixed_address)
+
+    def remove_floating_ip(self, address):
+        """
+        Remove floating IP from an instance
+
+        :param address: The ip address or FloatingIP to remove
+        """
+        self.manager.remove_floating_ip(self, address)
+
+    def stop(self):
+        """
+        Stop -- Stop the running server.
+        """
+        self.manager.stop(self)
+
+    def force_delete(self):
+        """
+        Force delete -- Force delete a server.
+        """
+        self.manager.force_delete(self)
+
+    def restore(self):
+        """
+        Restore -- Restore a server in 'soft-deleted' state.
+        """
+        self.manager.restore(self)
+
+    def start(self):
+        """
+        Start -- Start the paused server.
+        """
+        self.manager.start(self)
+
+    def pause(self):
+        """
+        Pause -- Pause the running server.
+        """
+        self.manager.pause(self)
+
+    def unpause(self):
+        """
+        Unpause -- Unpause the paused server.
+        """
+        self.manager.unpause(self)
+
+    def lock(self):
+        """
+        Lock -- Lock the instance from certain operations.
+        """
+        self.manager.lock(self)
+
+    def unlock(self):
+        """
+        Unlock -- Remove instance lock.
+        """
+        self.manager.unlock(self)
+
+    def suspend(self):
+        """
+        Suspend -- Suspend the running server.
+        """
+        self.manager.suspend(self)
+
+    def resume(self):
+        """
+        Resume -- Resume the suspended server.
+        """
+        self.manager.resume(self)
+
+    def rescue(self):
+        """
+        Rescue -- Rescue the problematic server.
+        """
+        return self.manager.rescue(self)
+
+    def unrescue(self):
+        """
+        Unrescue -- Unrescue the rescued server.
+        """
+        self.manager.unrescue(self)
+
+    def diagnostics(self):
+        """Diagnostics -- Retrieve server diagnostics."""
+        return self.manager.diagnostics(self)
+
+    def migrate(self):
+        """
+        Migrate a server to a new host.
+        """
+        self.manager.migrate(self)
+
+    def remove_fixed_ip(self, address):
+        """
+        Remove an IP address.
+
+        :param address: The IP address to remove.
+ """ + self.manager.remove_fixed_ip(self, address) + + def change_password(self, password): + """ + Update the password for a server. + """ + self.manager.change_password(self, password) + + def reboot(self, reboot_type=REBOOT_SOFT): + """ + Reboot the server. + + :param reboot_type: either :data:`REBOOT_SOFT` for a software-level + reboot, or `REBOOT_HARD` for a virtual power cycle hard reboot. + """ + self.manager.reboot(self, reboot_type) + + def rebuild(self, image, password=None, **kwargs): + """ + Rebuild -- shut down and then re-image -- this server. + + :param image: the :class:`Image` (or its ID) to re-image with. + :param password: string to set as password on the rebuilt server. + """ + return self.manager.rebuild(self, image, password=password, **kwargs) + + def resize(self, flavor, **kwargs): + """ + Resize the server's resources. + + :param flavor: the :class:`Flavor` (or its ID) to resize to. + + Until a resize event is confirmed with :meth:`confirm_resize`, the old + server will be kept around and you'll be able to roll back to the old + flavor quickly with :meth:`revert_resize`. All resizes are + automatically confirmed after 24 hours. + """ + self.manager.resize(self, flavor, **kwargs) + + def create_image(self, image_name, metadata=None): + """ + Create an image based on this server. + + :param image_name: The name to assign the newly create image. + :param metadata: Metadata to assign to the image. + """ + return self.manager.create_image(self, image_name, metadata) + + def backup(self, backup_name, backup_type, rotation): + """ + Backup a server instance. + + :param backup_name: Name of the backup image + :param backup_type: The backup type, like 'daily' or 'weekly' + :param rotation: Int parameter representing how many backups to + keep around. + """ + self.manager.backup(self, backup_name, backup_type, rotation) + + def confirm_resize(self): + """ + Confirm that the resize worked, thus removing the original server. + """ + self.manager.confirm_resize(self) + + def revert_resize(self): + """ + Revert a previous resize, switching back to the old server. + """ + self.manager.revert_resize(self) + + @property + def networks(self): + """ + Generate a simplified list of addresses + """ + networks = {} + try: + for network_label, address_list in self.addresses.items(): + networks[network_label] = [a['addr'] for a in address_list] + return networks + except Exception: + return {} + + def live_migrate(self, host=None, + block_migration=False, + disk_over_commit=False): + """ + Migrates a running instance to a new machine. + """ + self.manager.live_migrate(self, host, + block_migration, + disk_over_commit) + + def reset_state(self, state='error'): + """ + Reset the state of an instance to active or error. + """ + self.manager.reset_state(self, state) + + def reset_network(self): + """ + Reset network of an instance. + """ + self.manager.reset_network(self) + + def add_security_group(self, security_group): + """ + Add a security group to an instance. + """ + self.manager.add_security_group(self, security_group) + + def remove_security_group(self, security_group): + """ + Remove a security group from an instance. + """ + self.manager.remove_security_group(self, security_group) + + def list_security_group(self): + """ + List security group(s) of an instance. + """ + return self.manager.list_security_group(self) + + def evacuate(self, host, on_shared_storage, password=None): + """ + Evacuate an instance from failed host to specified host. 
+ + :param host: Name of the target host + :param on_shared_storage: Specifies whether instance files located + on shared storage + :param password: string to set as password on the evacuated server. + """ + return self.manager.evacuate(self, host, on_shared_storage, password) + + def interface_list(self): + """ + List interfaces attached to an instance. + """ + return self.manager.interface_list(self) + + def interface_attach(self, port_id, net_id, fixed_ip): + """ + Attach a network interface to an instance. + """ + return self.manager.interface_attach(self, port_id, net_id, fixed_ip) + + def interface_detach(self, port_id): + """ + Detach a network interface from an instance. + """ + return self.manager.interface_detach(self, port_id) + + +class ServerManager(base.BootingManagerWithFind): + resource_class = Server + + def get(self, server): + """ + Get a server. + + :param server: ID of the :class:`Server` to get. + :rtype: :class:`Server` + """ + return self._get("/servers/%s" % base.getid(server), "server") + + def list(self, detailed=True, search_opts=None, marker=None, limit=None): + """ + Get a list of servers. + + :param detailed: Whether to return detailed server info (optional). + :param search_opts: Search options to filter out servers (optional). + :param marker: Begin returning servers that appear later in the server + list than that represented by this server id (optional). + :param limit: Maximum number of servers to return (optional). + + :rtype: list of :class:`Server` + """ + if search_opts is None: + search_opts = {} + + qparams = {} + + for opt, val in six.iteritems(search_opts): + if val: + qparams[opt] = val + + if marker: + qparams['marker'] = marker + + if limit: + qparams['limit'] = limit + + query_string = "?%s" % urlutils.urlencode(qparams) if qparams else "" + + detail = "" + if detailed: + detail = "/detail" + return self._list("/servers%s%s" % (detail, query_string), "servers") + + def add_fixed_ip(self, server, network_id): + """ + Add an IP address on a network. + + :param server: The :class:`Server` (or its ID) to add an IP to. + :param network_id: The ID of the network the IP should be on. + """ + self._action('addFixedIp', server, {'networkId': network_id}) + + def remove_fixed_ip(self, server, address): + """ + Remove an IP address. + + :param server: The :class:`Server` (or its ID) to add an IP to. + :param address: The IP address to remove. + """ + self._action('removeFixedIp', server, {'address': address}) + + def add_floating_ip(self, server, address, fixed_address=None): + """ + Add a floating ip to an instance + + :param server: The :class:`Server` (or its ID) to add an IP to. + :param address: The FloatingIP or string floating address to add. + :param fixed_address: The FixedIP the floatingIP should be + associated with (optional) + """ + + address = address.ip if hasattr(address, 'ip') else address + if fixed_address: + if hasattr(fixed_address, 'ip'): + fixed_address = fixed_address.ip + self._action('addFloatingIp', server, + {'address': address, 'fixed_address': fixed_address}) + else: + self._action('addFloatingIp', server, {'address': address}) + + def remove_floating_ip(self, server, address): + """ + Remove a floating IP address. + + :param server: The :class:`Server` (or its ID) to remove an IP from. + :param address: The FloatingIP or string floating address to remove. 
+ """ + + address = address.ip if hasattr(address, 'ip') else address + self._action('removeFloatingIp', server, {'address': address}) + + def get_vnc_console(self, server, console_type): + """ + Get a vnc console for an instance + + :param server: The :class:`Server` (or its ID) to add an IP to. + :param console_type: Type of vnc console to get ('novnc' or 'xvpvnc') + """ + + return self._action('os-getVNCConsole', server, + {'type': console_type})[1] + + def get_spice_console(self, server, console_type): + """ + Get a spice console for an instance + + :param server: The :class:`Server` (or its ID) to add an IP to. + :param console_type: Type of spice console to get ('spice-html5') + """ + + return self._action('os-getSPICEConsole', server, + {'type': console_type})[1] + + def get_password(self, server, private_key): + """ + Get password for an instance + + Requires that openssl is installed and in the path + + :param server: The :class:`Server` (or its ID) to add an IP to. + :param private_key: The private key to decrypt password + """ + + _resp, body = self.api.client.get("/servers/%s/os-server-password" + % base.getid(server)) + if body and body.get('password'): + try: + return crypto.decrypt_password(private_key, body['password']) + except Exception as exc: + return '%sFailed to decrypt:\n%s' % (exc, body['password']) + return '' + + def clear_password(self, server): + """ + Clear password for an instance + + :param server: The :class:`Server` (or its ID) to add an IP to. + """ + + return self._delete("/servers/%s/os-server-password" + % base.getid(server)) + + def stop(self, server): + """ + Stop the server. + """ + return self._action('os-stop', server, None) + + def force_delete(self, server): + """ + Force delete the server. + """ + return self._action('forceDelete', server, None) + + def restore(self, server): + """ + Restore soft-deleted server. + """ + return self._action('restore', server, None) + + def start(self, server): + """ + Start the server. + """ + self._action('os-start', server, None) + + def pause(self, server): + """ + Pause the server. + """ + self._action('pause', server, None) + + def unpause(self, server): + """ + Unpause the server. + """ + self._action('unpause', server, None) + + def lock(self, server): + """ + Lock the server. + """ + self._action('lock', server, None) + + def unlock(self, server): + """ + Unlock the server. + """ + self._action('unlock', server, None) + + def suspend(self, server): + """ + Suspend the server. + """ + self._action('suspend', server, None) + + def resume(self, server): + """ + Resume the server. + """ + self._action('resume', server, None) + + def rescue(self, server): + """ + Rescue the server. + """ + return self._action('rescue', server, None) + + def unrescue(self, server): + """ + Unrescue the server. + """ + self._action('unrescue', server, None) + + def diagnostics(self, server): + """Retrieve server diagnostics.""" + return self.api.client.get("/servers/%s/diagnostics" % + base.getid(server)) + + def create(self, name, image, flavor, meta=None, files=None, + reservation_id=None, min_count=None, + max_count=None, security_groups=None, userdata=None, + key_name=None, availability_zone=None, + block_device_mapping=None, block_device_mapping_v2=None, + nics=None, scheduler_hints=None, + config_drive=None, disk_config=None, **kwargs): + # TODO(anthony): indicate in doc string if param is an extension + # and/or optional + """ + Create (boot) a new server. + + :param name: Something to name the server. 
+        :param image: The :class:`Image` to boot with.
+        :param flavor: The :class:`Flavor` to boot onto.
+        :param meta: A dict of arbitrary key/value metadata to store for this
+                     server. A maximum of five entries is allowed, and both
+                     keys and values must be 255 characters or less.
+        :param files: A dict of files to overwrite on the server upon boot.
+                      Keys are file names (i.e. ``/etc/passwd``) and values
+                      are the file contents (either as a string or as a
+                      file-like object). A maximum of five entries is allowed,
+                      and each file must be 10k or less.
+        :param userdata: user data to pass to be exposed by the metadata
+                      server; this can be a file-like object or a string.
+        :param reservation_id: a UUID for the set of servers being requested.
+        :param key_name: (optional extension) name of previously created
+                      keypair to inject into the instance.
+        :param availability_zone: Name of the availability zone for instance
+                                  placement.
+        :param block_device_mapping: (optional extension) A dict of block
+                      device mappings for this server.
+        :param block_device_mapping_v2: (optional extension) A dict of block
+                      device mappings for this server.
+        :param nics: (optional extension) an ordered list of nics to be
+                      added to this server, with information about
+                      connected networks, fixed ips, port etc.
+        :param scheduler_hints: (optional extension) arbitrary key-value pairs
+                      specified by the client to help boot an instance
+        :param config_drive: (optional extension) value for config drive
+                      either boolean, or volume-id
+        :param disk_config: (optional extension) control how the disk is
+                      partitioned when the server is created. possible
+                      values are 'AUTO' or 'MANUAL'.
+        """
+        if not min_count:
+            min_count = 1
+        if not max_count:
+            max_count = min_count
+        if min_count > max_count:
+            min_count = max_count
+
+        boot_args = [name, image, flavor]
+
+        boot_kwargs = dict(
+            meta=meta, files=files, userdata=userdata,
+            reservation_id=reservation_id, min_count=min_count,
+            max_count=max_count, security_groups=security_groups,
+            key_name=key_name, availability_zone=availability_zone,
+            scheduler_hints=scheduler_hints, config_drive=config_drive,
+            disk_config=disk_config, **kwargs)
+
+        if block_device_mapping:
+            resource_url = "/os-volumes_boot"
+            boot_kwargs['block_device_mapping'] = block_device_mapping
+        elif block_device_mapping_v2:
+            resource_url = "/os-volumes_boot"
+            boot_kwargs['block_device_mapping_v2'] = block_device_mapping_v2
+        else:
+            resource_url = "/servers"
+        if nics:
+            boot_kwargs['nics'] = nics
+
+        response_key = "server"
+        return self._boot(resource_url, response_key, *boot_args,
+                          **boot_kwargs)
+
+    def update(self, server, name=None):
+        """
+        Update the name of a server.
+
+        :param server: The :class:`Server` (or its ID) to update.
+        :param name: Update the server's name.
+        """
+        if name is None:
+            return
+
+        body = {
+            "server": {
+                "name": name,
+            },
+        }
+
+        return self._update("/servers/%s" % base.getid(server), body, "server")
+
+    def change_password(self, server, password):
+        """
+        Update the admin password for a server.
+        """
+        self._action("changePassword", server, {"adminPass": password})
+
+    def delete(self, server):
+        """
+        Delete (i.e. shut down and remove) this server.
+        """
+        self._delete("/servers/%s" % base.getid(server))
+
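The create() call above routes volume-backed boots through the os-volumes_boot extension and everything else through the core /servers resource. A minimal sketch of just that routing decision (pick_boot_url is a hypothetical helper written for illustration, not part of novaclient):

    def pick_boot_url(block_device_mapping=None, block_device_mapping_v2=None):
        # Either style of block device mapping switches the boot over to the
        # os-volumes_boot extension; plain image boots use /servers.
        if block_device_mapping or block_device_mapping_v2:
            return "/os-volumes_boot"
        return "/servers"

    assert pick_boot_url() == "/servers"
    assert pick_boot_url(block_device_mapping={"vda": "vol-1:::0"}) == "/os-volumes_boot"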
+    def reboot(self, server, reboot_type=REBOOT_SOFT):
+        """
+        Reboot a server.
+
+        :param server: The :class:`Server` (or its ID) to reboot.
+        :param reboot_type: either :data:`REBOOT_SOFT` for a software-level
+            reboot, or :data:`REBOOT_HARD` for a virtual power cycle hard
+            reboot.
+        """
+        self._action('reboot', server, {'type': reboot_type})
+
+    def rebuild(self, server, image, password=None, disk_config=None,
+                **kwargs):
+        """
+        Rebuild -- shut down and then re-image -- a server.
+
+        :param server: The :class:`Server` (or its ID) to rebuild.
+        :param image: the :class:`Image` (or its ID) to re-image with.
+        :param password: string to set as password on the rebuilt server.
+        :param disk_config: partitioning mode to use on the rebuilt server.
+                            Valid values are 'AUTO' or 'MANUAL'
+        """
+        body = {'imageRef': base.getid(image)}
+        if password is not None:
+            body['adminPass'] = password
+        if disk_config is not None:
+            body['OS-DCF:diskConfig'] = disk_config
+
+        _resp, body = self._action('rebuild', server, body, **kwargs)
+        return Server(self, body['server'])
+
+    def migrate(self, server):
+        """
+        Migrate a server to a new host.
+
+        :param server: The :class:`Server` (or its ID).
+        """
+        self._action('migrate', server)
+
+    def resize(self, server, flavor, disk_config=None, **kwargs):
+        """
+        Resize a server's resources.
+
+        :param server: The :class:`Server` (or its ID) to resize.
+        :param flavor: the :class:`Flavor` (or its ID) to resize to.
+        :param disk_config: partitioning mode to use on the resized server.
+                            Valid values are 'AUTO' or 'MANUAL'
+
+        Until a resize event is confirmed with :meth:`confirm_resize`, the old
+        server will be kept around and you'll be able to roll back to the old
+        flavor quickly with :meth:`revert_resize`. All resizes are
+        automatically confirmed after 24 hours.
+        """
+        info = {'flavorRef': base.getid(flavor)}
+        if disk_config is not None:
+            info['OS-DCF:diskConfig'] = disk_config
+
+        self._action('resize', server, info=info, **kwargs)
+
+    def confirm_resize(self, server):
+        """
+        Confirm that the resize worked, thus removing the original server.
+
+        :param server: The :class:`Server` (or its ID) whose resize is to be
+                       confirmed.
+        """
+        self._action('confirmResize', server)
+
+    def revert_resize(self, server):
+        """
+        Revert a previous resize, switching back to the old server.
+
+        :param server: The :class:`Server` (or its ID) whose resize is to be
+                       reverted.
+        """
+        self._action('revertResize', server)
+
+    def create_image(self, server, image_name, metadata=None):
+        """
+        Snapshot a server.
+
+        :param server: The :class:`Server` (or its ID) to snapshot.
+        :param image_name: Name to give the snapshot image
+        :param metadata: Metadata to give newly-created image entity
+        """
+        body = {'name': image_name, 'metadata': metadata or {}}
+        resp = self._action('createImage', server, body)[0]
+        location = resp.headers['location']
+        image_uuid = location.split('/')[-1]
+        return image_uuid
+
+    def backup(self, server, backup_name, backup_type, rotation):
+        """
+        Backup a server instance.
+
+        :param server: The :class:`Server` (or its ID) to back up.
+        :param backup_name: Name of the backup image
+        :param backup_type: The backup type, like 'daily' or 'weekly'
+        :param rotation: Int parameter representing how many backups to
+                         keep around.
+        """
+        body = {'name': backup_name,
+                'backup_type': backup_type,
+                'rotation': rotation}
+        self._action('createBackup', server, body)
+
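Nearly every verb above funnels through the same wire format: _action() (defined at the end of this class) wraps its info dict under the action name and POSTs it to the server's action resource. A minimal sketch of the resulting request bodies:

    import json

    def action_body(action, info=None):
        # Same single-key envelope that ServerManager._action builds.
        return json.dumps({action: info})

    print(action_body('reboot', {'type': 'SOFT'}))
    # {"reboot": {"type": "SOFT"}}
    print(action_body('createBackup',
                      {'name': 'nightly', 'backup_type': 'daily',
                       'rotation': 7}))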
+ """ + body = {'name': backup_name, + 'backup_type': backup_type, + 'rotation': rotation} + self._action('createBackup', server, body) + + def set_meta(self, server, metadata): + """ + Set a servers metadata + :param server: The :class:`Server` to add metadata to + :param metadata: A dict of metadata to add to the server + """ + body = {'metadata': metadata} + return self._create("/servers/%s/metadata" % base.getid(server), + body, "metadata") + + def get_console_output(self, server, length=None): + """ + Get text console log output from Server. + + :param server: The :class:`Server` (or its ID) whose console output + you would like to retrieve. + :param length: The number of tail loglines you would like to retrieve. + """ + return self._action('os-getConsoleOutput', + server, + {'length': length})[1]['output'] + + def delete_meta(self, server, keys): + """ + Delete metadata from an server + :param server: The :class:`Server` to add metadata to + :param keys: A list of metadata keys to delete from the server + """ + for k in keys: + self._delete("/servers/%s/metadata/%s" % (base.getid(server), k)) + + def live_migrate(self, server, host, block_migration, disk_over_commit): + """ + Migrates a running instance to a new machine. + + :param server: instance id which comes from nova list. + :param host: destination host name. + :param block_migration: if True, do block_migration. + :param disk_over_commit: if True, Allow overcommit. + + """ + self._action('os-migrateLive', server, + {'host': host, + 'block_migration': block_migration, + 'disk_over_commit': disk_over_commit}) + + def reset_state(self, server, state='error'): + """ + Reset the state of an instance to active or error. + + :param server: ID of the instance to reset the state of. + :param state: Desired state; either 'active' or 'error'. + Defaults to 'error'. + """ + self._action('os-resetState', server, dict(state=state)) + + def reset_network(self, server): + """ + Reset network of an instance. + """ + self._action('resetNetwork', server) + + def add_security_group(self, server, security_group): + """ + Add a Security Group to an instance + + :param server: ID of the instance. + :param security_group: The name of security group to add. + + """ + self._action('addSecurityGroup', server, {'name': security_group}) + + def remove_security_group(self, server, security_group): + """ + Add a Security Group to an instance + + :param server: ID of the instance. + :param security_group: The name of security group to remove. + + """ + self._action('removeSecurityGroup', server, {'name': security_group}) + + def list_security_group(self, server): + """ + List Security Group(s) of an instance + + :param server: ID of the instance. + + """ + return self._list('/servers/%s/os-security-groups' % + base.getid(server), 'security_groups', SecurityGroup) + + def evacuate(self, server, host, on_shared_storage, password=None): + """ + Evacuate a server instance. + + :param server: The :class:`Server` (or its ID) to share onto. + :param host: Name of the target host. + :param on_shared_storage: Specifies whether instance files located + on shared storage + :param password: string to set as password on the evacuated server. + """ + body = { + 'host': host, + 'onSharedStorage': on_shared_storage, + } + + if password is not None: + body['adminPass'] = password + + return self._action('evacuate', server, body) + + def interface_list(self, server): + """ + List attached network interfaces + + :param server: The :class:`Server` (or its ID) to query. 
+ """ + return self._list('/servers/%s/os-interface' % base.getid(server), + 'interfaceAttachments') + + def interface_attach(self, server, port_id, net_id, fixed_ip): + """ + Attach a network_interface to an instance. + + :param server: The :class:`Server` (or its ID) to attach to. + :param port_id: The port to attach. + """ + + body = {'interfaceAttachment': {}} + if port_id: + body['interfaceAttachment']['port_id'] = port_id + if net_id: + body['interfaceAttachment']['net_id'] = net_id + if fixed_ip: + body['interfaceAttachment']['fixed_ips'] = [ + {'ip_address': fixed_ip}] + + return self._create('/servers/%s/os-interface' % base.getid(server), + body, 'interfaceAttachment') + + def interface_detach(self, server, port_id): + """ + Detach a network_interface from an instance. + + :param server: The :class:`Server` (or its ID) to detach from. + :param port_id: The port to detach. + """ + self._delete('/servers/%s/os-interface/%s' % (base.getid(server), + port_id)) + + def _action(self, action, server, info=None, **kwargs): + """ + Perform a server "action" -- reboot/rebuild/resize/etc. + """ + body = {action: info} + self.run_hooks('modify_body_for_action', body, **kwargs) + url = '/servers/%s/action' % base.getid(server) + return self.api.client.post(url, body=body) diff --git a/awx/lib/site-packages/novaclient/v1_1/services.py b/awx/lib/site-packages/novaclient/v1_1/services.py new file mode 100644 index 0000000000..03424026e9 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/services.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +service interface +""" +from novaclient import base + + +class Service(base.Resource): + def __repr__(self): + return "" % self.service + + def _add_details(self, info): + dico = 'resource' in info and info['resource'] or info + for (k, v) in dico.items(): + setattr(self, k, v) + + +class ServiceManager(base.ManagerWithFind): + resource_class = Service + + def list(self, host=None, binary=None): + """ + Describes cpu/memory/hdd info for host. + + :param host: destination host name. 
+ """ + url = "/os-services" + filters = [] + if host: + filters.append("host=%s" % host) + if binary: + filters.append("binary=%s" % binary) + if filters: + url = "%s?%s" % (url, "&".join(filters)) + return self._list(url, "services") + + def enable(self, host, binary): + """Enable the service specified by hostname and binary.""" + body = {"host": host, "binary": binary} + return self._update("/os-services/enable", body, "service") + + def disable(self, host, binary): + """Disable the service specified by hostname and binary.""" + body = {"host": host, "binary": binary} + return self._update("/os-services/disable", body, "service") + + def disable_log_reason(self, host, binary, reason): + """Disable the service with reason.""" + body = {"host": host, "binary": binary, "disabled_reason": reason} + return self._update("/os-services/disable-log-reason", body, "service") diff --git a/awx/lib/site-packages/novaclient/v1_1/shell.py b/awx/lib/site-packages/novaclient/v1_1/shell.py new file mode 100644 index 0000000000..d74c0b04cf --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/shell.py @@ -0,0 +1,3423 @@ +# Copyright 2010 Jacob Kaplan-Moss + +# Copyright 2011 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import print_function + +import argparse +import copy +import datetime +import getpass +import locale +import os +import sys +import time + +import six + +from novaclient import exceptions +from novaclient.openstack.common import strutils +from novaclient.openstack.common import timeutils +from novaclient import utils +from novaclient.v1_1 import availability_zones +from novaclient.v1_1 import quotas +from novaclient.v1_1 import servers + + +CLIENT_BDM2_KEYS = { + 'id': 'uuid', + 'source': 'source_type', + 'dest': 'destination_type', + 'bus': 'disk_bus', + 'device': 'device_name', + 'size': 'volume_size', + 'format': 'guest_format', + 'bootindex': 'boot_index', + 'type': 'device_type', + 'shutdown': 'delete_on_termination', +} + + +def _key_value_pairing(text): + try: + (k, v) = text.split('=', 1) + return (k, v) + except ValueError: + msg = "%r is not in the format of key=value" % text + raise argparse.ArgumentTypeError(msg) + + +def _match_image(cs, wanted_properties): + image_list = cs.images.list() + images_matched = [] + match = set(wanted_properties) + for img in image_list: + try: + if match == match.intersection(set(img.metadata.items())): + images_matched.append(img) + except AttributeError: + pass + return images_matched + + +def _parse_block_device_mapping_v2(args, image): + bdm = [] + + if args.boot_volume: + bdm_dict = {'uuid': args.boot_volume, 'source_type': 'volume', + 'destination_type': 'volume', 'boot_index': 0, + 'delete_on_termination': False} + bdm.append(bdm_dict) + + if args.snapshot: + bdm_dict = {'uuid': args.snapshot, 'source_type': 'snapshot', + 'destination_type': 'volume', 'boot_index': 0, + 'delete_on_termination': False} + bdm.append(bdm_dict) + + for device_spec in args.block_device: + spec_dict = dict(v.split('=') for v in device_spec.split(',')) + bdm_dict = {} + + for key, value in six.iteritems(spec_dict): + bdm_dict[CLIENT_BDM2_KEYS[key]] = value + + # Convert the delete_on_termination to a boolean or set it to true by + # default for local block devices when not specified. 
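+        # e.g. a --block-device spec of "shutdown=remove" arrives here as
+        # {'delete_on_termination': 'remove'} via the CLIENT_BDM2_KEYS
+        # translation and is normalized to the boolean True below; local
+        # destinations with no explicit setting also default to True.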
+        if 'delete_on_termination' in bdm_dict:
+            action = bdm_dict['delete_on_termination']
+            bdm_dict['delete_on_termination'] = (action == 'remove')
+        elif bdm_dict.get('destination_type') == 'local':
+            bdm_dict['delete_on_termination'] = True
+
+        bdm.append(bdm_dict)
+
+    for ephemeral_spec in args.ephemeral:
+        bdm_dict = {'source_type': 'blank', 'destination_type': 'local',
+                    'boot_index': -1, 'delete_on_termination': True}
+
+        eph_dict = dict(v.split('=') for v in ephemeral_spec.split(','))
+        if 'size' in eph_dict:
+            bdm_dict['volume_size'] = eph_dict['size']
+        if 'format' in eph_dict:
+            bdm_dict['guest_format'] = eph_dict['format']
+
+        bdm.append(bdm_dict)
+
+    if args.swap:
+        bdm_dict = {'source_type': 'blank', 'destination_type': 'local',
+                    'boot_index': -1, 'delete_on_termination': True,
+                    'guest_format': 'swap', 'volume_size': args.swap}
+        bdm.append(bdm_dict)
+
+    return bdm
+
+
+def _boot(cs, args, reservation_id=None, min_count=None, max_count=None):
+    """Boot a new server."""
+    if min_count is None:
+        min_count = 1
+    if max_count is None:
+        max_count = min_count
+    if min_count > max_count:
+        raise exceptions.CommandError("min_instances should be <= "
+                                      "max_instances")
+    if not min_count or not max_count:
+        raise exceptions.CommandError("neither min_instances nor "
+                                      "max_instances should be 0")
+
+    if args.image:
+        image = _find_image(cs, args.image)
+    else:
+        image = None
+
+    if not image and args.image_with:
+        images = _match_image(cs, args.image_with)
+        if images:
+            # TODO(harlowja): log a warning that we
+            # are selecting the first of many?
+            image = images[0]
+
+    if not args.flavor:
+        raise exceptions.CommandError("you need to specify a Flavor ID")
+
+    if args.num_instances is not None:
+        if args.num_instances <= 1:
+            raise exceptions.CommandError("num_instances should be > 1")
+        max_count = args.num_instances
+
+    flavor = _find_flavor(cs, args.flavor)
+
+    meta = dict(v.split('=', 1) for v in args.meta)
+
+    files = {}
+    for f in args.files:
+        try:
+            dst, src = f.split('=', 1)
+            files[dst] = open(src)
+        except IOError as e:
+            raise exceptions.CommandError("Can't open '%s': %s" % (src, e))
+        except ValueError as e:
+            raise exceptions.CommandError("Invalid file argument '%s'. File "
+                                          "arguments must be of the form "
+                                          "'--file <dst=src>'" % f)
+
+    # use the os-keypair extension
+    key_name = None
+    if args.key_name is not None:
+        key_name = args.key_name
+
+    if args.user_data:
+        try:
+            userdata = open(args.user_data)
+        except IOError as e:
+            raise exceptions.CommandError("Can't open '%s': %s" %
+                                          (args.user_data, e))
+    else:
+        userdata = None
+
+    if args.availability_zone:
+        availability_zone = args.availability_zone
+    else:
+        availability_zone = None
+
+    if args.security_groups:
+        security_groups = args.security_groups.split(',')
+    else:
+        security_groups = None
+
+    block_device_mapping = {}
+    for bdm in args.block_device_mapping:
+        device_name, mapping = bdm.split('=', 1)
+        block_device_mapping[device_name] = mapping
+
+    block_device_mapping_v2 = _parse_block_device_mapping_v2(args, image)
+
+    n_boot_args = len(filter(None, (image, args.boot_volume, args.snapshot)))
+    have_bdm = block_device_mapping_v2 or block_device_mapping
+
+    # Fail if more than one boot device is present,
+    # or if there is no device to boot from.
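+    # In other words: at most one of image, --boot-volume and --snapshot may
+    # be given, and if none is given a block device mapping must supply the
+    # boot source instead.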
+ if n_boot_args > 1 or n_boot_args == 0 and not have_bdm: + raise exceptions.CommandError( + "you need to specify at least one source ID (Image, Snapshot or " + "Volume), a block device mapping or provide a set of properties " + "to match against an image") + + if block_device_mapping and block_device_mapping_v2: + raise exceptions.CommandError( + "you can't mix old block devices (--block-device-mapping) " + "with the new ones (--block-device, --boot-volume, --snapshot, " + "--ephemeral, --swap)") + + nics = [] + for nic_str in args.nics: + err_msg = ("Invalid nic argument '%s'. Nic arguments must be of the " + "form --nic , with at minimum net-id or port-id " + "specified." % nic_str) + nic_info = {"net-id": "", "v4-fixed-ip": "", "port-id": ""} + + for kv_str in nic_str.split(","): + try: + k, v = kv_str.split("=", 1) + except ValueError as e: + raise exceptions.CommandError(err_msg) + + if k in nic_info: + nic_info[k] = v + else: + raise exceptions.CommandError(err_msg) + + if not nic_info['net-id'] and not nic_info['port-id']: + raise exceptions.CommandError(err_msg) + + nics.append(nic_info) + + hints = {} + if args.scheduler_hints: + for hint in args.scheduler_hints: + key, _sep, value = hint.partition('=') + # NOTE(vish): multiple copies of the same hint will + # result in a list of values + if key in hints: + if isinstance(hints[key], basestring): + hints[key] = [hints[key]] + hints[key] += [value] + else: + hints[key] = value + boot_args = [args.name, image, flavor] + + if str(args.config_drive).lower() in ("true", "1"): + config_drive = True + elif str(args.config_drive).lower() in ("false", "0", "", "none"): + config_drive = None + else: + config_drive = args.config_drive + + boot_kwargs = dict( + meta=meta, + files=files, + key_name=key_name, + reservation_id=reservation_id, + min_count=min_count, + max_count=max_count, + userdata=userdata, + availability_zone=availability_zone, + security_groups=security_groups, + block_device_mapping=block_device_mapping, + block_device_mapping_v2=block_device_mapping_v2, + nics=nics, + scheduler_hints=hints, + config_drive=config_drive) + + return boot_args, boot_kwargs + + +@utils.arg('--flavor', + default=None, + metavar='', + help="Name or ID of flavor (see 'nova flavor-list').") +@utils.arg('--image', + default=None, + metavar='', + help="Name or ID of image (see 'nova image-list'). ") +@utils.arg('--image-with', + default=[], + type=_key_value_pairing, + action='append', + metavar='', + help="Image metadata property (see 'nova image-show'). ") +@utils.arg('--boot-volume', + default=None, + metavar="", + help="Volume ID to boot from.") +@utils.arg('--snapshot', + default=None, + metavar="", + help="Sapshot ID to boot from (will create a volume).") +@utils.arg('--num-instances', + default=None, + type=int, + metavar='', + help="boot multi instances at a time (limited by quota).") +@utils.arg('--meta', + metavar="", + action='append', + default=[], + help="Record arbitrary key/value metadata to /meta.js " + "on the new server. Can be specified multiple times.") +@utils.arg('--file', + metavar="", + action='append', + dest='files', + default=[], + help="Store arbitrary files from locally to " + "on the new server. 
You may store up to 5 files.") +@utils.arg('--key-name', + metavar='', + help="Key name of keypair that should be created earlier with \ + the command keypair-add") +@utils.arg('--key_name', + help=argparse.SUPPRESS) +@utils.arg('name', metavar='', help='Name for the new server') +@utils.arg('--user-data', + default=None, + metavar='', + help="user data file to pass to be exposed by the metadata server.") +@utils.arg('--user_data', + help=argparse.SUPPRESS) +@utils.arg('--availability-zone', + default=None, + metavar='', + help="The availability zone for instance placement.") +@utils.arg('--availability_zone', + help=argparse.SUPPRESS) +@utils.arg('--security-groups', + default=None, + metavar='', + help="Comma separated list of security group names.") +@utils.arg('--security_groups', + help=argparse.SUPPRESS) +@utils.arg('--block-device-mapping', + metavar="", + action='append', + default=[], + help="Block device mapping in the format " + "=:::.") +@utils.arg('--block_device_mapping', + action='append', + help=argparse.SUPPRESS) +@utils.arg('--block-device', + metavar="key1=value1[,key2=value2...]", + action='append', + default=[], + help="Block device mapping with the keys: " + "id=image_id, snapshot_id or volume_id, " + "source=source type (image, snapshot, volume or blank), " + "dest=destination type of the block device (volume or local), " + "bus=device's bus, " + "device=name of the device (e.g. vda, xda, ...), " + "size=size of the block device in GB, " + "format=device will be formatted (e.g. swap, ext3, ntfs, ...), " + "bootindex=integer used for ordering the boot disks, " + "type=device type (e.g. disk, cdrom, ...) and " + "shutdown=shutdown behaviour (either preserve or remove).") +@utils.arg('--swap', + metavar="", + default=None, + help="Create and attach a local swap block device of MB.") +@utils.arg('--ephemeral', + metavar="size=[,format=]", + action='append', + default=[], + help="Create and attach a local ephemeral block device of GB " + "and format it to .") +@utils.arg('--hint', + action='append', + dest='scheduler_hints', + default=[], + metavar='', + help="Send arbitrary key/value pairs to the scheduler for custom use.") +@utils.arg('--nic', + metavar="", + action='append', + dest='nics', + default=[], + help="Create a NIC on the server. " + "Specify option multiple times to create multiple NICs. 
" + "net-id: attach NIC to network with this UUID " + "(required if no port-id), " + "v4-fixed-ip: IPv4 fixed address for NIC (optional), " + "port-id: attach NIC to port with this UUID " + "(required if no net-id)") +@utils.arg('--config-drive', + metavar="", + dest='config_drive', + default=False, + help="Enable config drive") +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance builds so progress can be reported.') +def do_boot(cs, args): + """Boot a new server.""" + boot_args, boot_kwargs = _boot(cs, args) + + extra_boot_kwargs = utils.get_resource_manager_extra_kwargs(do_boot, args) + boot_kwargs.update(extra_boot_kwargs) + + server = cs.servers.create(*boot_args, **boot_kwargs) + + # Keep any information (like adminPass) returned by create + info = server._info + server = cs.servers.get(info['id']) + info.update(server._info) + + flavor = info.get('flavor', {}) + flavor_id = flavor.get('id', '') + info['flavor'] = _find_flavor(cs, flavor_id).name + + image = info.get('image', {}) + if image: + image_id = image.get('id', '') + info['image'] = _find_image(cs, image_id).name + else: # Booting from volume + info['image'] = "Attempt to boot from volume - no image supplied" + + info.pop('links', None) + info.pop('addresses', None) + + utils.print_dict(info) + + if args.poll: + _poll_for_status(cs.servers.get, info['id'], 'building', ['active']) + + +def do_cloudpipe_list(cs, _args): + """Print a list of all cloudpipe instances.""" + cloudpipes = cs.cloudpipe.list() + columns = ['Project Id', "Public IP", "Public Port", "Internal IP"] + utils.print_list(cloudpipes, columns) + + +@utils.arg('project', metavar='', help='Name of the project.') +def do_cloudpipe_create(cs, args): + """Create a cloudpipe instance for the given project.""" + cs.cloudpipe.create(args.project) + + +@utils.arg('address', metavar='', help='New IP Address.') +@utils.arg('port', metavar='', help='New Port.') +def do_cloudpipe_configure(cs, args): + """Update the VPN IP/port of a cloudpipe instance.""" + cs.cloudpipe.update(args.address, args.port) + + +def _poll_for_status(poll_fn, obj_id, action, final_ok_states, + poll_period=5, show_progress=True, + status_field="status", silent=False): + """Block while an action is being performed, periodically printing + progress. + """ + def print_progress(progress): + if show_progress: + msg = ('\rInstance %(action)s... %(progress)s%% complete' + % dict(action=action, progress=progress)) + else: + msg = '\rInstance %(action)s...' 
% dict(action=action) + + sys.stdout.write(msg) + sys.stdout.flush() + + if not silent: + print + + while True: + obj = poll_fn(obj_id) + + status = getattr(obj, status_field) + + if status: + status = status.lower() + + progress = getattr(obj, 'progress', None) or 0 + if status in final_ok_states: + if not silent: + print_progress(100) + print("\nFinished") + break + elif status == "error": + if not silent: + print("\nError %s instance" % action) + break + + if not silent: + print_progress(progress) + + time.sleep(poll_period) + + +def _translate_keys(collection, convert): + for item in collection: + keys = item.__dict__.keys() + for from_key, to_key in convert: + if from_key in keys and to_key not in keys: + setattr(item, to_key, item._info[from_key]) + + +def _translate_extended_states(collection): + power_states = [ + 'NOSTATE', # 0x00 + 'Running', # 0x01 + '', # 0x02 + 'Paused', # 0x03 + 'Shutdown', # 0x04 + '', # 0x05 + 'Crashed', # 0x06 + 'Suspended' # 0x07 + ] + + for item in collection: + try: + setattr(item, 'power_state', + power_states[getattr(item, 'power_state')] + ) + except AttributeError: + setattr(item, 'power_state', "N/A") + try: + getattr(item, 'task_state') + except AttributeError: + setattr(item, 'task_state', "N/A") + + +def _translate_flavor_keys(collection): + _translate_keys(collection, [('ram', 'memory_mb')]) + + +def _print_flavor_extra_specs(flavor): + try: + return flavor.get_keys() + except exceptions.NotFound: + return "N/A" + + +def _print_flavor_list(flavors, show_extra_specs=False): + _translate_flavor_keys(flavors) + + headers = [ + 'ID', + 'Name', + 'Memory_MB', + 'Disk', + 'Ephemeral', + 'Swap', + 'VCPUs', + 'RXTX_Factor', + 'Is_Public', + ] + + if show_extra_specs: + formatters = {'extra_specs': _print_flavor_extra_specs} + headers.append('extra_specs') + else: + formatters = {} + + utils.print_list(flavors, headers, formatters) + + +@utils.arg('--extra-specs', + dest='extra_specs', + action='store_true', + default=False, + help='Get extra-specs of each flavor.') +@utils.arg('--all', + dest='all', + action='store_true', + default=False, + help='Display all flavors (Admin only).') +def do_flavor_list(cs, args): + """Print a list of available 'flavors' (sizes of servers).""" + if args.all: + flavors = cs.flavors.list(is_public=None) + else: + flavors = cs.flavors.list() + _print_flavor_list(flavors, args.extra_specs) + + +@utils.arg('flavor', + metavar='', + help="Name or ID of the flavor to delete") +def do_flavor_delete(cs, args): + """Delete a specific flavor""" + flavorid = _find_flavor(cs, args.flavor) + cs.flavors.delete(flavorid) + _print_flavor_list([flavorid]) + + +@utils.arg('flavor', + metavar='', + help="Name or ID of flavor") +def do_flavor_show(cs, args): + """Show details about the given flavor.""" + flavor = _find_flavor(cs, args.flavor) + _print_flavor(flavor) + + +@utils.arg('name', + metavar='', + help="Name of the new flavor") +@utils.arg('id', + metavar='', + help="Unique ID (integer or UUID) for the new flavor." 
+ " If specifying 'auto', a UUID will be generated as id") +@utils.arg('ram', + metavar='', + help="Memory size in MB") +@utils.arg('disk', + metavar='', + help="Disk size in GB") +@utils.arg('--ephemeral', + metavar='', + help="Ephemeral space size in GB (default 0)", + default=0) +@utils.arg('vcpus', + metavar='', + help="Number of vcpus") +@utils.arg('--swap', + metavar='', + help="Swap space size in MB (default 0)", + default=0) +@utils.arg('--rxtx-factor', + metavar='', + help="RX/TX factor (default 1)", + default=1.0) +@utils.arg('--is-public', + metavar='', + help="Make flavor accessible to the public (default true)", + type=utils.bool_from_str, + default=True) +def do_flavor_create(cs, args): + """Create a new flavor""" + f = cs.flavors.create(args.name, args.ram, args.vcpus, args.disk, args.id, + args.ephemeral, args.swap, args.rxtx_factor, + args.is_public) + _print_flavor_list([f]) + + +@utils.arg('flavor', + metavar='', + help="Name or ID of flavor") +@utils.arg('action', + metavar='', + choices=['set', 'unset'], + help="Actions: 'set' or 'unset'") +@utils.arg('metadata', + metavar='', + nargs='+', + action='append', + default=[], + help='Extra_specs to set/unset (only key is necessary on unset)') +def do_flavor_key(cs, args): + """Set or unset extra_spec for a flavor.""" + flavor = _find_flavor(cs, args.flavor) + keypair = _extract_metadata(args) + + if args.action == 'set': + flavor.set_keys(keypair) + elif args.action == 'unset': + flavor.unset_keys(keypair.keys()) + + +@utils.arg('--flavor', + metavar='', + help="Filter results by flavor name or ID.") +@utils.arg('--tenant', metavar='', + help='Filter results by tenant ID.') +def do_flavor_access_list(cs, args): + """Print access information about the given flavor.""" + if args.flavor and args.tenant: + raise exceptions.CommandError("Unable to filter results by " + "both --flavor and --tenant.") + elif args.flavor: + flavor = _find_flavor(cs, args.flavor) + if flavor.is_public: + raise exceptions.CommandError("Failed to get access list " + "for public flavor type.") + kwargs = {'flavor': flavor} + elif args.tenant: + kwargs = {'tenant': args.tenant} + else: + raise exceptions.CommandError("Unable to get all access lists. 
" + "Specify --flavor or --tenant") + + try: + access_list = cs.flavor_access.list(**kwargs) + except NotImplementedError as e: + raise exceptions.CommandError("%s" % str(e)) + + columns = ['Flavor_ID', 'Tenant_ID'] + utils.print_list(access_list, columns) + + +@utils.arg('flavor', + metavar='', + help="Filter results by flavor name or ID.") +@utils.arg('tenant', metavar='', + help='Filter results by tenant ID.') +def do_flavor_access_add(cs, args): + """Add flavor access for the given tenant.""" + flavor = _find_flavor_for_admin(cs, args.flavor) + access_list = cs.flavor_access.add_tenant_access(flavor, args.tenant) + columns = ['Flavor_ID', 'Tenant_ID'] + utils.print_list(access_list, columns) + + +@utils.arg('flavor', + metavar='', + help="Filter results by flavor name or ID.") +@utils.arg('tenant', metavar='', + help='Filter results by tenant ID.') +def do_flavor_access_remove(cs, args): + """Remove flavor access for the given tenant.""" + flavor = _find_flavor_for_admin(cs, args.flavor) + access_list = cs.flavor_access.remove_tenant_access(flavor, args.tenant) + columns = ['Flavor_ID', 'Tenant_ID'] + utils.print_list(access_list, columns) + + +@utils.arg('project_id', metavar='', + help='The ID of the project.') +def do_scrub(cs, args): + """Delete data associated with the project.""" + networks_list = cs.networks.list() + networks_list = [network for network in networks_list + if getattr(network, 'project_id', '') == args.project_id] + search_opts = {'all_tenants': 1} + groups = cs.security_groups.list(search_opts) + groups = [group for group in groups + if group.tenant_id == args.project_id] + for network in networks_list: + cs.networks.disassociate(network) + for group in groups: + cs.security_groups.delete(group) + + +def do_network_list(cs, _args): + """Print a list of available networks.""" + network_list = cs.networks.list() + columns = ['ID', 'Label', 'Cidr'] + utils.print_list(network_list, columns) + + +@utils.arg('network', + metavar='', + help="uuid or label of network") +def do_network_show(cs, args): + """Show details about the given network.""" + network = utils.find_resource(cs.networks, args.network) + utils.print_dict(network._info) + + +@utils.arg('--host-only', + dest='host_only', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=0) +@utils.arg('--project-only', + dest='project_only', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=0) +@utils.arg('network', + metavar='', + help="uuid of network") +def do_network_disassociate(cs, args): + """Disassociate host and/or project from the given network.""" + if args.host_only: + cs.networks.disassociate(args.network, True, False) + elif args.project_only: + cs.networks.disassociate(args.network, False, True) + else: + cs.networks.disassociate(args.network, True, True) + + +@utils.arg('network', + metavar='', + help="uuid of network") +@utils.arg('host', + metavar='', + help="Name of host") +def do_network_associate_host(cs, args): + """Associate host with network.""" + cs.networks.associate_host(args.network, args.host) + + +@utils.arg('network', + metavar='', + help="uuid of network") +def do_network_associate_project(cs, args): + """Associate project with network.""" + cs.networks.associate_project(args.network) + + +def _filter_network_create_options(args): + valid_args = ['label', 'cidr', 'vlan_start', 'vpn_start', 'cidr_v6', + 'gateway', 'gateway_v6', 'bridge', 'bridge_interface', + 'multi_host', 'dns1', 'dns2', 'uuid', 'fixed_cidr', + 'project_id', 'priority'] + kwargs = {} + for k, v 
in args.__dict__.items(): + if k in valid_args and v is not None: + kwargs[k] = v + + return kwargs + + +@utils.arg('label', + metavar='', + help="Label for network") +@utils.arg('--fixed-range-v4', + dest='cidr', + metavar='', + help="IPv4 subnet (ex: 10.0.0.0/8)") +@utils.arg('--fixed-range-v6', + dest="cidr_v6", + help='IPv6 subnet (ex: fe80::/64') +@utils.arg('--vlan', + dest='vlan_start', + metavar='', + help="vlan id") +@utils.arg('--vpn', + dest='vpn_start', + metavar='', + help="vpn start") +@utils.arg('--gateway', + dest="gateway", + help='gateway') +@utils.arg('--gateway-v6', + dest="gateway_v6", + help='ipv6 gateway') +@utils.arg('--bridge', + dest="bridge", + metavar='', + help='VIFs on this network are connected to this bridge') +@utils.arg('--bridge-interface', + dest="bridge_interface", + metavar='', + help='the bridge is connected to this interface') +@utils.arg('--multi-host', + dest="multi_host", + metavar="<'T'|'F'>", + help='Multi host') +@utils.arg('--dns1', + dest="dns1", + metavar="", help='First DNS') +@utils.arg('--dns2', + dest="dns2", + metavar="", + help='Second DNS') +@utils.arg('--uuid', + dest="uuid", + metavar="", + help='Network UUID') +@utils.arg('--fixed-cidr', + dest="fixed_cidr", + metavar='', + help='IPv4 subnet for fixed IPS (ex: 10.20.0.0/16)') +@utils.arg('--project-id', + dest="project_id", + metavar="", + help='Project id') +@utils.arg('--priority', + dest="priority", + metavar="", + help='Network interface priority') +def do_network_create(cs, args): + """Create a network.""" + + if not (args.cidr or args.cidr_v6): + raise exceptions.CommandError( + "Must specify eith fixed_range_v4 or fixed_range_v6") + kwargs = _filter_network_create_options(args) + if args.multi_host is not None: + kwargs['multi_host'] = bool(args.multi_host == 'T' or + strutils.bool_from_string(args.multi_host)) + + cs.networks.create(**kwargs) + + +@utils.arg('--limit', + dest="limit", + metavar="", + help='number of images to return per request') +def do_image_list(cs, _args): + """Print a list of available images to boot from.""" + limit = _args.limit + image_list = cs.images.list(limit=limit) + + def parse_server_name(image): + try: + return image.server['id'] + except (AttributeError, KeyError): + return '' + + fmts = {'Server': parse_server_name} + utils.print_list(image_list, ['ID', 'Name', 'Status', 'Server'], + fmts, sortby_index=1) + + +@utils.arg('image', + metavar='', + help="Name or ID of image") +@utils.arg('action', + metavar='', + choices=['set', 'delete'], + help="Actions: 'set' or 'delete'") +@utils.arg('metadata', + metavar='', + nargs='+', + action='append', + default=[], + help='Metadata to add/update or delete (only key is necessary on delete)') +def do_image_meta(cs, args): + """Set or Delete metadata on an image.""" + image = _find_image(cs, args.image) + metadata = _extract_metadata(args) + + if args.action == 'set': + cs.images.set_meta(image, metadata) + elif args.action == 'delete': + cs.images.delete_meta(image, metadata.keys()) + + +def _extract_metadata(args): + metadata = {} + for metadatum in args.metadata[0]: + # Can only pass the key in on 'delete' + # So this doesn't have to have '=' + if metadatum.find('=') > -1: + (key, value) = metadatum.split('=', 1) + else: + key = metadatum + value = None + + metadata[key] = value + return metadata + + +def _print_image(image): + info = image._info.copy() + + # ignore links, we don't need to present those + info.pop('links') + + # try to replace a server entity to just an id + server = 
info.pop('server', None) + try: + info['server'] = server['id'] + except (KeyError, TypeError): + pass + + # break up metadata and display each on its own row + metadata = info.pop('metadata', {}) + try: + for key, value in metadata.items(): + _key = 'metadata %s' % key + info[_key] = value + except AttributeError: + pass + + utils.print_dict(info) + + +def _print_flavor(flavor): + info = flavor._info.copy() + # ignore links, we don't need to present those + info.pop('links') + info.update({"extra_specs": _print_flavor_extra_specs(flavor)}) + utils.print_dict(info) + + +@utils.arg('image', + metavar='', + help="Name or ID of image") +def do_image_show(cs, args): + """Show details about the given image.""" + image = _find_image(cs, args.image) + _print_image(image) + + +@utils.arg('image', metavar='', nargs='+', + help='Name or ID of image(s).') +def do_image_delete(cs, args): + """Delete specified image(s).""" + for image in args.image: + try: + _find_image(cs, image).delete() + except Exception as e: + print("Delete for image %s failed: %s" % (image, e)) + + +@utils.arg('--reservation-id', + dest='reservation_id', + metavar='', + default=None, + help='Only return instances that match reservation-id.') +@utils.arg('--reservation_id', + help=argparse.SUPPRESS) +@utils.arg('--ip', + dest='ip', + metavar='', + default=None, + help='Search with regular expression match by IP address (Admin only).') +@utils.arg('--ip6', + dest='ip6', + metavar='', + default=None, + help='Search with regular expression match by IPv6 address (Admin only).') +@utils.arg('--name', + dest='name', + metavar='', + default=None, + help='Search with regular expression match by name') +@utils.arg('--instance-name', + dest='instance_name', + metavar='', + default=None, + help='Search with regular expression match by instance name (Admin only).') +@utils.arg('--instance_name', + help=argparse.SUPPRESS) +@utils.arg('--status', + dest='status', + metavar='', + default=None, + help='Search by server status') +@utils.arg('--flavor', + dest='flavor', + metavar='', + default=None, + help='Search by flavor name or ID') +@utils.arg('--image', + dest='image', + metavar='', + default=None, + help='Search by image name or ID') +@utils.arg('--host', + dest='host', + metavar='', + default=None, + help='Search instances by hostname to which they are assigned ' + '(Admin only).') +@utils.arg('--all-tenants', + dest='all_tenants', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=int(utils.bool_from_str(os.environ.get("ALL_TENANTS", 'false'))), + help='Display information from all tenants (Admin only).') +@utils.arg('--all_tenants', + nargs='?', + type=int, + const=1, + help=argparse.SUPPRESS) +@utils.arg('--tenant', + #nova db searches by project_id + dest='tenant', + metavar='', + nargs='?', + help='Display information from single tenant (Admin only).') +@utils.arg('--fields', + default=None, + metavar='', + help='Comma-separated list of fields to display. 
' + 'Use the show command to see which fields are available.') +def do_list(cs, args): + """List active servers.""" + imageid = None + flavorid = None + if args.image: + imageid = _find_image(cs, args.image).id + if args.flavor: + flavorid = _find_flavor(cs, args.flavor).id + search_opts = { + 'all_tenants': args.all_tenants, + 'reservation_id': args.reservation_id, + 'ip': args.ip, + 'ip6': args.ip6, + 'name': args.name, + 'image': imageid, + 'flavor': flavorid, + 'status': args.status, + 'tenant_id': args.tenant, + 'host': args.host, + 'instance_name': args.instance_name} + + filters = {'flavor': lambda f: f['id'], + 'security_groups': utils._format_security_groups} + + formatters = {} + field_titles = [] + if args.fields: + for field in args.fields.split(','): + field_title, formatter = utils._make_field_formatter(field, + filters) + field_titles.append(field_title) + formatters[field_title] = formatter + + id_col = 'ID' + + servers = cs.servers.list(search_opts=search_opts) + convert = [('OS-EXT-SRV-ATTR:host', 'host'), + ('OS-EXT-STS:task_state', 'task_state'), + ('OS-EXT-SRV-ATTR:instance_name', 'instance_name'), + ('OS-EXT-STS:power_state', 'power_state'), + ('hostId', 'host_id')] + _translate_keys(servers, convert) + _translate_extended_states(servers) + if field_titles: + columns = [id_col] + field_titles + else: + columns = [ + id_col, + 'Name', + 'Status', + 'Task State', + 'Power State', + 'Networks' + ] + formatters['Networks'] = utils._format_servers_list_networks + utils.print_list(servers, columns, + formatters, sortby_index=1) + + +@utils.arg('--hard', + dest='reboot_type', + action='store_const', + const=servers.REBOOT_HARD, + default=servers.REBOOT_SOFT, + help='Perform a hard reboot (instead of a soft one).') +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance is rebooting.') +def do_reboot(cs, args): + """Reboot a server.""" + server = _find_server(cs, args.server) + server.reboot(args.reboot_type) + + if args.poll: + _poll_for_status(cs.servers.get, server.id, 'rebooting', ['active'], + show_progress=False) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('image', metavar='', help="Name or ID of new image.") +@utils.arg('--rebuild-password', + dest='rebuild_password', + metavar='', + default=False, + help="Set the provided password on the rebuild instance.") +@utils.arg('--rebuild_password', + help=argparse.SUPPRESS) +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance rebuilds so progress can be reported.') +@utils.arg('--minimal', + dest='minimal', + action="store_true", + default=False, + help='Skips flavor/image lookups when showing instances') +def do_rebuild(cs, args): + """Shutdown, re-image, and re-boot a server.""" + server = _find_server(cs, args.server) + image = _find_image(cs, args.image) + + if args.rebuild_password is not False: + _password = args.rebuild_password + else: + _password = None + + kwargs = utils.get_resource_manager_extra_kwargs(do_rebuild, args) + server.rebuild(image, _password, **kwargs) + _print_server(cs, args) + + if args.poll: + _poll_for_status(cs.servers.get, server.id, 'rebuilding', ['active']) + + +@utils.arg('server', metavar='', + help='Name (old name) or ID of server.') +@utils.arg('name', metavar='', help='New name for the server.') +def do_rename(cs, args): + """Rename a server.""" + _find_server(cs, 
args.server).update(name=args.name) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('flavor', metavar='', help="Name or ID of new flavor.") +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance resizes so progress can be reported.') +def do_resize(cs, args): + """Resize a server.""" + server = _find_server(cs, args.server) + flavor = _find_flavor(cs, args.flavor) + kwargs = utils.get_resource_manager_extra_kwargs(do_resize, args) + server.resize(flavor, **kwargs) + if args.poll: + _poll_for_status(cs.servers.get, server.id, 'resizing', + ['active', 'verify_resize']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_resize_confirm(cs, args): + """Confirm a previous resize.""" + _find_server(cs, args.server).confirm_resize() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_resize_revert(cs, args): + """Revert a previous resize (and return to the previous VM).""" + _find_server(cs, args.server).revert_resize() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance migrates so progress can be reported.') +def do_migrate(cs, args): + """Migrate a server. The new host will be selected by the scheduler.""" + server = _find_server(cs, args.server) + server.migrate() + + if args.poll: + _poll_for_status(cs.servers.get, server.id, 'migrating', + ['active', 'verify_resize']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_pause(cs, args): + """Pause a server.""" + _find_server(cs, args.server).pause() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_unpause(cs, args): + """Unpause a server.""" + _find_server(cs, args.server).unpause() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_stop(cs, args): + """Stop a server.""" + _find_server(cs, args.server).stop() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_start(cs, args): + """Start a server.""" + _find_server(cs, args.server).start() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_lock(cs, args): + """Lock a server.""" + _find_server(cs, args.server).lock() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_unlock(cs, args): + """Unlock a server.""" + _find_server(cs, args.server).unlock() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_suspend(cs, args): + """Suspend a server.""" + _find_server(cs, args.server).suspend() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_resume(cs, args): + """Resume a server.""" + _find_server(cs, args.server).resume() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_rescue(cs, args): + """Rescue a server.""" + utils.print_dict(_find_server(cs, args.server).rescue()[1]) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_unrescue(cs, args): + """Unrescue a server.""" + _find_server(cs, args.server).unrescue() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_diagnostics(cs, args): + """Retrieve server diagnostics.""" + server = _find_server(cs, args.server) + utils.print_dict(cs.servers.diagnostics(server)[1]) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_root_password(cs, args): + """ + Change the root password for a server. 
+ """ + server = _find_server(cs, args.server) + p1 = getpass.getpass('New password: ') + p2 = getpass.getpass('Again: ') + if p1 != p2: + raise exceptions.CommandError("Passwords do not match.") + server.change_password(p1) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('name', metavar='', help='Name of snapshot.') +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance snapshots so progress can be reported.') +def do_image_create(cs, args): + """Create a new image by taking a snapshot of a running server.""" + server = _find_server(cs, args.server) + image_uuid = cs.servers.create_image(server, args.name) + + if args.poll: + _poll_for_status(cs.images.get, image_uuid, 'snapshotting', + ['active']) + + # NOTE(sirp): A race-condition exists between when the image finishes + # uploading and when the servers's `task_state` is cleared. To account + # for this, we need to poll a second time to ensure the `task_state` is + # cleared before returning, ensuring that a snapshot taken immediately + # after this function returns will succeed. + # + # A better long-term solution will be to separate 'snapshotting' and + # 'image-uploading' in Nova and clear the task-state once the VM + # snapshot is complete but before the upload begins. + task_state_field = "OS-EXT-STS:task_state" + if hasattr(server, task_state_field): + _poll_for_status(cs.servers.get, server.id, 'image_snapshot', + [None], status_field=task_state_field, + show_progress=False, silent=True) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('name', metavar='', help='Name of the backup image.') +@utils.arg('backup_type', metavar='', + help='The backup type, like "daily" or "weekly".') +@utils.arg('rotation', metavar='', + help='Int parameter representing how many backups to keep around.') +def do_backup(cs, args): + """Backup a instance by create a 'backup' type snapshot.""" + _find_server(cs, args.server).backup(args.name, + args.backup_type, + args.rotation) + + +@utils.arg('server', + metavar='', + help="Name or ID of server") +@utils.arg('action', + metavar='', + choices=['set', 'delete'], + help="Actions: 'set' or 'delete'") +@utils.arg('metadata', + metavar='', + nargs='+', + action='append', + default=[], + help='Metadata to set or delete (only key is necessary on delete)') +def do_meta(cs, args): + """Set or Delete metadata on a server.""" + server = _find_server(cs, args.server) + metadata = _extract_metadata(args) + + if args.action == 'set': + cs.servers.set_meta(server, metadata) + elif args.action == 'delete': + cs.servers.delete_meta(server, metadata.keys()) + + +def _print_server(cs, args): + # By default when searching via name we will do a + # findall(name=blah) and due a REST /details which is not the same + # as a .get() and doesn't get the information about flavors and + # images. This fix it as we redo the call with the id which does a + # .get() to get all informations. 
+ server = _find_server(cs, args.server) + + networks = server.networks + info = server._info.copy() + for network_label, address_list in networks.items(): + info['%s network' % network_label] = ', '.join(address_list) + + flavor = info.get('flavor', {}) + flavor_id = flavor.get('id', '') + if args.minimal: + info['flavor'] = flavor_id + else: + info['flavor'] = '%s (%s)' % (_find_flavor(cs, flavor_id).name, + flavor_id) + + image = info.get('image', {}) + if image: + image_id = image.get('id', '') + if args.minimal: + info['image'] = image_id + else: + try: + info['image'] = '%s (%s)' % (_find_image(cs, image_id).name, + image_id) + except Exception: + info['image'] = '%s (%s)' % ("Image not found", image_id) + else: # Booted from volume + info['image'] = "Attempt to boot from volume - no image supplied" + + info.pop('links', None) + info.pop('addresses', None) + + utils.print_dict(info) + + +@utils.arg('--minimal', + dest='minimal', + action="store_true", + default=False, + help='Skips flavor/image lookups when showing instances') +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_show(cs, args): + """Show details about the given server.""" + _print_server(cs, args) + + +@utils.arg('server', metavar='', nargs='+', + help='Name or ID of server(s).') +def do_delete(cs, args): + """Immediately shut down and delete specified server(s).""" + failure_count = 0 + + for server in args.server: + try: + _find_server(cs, server).delete() + except Exception as e: + failure_count += 1 + print(e) + + if failure_count == len(args.server): + raise exceptions.CommandError("Unable to delete any of the specified " + "servers.") + + +def _find_server(cs, server): + """Get a server by name or ID.""" + return utils.find_resource(cs.servers, server) + + +def _find_image(cs, image): + """Get an image by name or ID.""" + return utils.find_resource(cs.images, image) + + +def _find_flavor_for_admin(cs, flavor): + """Get a flavor for administrator by name, ID, or RAM size.""" + try: + return utils.find_resource(cs.flavors, flavor, is_public=None) + except exceptions.NotFound: + return cs.flavors.find(ram=flavor) + + +def _find_flavor(cs, flavor): + """Get a flavor by name, ID, or RAM size.""" + try: + return utils.find_resource(cs.flavors, flavor) + except exceptions.NotFound: + return cs.flavors.find(ram=flavor) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('network_id', + metavar='', + help='Network ID.') +def do_add_fixed_ip(cs, args): + """Add new IP address on a network to server.""" + server = _find_server(cs, args.server) + server.add_fixed_ip(args.network_id) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('address', metavar='
', help='IP Address.') +def do_remove_fixed_ip(cs, args): + """Remove an IP address from a server.""" + server = _find_server(cs, args.server) + server.remove_fixed_ip(args.address) + + +def _find_volume(cs, volume): + """Get a volume by name or ID.""" + return utils.find_resource(cs.volumes, volume) + + +def _find_volume_snapshot(cs, snapshot): + """Get a volume snapshot by name or ID.""" + return utils.find_resource(cs.volume_snapshots, snapshot) + + +def _print_volume(volume): + utils.print_dict(volume._info) + + +def _print_volume_snapshot(snapshot): + utils.print_dict(snapshot._info) + + +def _translate_volume_keys(collection): + _translate_keys(collection, + [('displayName', 'display_name'), + ('volumeType', 'volume_type')]) + + +def _translate_volume_snapshot_keys(collection): + _translate_keys(collection, + [('displayName', 'display_name'), + ('volumeId', 'volume_id')]) + + +def _translate_availability_zone_keys(collection): + _translate_keys(collection, + [('zoneName', 'name'), ('zoneState', 'status')]) + + +@utils.arg('--all-tenants', + dest='all_tenants', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=int(utils.bool_from_str(os.environ.get("ALL_TENANTS", 'false'))), + help='Display information from all tenants (Admin only).') +@utils.arg('--all_tenants', + nargs='?', + type=int, + const=1, + help=argparse.SUPPRESS) +@utils.service_type('volume') +def do_volume_list(cs, args): + """List all the volumes.""" + search_opts = {'all_tenants': args.all_tenants} + volumes = cs.volumes.list(search_opts=search_opts) + _translate_volume_keys(volumes) + + # Create a list of servers to which the volume is attached + for vol in volumes: + servers = [s.get('server_id') for s in vol.attachments] + setattr(vol, 'attached_to', ','.join(map(str, servers))) + utils.print_list(volumes, ['ID', 'Status', 'Display Name', + 'Size', 'Volume Type', 'Attached to']) + + +@utils.arg('volume', metavar='', help='Name or ID of the volume.') +@utils.service_type('volume') +def do_volume_show(cs, args): + """Show details about a volume.""" + volume = _find_volume(cs, args.volume) + _print_volume(volume) + + +@utils.arg('size', + metavar='', + type=int, + help='Size of volume in GB') +@utils.arg('--snapshot-id', + metavar='', + default=None, + help='Optional snapshot id to create the volume from. (Default=None)') +@utils.arg('--snapshot_id', + help=argparse.SUPPRESS) +@utils.arg('--image-id', + metavar='', + help='Optional image id to create the volume from. (Default=None)', + default=None) +@utils.arg('--display-name', + metavar='', + default=None, + help='Optional volume name. (Default=None)') +@utils.arg('--display_name', + help=argparse.SUPPRESS) +@utils.arg('--display-description', + metavar='', + default=None, + help='Optional volume description. (Default=None)') +@utils.arg('--display_description', + help=argparse.SUPPRESS) +@utils.arg('--volume-type', + metavar='', + default=None, + help='Optional volume type. (Default=None)') +@utils.arg('--volume_type', + help=argparse.SUPPRESS) +@utils.arg('--availability-zone', metavar='', + help='Optional Availability Zone for volume. 
(Default=None)', + default=None) +@utils.service_type('volume') +def do_volume_create(cs, args): + """Add a new volume.""" + volume = cs.volumes.create(args.size, + args.snapshot_id, + args.display_name, + args.display_description, + args.volume_type, + args.availability_zone, + imageRef=args.image_id) + _print_volume(volume) + + +@utils.arg('volume', + metavar='', + help='Name or ID of the volume to delete.') +@utils.service_type('volume') +def do_volume_delete(cs, args): + """Remove a volume.""" + volume = _find_volume(cs, args.volume) + volume.delete() + + +@utils.arg('server', + metavar='', + help='Name or ID of server.') +@utils.arg('volume', + metavar='', + help='ID of the volume to attach.') +@utils.arg('device', metavar='', + help='Name of the device e.g. /dev/vdb. ' + 'Use "auto" for autoassign (if supported)') +def do_volume_attach(cs, args): + """Attach a volume to a server.""" + if args.device == 'auto': + args.device = None + + volume = cs.volumes.create_server_volume(_find_server(cs, args.server).id, + args.volume, + args.device) + _print_volume(volume) + + +@utils.arg('server', + metavar='', + help='Name or ID of server.') +@utils.arg('attachment_id', + metavar='', + help='Attachment ID of the volume.') +@utils.arg('new_volume', + metavar='', + help='ID of the volume to attach.') +def do_volume_update(cs, args): + """Update volume attachment.""" + volume = cs.volumes.update_server_volume(_find_server(cs, args.server).id, + args.attachment_id, + args.new_volume) + _print_volume(volume) + + +@utils.arg('server', + metavar='', + help='Name or ID of server.') +@utils.arg('attachment_id', + metavar='', + help='Attachment ID of the volume.') +def do_volume_detach(cs, args): + """Detach a volume from a server.""" + cs.volumes.delete_server_volume(_find_server(cs, args.server).id, + args.attachment_id) + + +@utils.service_type('volume') +def do_volume_snapshot_list(cs, _args): + """List all the snapshots.""" + snapshots = cs.volume_snapshots.list() + _translate_volume_snapshot_keys(snapshots) + utils.print_list(snapshots, ['ID', 'Volume ID', 'Status', 'Display Name', + 'Size']) + + +@utils.arg('snapshot', + metavar='', + help='Name or ID of the snapshot.') +@utils.service_type('volume') +def do_volume_snapshot_show(cs, args): + """Show details about a snapshot.""" + snapshot = _find_volume_snapshot(cs, args.snapshot) + _print_volume_snapshot(snapshot) + + +@utils.arg('volume_id', + metavar='', + help='ID of the volume to snapshot') +@utils.arg('--force', + metavar='', + help='Optional flag to indicate whether to snapshot a volume even if its ' + 'attached to an instance. (Default=False)', + default=False) +@utils.arg('--display-name', + metavar='', + default=None, + help='Optional snapshot name. (Default=None)') +@utils.arg('--display_name', + help=argparse.SUPPRESS) +@utils.arg('--display-description', + metavar='', + default=None, + help='Optional snapshot description. 
(Default=None)') +@utils.arg('--display_description', + help=argparse.SUPPRESS) +@utils.service_type('volume') +def do_volume_snapshot_create(cs, args): + """Add a new snapshot.""" + snapshot = cs.volume_snapshots.create(args.volume_id, + args.force, + args.display_name, + args.display_description) + _print_volume_snapshot(snapshot) + + +@utils.arg('snapshot', + metavar='', + help='Name or ID of the snapshot to delete.') +@utils.service_type('volume') +def do_volume_snapshot_delete(cs, args): + """Remove a snapshot.""" + snapshot = _find_volume_snapshot(cs, args.snapshot) + snapshot.delete() + + +def _print_volume_type_list(vtypes): + utils.print_list(vtypes, ['ID', 'Name']) + + +@utils.service_type('volume') +def do_volume_type_list(cs, args): + """Print a list of available 'volume types'.""" + vtypes = cs.volume_types.list() + _print_volume_type_list(vtypes) + + +@utils.arg('name', + metavar='', + help="Name of the new flavor") +@utils.service_type('volume') +def do_volume_type_create(cs, args): + """Create a new volume type.""" + vtype = cs.volume_types.create(args.name) + _print_volume_type_list([vtype]) + + +@utils.arg('id', + metavar='', + help="Unique ID of the volume type to delete") +@utils.service_type('volume') +def do_volume_type_delete(cs, args): + """Delete a specific flavor""" + cs.volume_types.delete(args.id) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('console_type', + metavar='', + help='Type of vnc console ("novnc" or "xvpvnc").') +def do_get_vnc_console(cs, args): + """Get a vnc console to a server.""" + server = _find_server(cs, args.server) + data = server.get_vnc_console(args.console_type) + + class VNCConsole: + def __init__(self, console_dict): + self.type = console_dict['type'] + self.url = console_dict['url'] + + utils.print_list([VNCConsole(data['console'])], ['Type', 'Url']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('console_type', + metavar='', + help='Type of spice console ("spice-html5").') +def do_get_spice_console(cs, args): + """Get a spice console to a server.""" + server = _find_server(cs, args.server) + data = server.get_spice_console(args.console_type) + + class SPICEConsole: + def __init__(self, console_dict): + self.type = console_dict['type'] + self.url = console_dict['url'] + + utils.print_list([SPICEConsole(data['console'])], ['Type', 'Url']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('private_key', + metavar='', + help='Private key (used locally to decrypt password).') +def do_get_password(cs, args): + """Get password for a server.""" + server = _find_server(cs, args.server) + data = server.get_password(args.private_key) + print(data) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_clear_password(cs, args): + """Clear password for a server.""" + server = _find_server(cs, args.server) + server.clear_password() + + +def _print_floating_ip_list(floating_ips): + utils.print_list(floating_ips, ['Ip', 'Instance Id', 'Fixed Ip', 'Pool']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--length', + metavar='', + default=None, + help='Length in lines to tail.') +def do_console_log(cs, args): + """Get console log output of a server.""" + server = _find_server(cs, args.server) + data = server.get_console_output(length=args.length) + print(data) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('address', metavar='
', help='IP Address.') +@utils.arg('--fixed-address', + metavar='', + default=None, + help='Fixed IP Address to associate with.') +def do_add_floating_ip(cs, args): + """Add a floating IP address to a server.""" + server = _find_server(cs, args.server) + server.add_floating_ip(args.address, args.fixed_address) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('address', metavar='
', help='IP Address.') +def do_remove_floating_ip(cs, args): + """Remove a floating IP address from a server.""" + server = _find_server(cs, args.server) + server.remove_floating_ip(args.address) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('secgroup', metavar='', help='Name of Security Group.') +def do_add_secgroup(cs, args): + """Add a Security Group to a server.""" + server = _find_server(cs, args.server) + server.add_security_group(args.secgroup) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('secgroup', metavar='', help='Name of Security Group.') +def do_remove_secgroup(cs, args): + """Remove a Security Group from a server.""" + server = _find_server(cs, args.server) + server.remove_security_group(args.secgroup) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_list_secgroup(cs, args): + """List Security Group(s) of a server.""" + server = _find_server(cs, args.server) + groups = server.list_security_group() + _print_secgroups(groups) + + +@utils.arg('pool', + metavar='', + help='Name of Floating IP Pool. (Optional)', + nargs='?', + default=None) +def do_floating_ip_create(cs, args): + """Allocate a floating IP for the current tenant.""" + _print_floating_ip_list([cs.floating_ips.create(pool=args.pool)]) + + +@utils.arg('address', metavar='
', help='IP of Floating Ip.') +def do_floating_ip_delete(cs, args): + """De-allocate a floating IP.""" + floating_ips = cs.floating_ips.list() + for floating_ip in floating_ips: + if floating_ip.ip == args.address: + return cs.floating_ips.delete(floating_ip.id) + raise exceptions.CommandError("Floating ip %s not found." % args.address) + + +def do_floating_ip_list(cs, _args): + """List floating ips for this tenant.""" + _print_floating_ip_list(cs.floating_ips.list()) + + +def do_floating_ip_pool_list(cs, _args): + """List all floating ip pools.""" + utils.print_list(cs.floating_ip_pools.list(), ['name']) + + +@utils.arg('--host', dest='host', metavar='', default=None, + help='Filter by host') +def do_floating_ip_bulk_list(cs, args): + """List all floating ips.""" + utils.print_list(cs.floating_ips_bulk.list(args.host), ['project_id', + 'address', + 'instance_uuid', + 'pool', + 'interface']) + + +@utils.arg('ip_range', metavar='', help='Address range to create') +@utils.arg('--pool', dest='pool', metavar='', default=None, + help='Pool for new Floating IPs') +@utils.arg('--interface', metavar='', default=None, + help='Interface for new Floating IPs') +def do_floating_ip_bulk_create(cs, args): + """Bulk create floating ips by range.""" + cs.floating_ips_bulk.create(args.ip_range, args.pool, args.interface) + + +@utils.arg('ip_range', metavar='', help='Address range to delete') +def do_floating_ip_bulk_delete(cs, args): + """Bulk delete floating ips by range.""" + cs.floating_ips_bulk.delete(args.ip_range) + + +def _print_dns_list(dns_entries): + utils.print_list(dns_entries, ['ip', 'name', 'domain']) + + +def _print_domain_list(domain_entries): + utils.print_list(domain_entries, ['domain', 'scope', + 'project', 'availability_zone']) + + +def do_dns_domains(cs, args): + """Print a list of available dns domains.""" + domains = cs.dns_domains.domains() + _print_domain_list(domains) + + +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('--ip', metavar='', help='ip address', default=None) +@utils.arg('--name', metavar='', help='DNS name', default=None) +def do_dns_list(cs, args): + """List current DNS entries for domain and ip or domain and name.""" + if not (args.ip or args.name): + raise exceptions.CommandError( + "You must specify either --ip or --name") + if args.name: + entry = cs.dns_entries.get(args.domain, args.name) + _print_dns_list([entry]) + else: + entries = cs.dns_entries.get_for_ip(args.domain, + ip=args.ip) + _print_dns_list(entries) + + +@utils.arg('ip', metavar='', help='ip address') +@utils.arg('name', metavar='', help='DNS name') +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('--type', metavar='', help='dns type (e.g. 
"A")', default='A') +def do_dns_create(cs, args): + """Create a DNS entry for domain, name and ip.""" + cs.dns_entries.create(args.domain, args.name, args.ip, args.type) + + +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('name', metavar='', help='DNS name') +def do_dns_delete(cs, args): + """Delete the specified DNS entry.""" + cs.dns_entries.delete(args.domain, args.name) + + +@utils.arg('domain', metavar='', help='DNS domain') +def do_dns_delete_domain(cs, args): + """Delete the specified DNS domain.""" + cs.dns_domains.delete(args.domain) + + +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('--availability-zone', + metavar='', + default=None, + help='Limit access to this domain to instances ' + 'in the specified availability zone.') +@utils.arg('--availability_zone', + help=argparse.SUPPRESS) +def do_dns_create_private_domain(cs, args): + """Create the specified DNS domain.""" + cs.dns_domains.create_private(args.domain, + args.availability_zone) + + +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('--project', metavar='', + help='Limit access to this domain to users ' + 'of the specified project.', + default=None) +def do_dns_create_public_domain(cs, args): + """Create the specified DNS domain.""" + cs.dns_domains.create_public(args.domain, + args.project) + + +def _print_secgroup_rules(rules): + class FormattedRule: + def __init__(self, obj): + items = (obj if isinstance(obj, dict) else obj._info).items() + for k, v in items: + if k == 'ip_range': + v = v.get('cidr') + elif k == 'group': + k = 'source_group' + v = v.get('name') + if v is None: + v = '' + + setattr(self, k, v) + + rules = [FormattedRule(rule) for rule in rules] + utils.print_list(rules, ['IP Protocol', 'From Port', 'To Port', + 'IP Range', 'Source Group']) + + +def _print_secgroups(secgroups): + utils.print_list(secgroups, ['Id', 'Name', 'Description']) + + +def _get_secgroup(cs, secgroup): + # Check secgroup is an ID + if utils.is_integer_like(strutils.safe_encode(secgroup)): + try: + return cs.security_groups.get(secgroup) + except exceptions.NotFound: + pass + + # Check secgroup as a name + match_found = False + for s in cs.security_groups.list(): + encoding = (locale.getpreferredencoding() or + sys.stdin.encoding or + 'UTF-8') + s.name = s.name.encode(encoding) + if secgroup == s.name: + if match_found != False: + msg = ("Multiple security group matches found for name" + " '%s', use an ID to be more specific." % secgroup) + raise exceptions.NoUniqueMatch(msg) + match_found = s + if match_found is False: + raise exceptions.CommandError("Secgroup ID or name '%s' not found." 
+ % secgroup) + return match_found + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +@utils.arg('ip_proto', + metavar='', + help='IP protocol (icmp, tcp, udp).') +@utils.arg('from_port', + metavar='', + help='Port at start of range.') +@utils.arg('to_port', + metavar='', + help='Port at end of range.') +@utils.arg('cidr', metavar='', help='CIDR for address range.') +def do_secgroup_add_rule(cs, args): + """Add a rule to a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + rule = cs.security_group_rules.create(secgroup.id, + args.ip_proto, + args.from_port, + args.to_port, + args.cidr) + _print_secgroup_rules([rule]) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +@utils.arg('ip_proto', + metavar='', + help='IP protocol (icmp, tcp, udp).') +@utils.arg('from_port', + metavar='', + help='Port at start of range.') +@utils.arg('to_port', + metavar='', + help='Port at end of range.') +@utils.arg('cidr', metavar='', help='CIDR for address range.') +def do_secgroup_delete_rule(cs, args): + """Delete a rule from a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + for rule in secgroup.rules: + if (rule['ip_protocol'] and + rule['ip_protocol'].upper() == args.ip_proto.upper() and + rule['from_port'] == int(args.from_port) and + rule['to_port'] == int(args.to_port) and + rule['ip_range']['cidr'] == args.cidr): + _print_secgroup_rules([rule]) + return cs.security_group_rules.delete(rule['id']) + + raise exceptions.CommandError("Rule not found") + + +@utils.arg('name', metavar='', help='Name of security group.') +@utils.arg('description', metavar='', + help='Description of security group.') +def do_secgroup_create(cs, args): + """Create a security group.""" + secgroup = cs.security_groups.create(args.name, args.description) + _print_secgroups([secgroup]) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +@utils.arg('name', metavar='', help='Name of security group.') +@utils.arg('description', metavar='', + help='Description of security group.') +def do_secgroup_update(cs, args): + """Update a security group.""" + sg = _get_secgroup(cs, args.secgroup) + secgroup = cs.security_groups.update(sg, args.name, args.description) + _print_secgroups([secgroup]) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +def do_secgroup_delete(cs, args): + """Delete a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + cs.security_groups.delete(secgroup) + _print_secgroups([secgroup]) + + +@utils.arg('--all-tenants', + dest='all_tenants', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=int(utils.bool_from_str(os.environ.get("ALL_TENANTS", 'false'))), + help='Display information from all tenants (Admin only).') +@utils.arg('--all_tenants', + nargs='?', + type=int, + const=1, + help=argparse.SUPPRESS) +def do_secgroup_list(cs, args): + """List security groups for the current tenant.""" + search_opts = {'all_tenants': args.all_tenants} + columns = ['Id', 'Name', 'Description'] + if args.all_tenants: + columns.append('Tenant_ID') + groups = cs.security_groups.list(search_opts=search_opts) + utils.print_list(groups, columns) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +def do_secgroup_list_rules(cs, args): + """List rules for a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + _print_secgroup_rules(secgroup.rules) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of 
security group.') +@utils.arg('source_group', + metavar='', + help='ID or name of source group.') +@utils.arg('ip_proto', + metavar='', + help='IP protocol (icmp, tcp, udp).') +@utils.arg('from_port', + metavar='', + help='Port at start of range.') +@utils.arg('to_port', + metavar='', + help='Port at end of range.') +def do_secgroup_add_group_rule(cs, args): + """Add a source group rule to a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + source_group = _get_secgroup(cs, args.source_group) + params = {} + params['group_id'] = source_group.id + + if args.ip_proto or args.from_port or args.to_port: + if not (args.ip_proto and args.from_port and args.to_port): + raise exceptions.CommandError("ip_proto, from_port, and to_port" + " must be specified together") + params['ip_protocol'] = args.ip_proto.upper() + params['from_port'] = args.from_port + params['to_port'] = args.to_port + + rule = cs.security_group_rules.create(secgroup.id, **params) + _print_secgroup_rules([rule]) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +@utils.arg('source_group', + metavar='', + help='ID or name of source group.') +@utils.arg('ip_proto', + metavar='', + help='IP protocol (icmp, tcp, udp).') +@utils.arg('from_port', + metavar='', + help='Port at start of range.') +@utils.arg('to_port', + metavar='', + help='Port at end of range.') +def do_secgroup_delete_group_rule(cs, args): + """Delete a source group rule from a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + source_group = _get_secgroup(cs, args.source_group) + params = {} + params['group_name'] = source_group.name + + if args.ip_proto or args.from_port or args.to_port: + if not (args.ip_proto and args.from_port and args.to_port): + raise exceptions.CommandError("ip_proto, from_port, and to_port" + " must be specified together") + params['ip_protocol'] = args.ip_proto.upper() + params['from_port'] = int(args.from_port) + params['to_port'] = int(args.to_port) + + for rule in secgroup.rules: + if (rule.get('ip_protocol').upper() == params.get( + 'ip_protocol').upper() and + rule.get('from_port') == params.get('from_port') and + rule.get('to_port') == params.get('to_port') and + rule.get('group', {}).get('name') == + params.get('group_name')): + return cs.security_group_rules.delete(rule['id']) + + raise exceptions.CommandError("Rule not found") + + +@utils.arg('name', metavar='', help='Name of key.') +@utils.arg('--pub-key', + metavar='', + default=None, + help='Path to a public ssh key.') +@utils.arg('--pub_key', + help=argparse.SUPPRESS) +def do_keypair_add(cs, args): + """Create a new key pair for use with instances.""" + name = args.name + pub_key = args.pub_key + + if pub_key: + try: + with open(os.path.expanduser(pub_key)) as f: + pub_key = f.read() + except IOError as e: + raise exceptions.CommandError("Can't open or read '%s': %s" % + (pub_key, e)) + + keypair = cs.keypairs.create(name, pub_key) + + if not pub_key: + private_key = keypair.private_key + print(private_key) + + +@utils.arg('name', metavar='', help='Keypair name to delete.') +def do_keypair_delete(cs, args): + """Delete keypair given by its name.""" + name = args.name + cs.keypairs.delete(name) + + +def do_keypair_list(cs, args): + """Print a list of keypairs for a user""" + keypairs = cs.keypairs.list() + columns = ['Name', 'Fingerprint'] + utils.print_list(keypairs, columns) + + +def _print_keypair(keypair): + kp = keypair._info.copy() + pk = kp.pop('public_key') + utils.print_dict(kp) + print("Public key: %s" % 
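+          # public_key is popped above so print_dict() shows only the short
+          # metadata fields; the long key material is printed separately here.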
pk) + + +@utils.arg('keypair', + metavar='', + help="Name or ID of keypair") +def do_keypair_show(cs, args): + """Show details about the given keypair.""" + keypair = cs.keypairs.get(args.keypair) + _print_keypair(keypair) + + +@utils.arg('--tenant', + #nova db searches by project_id + dest='tenant', + metavar='', + nargs='?', + help='Display information from single tenant (Admin only).') +@utils.arg('--reserved', + dest='reserved', + action='store_true', + default=False, + help='Include reservations count.') +def do_absolute_limits(cs, args): + """Print a list of absolute limits for a user""" + limits = cs.limits.get(args.reserved, args.tenant).absolute + columns = ['Name', 'Value'] + utils.print_list(limits, columns) + + +def do_rate_limits(cs, args): + """Print a list of rate limits for a user""" + limits = cs.limits.get().rate + columns = ['Verb', 'URI', 'Value', 'Remain', 'Unit', 'Next_Available'] + utils.print_list(limits, columns) + + +@utils.arg('--start', metavar='', + help='Usage range start date ex 2012-01-20 (default: 4 weeks ago)', + default=None) +@utils.arg('--end', metavar='', + help='Usage range end date, ex 2012-01-20 (default: tomorrow) ', + default=None) +def do_usage_list(cs, args): + """List usage data for all tenants.""" + dateformat = "%Y-%m-%d" + rows = ["Tenant ID", "Instances", "RAM MB-Hours", "CPU Hours", + "Disk GB-Hours"] + + now = timeutils.utcnow() + + if args.start: + start = datetime.datetime.strptime(args.start, dateformat) + else: + start = now - datetime.timedelta(weeks=4) + + if args.end: + end = datetime.datetime.strptime(args.end, dateformat) + else: + end = now + datetime.timedelta(days=1) + + def simplify_usage(u): + simplerows = [x.lower().replace(" ", "_") for x in rows] + + setattr(u, simplerows[0], u.tenant_id) + setattr(u, simplerows[1], "%d" % len(u.server_usages)) + setattr(u, simplerows[2], "%.2f" % u.total_memory_mb_usage) + setattr(u, simplerows[3], "%.2f" % u.total_vcpus_usage) + setattr(u, simplerows[4], "%.2f" % u.total_local_gb_usage) + + usage_list = cs.usage.list(start, end, detailed=True) + + print("Usage from %s to %s:" % (start.strftime(dateformat), + end.strftime(dateformat))) + + for usage in usage_list: + simplify_usage(usage) + + utils.print_list(usage_list, rows) + + +@utils.arg('--start', metavar='', + help='Usage range start date ex 2012-01-20 (default: 4 weeks ago)', + default=None) +@utils.arg('--end', metavar='', + help='Usage range end date, ex 2012-01-20 (default: tomorrow) ', + default=None) +@utils.arg('--tenant', metavar='', + default=None, + help='UUID or name of tenant to get usage for.') +def do_usage(cs, args): + """Show usage data for a single tenant.""" + dateformat = "%Y-%m-%d" + rows = ["Instances", "RAM MB-Hours", "CPU Hours", "Disk GB-Hours"] + + now = timeutils.utcnow() + + if args.start: + start = datetime.datetime.strptime(args.start, dateformat) + else: + start = now - datetime.timedelta(weeks=4) + + if args.end: + end = datetime.datetime.strptime(args.end, dateformat) + else: + end = now + datetime.timedelta(days=1) + + def simplify_usage(u): + simplerows = [x.lower().replace(" ", "_") for x in rows] + + setattr(u, simplerows[0], "%d" % len(u.server_usages)) + setattr(u, simplerows[1], "%.2f" % u.total_memory_mb_usage) + setattr(u, simplerows[2], "%.2f" % u.total_vcpus_usage) + setattr(u, simplerows[3], "%.2f" % u.total_local_gb_usage) + + if args.tenant: + usage = cs.usage.get(args.tenant, start, end) + else: + usage = cs.usage.get(cs.client.tenant_id, start, end) + + print("Usage from %s to %s:" % 
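+          # Default reporting window (per the code above, when no --start or
+          # --end is given): from four weeks ago through tomorrow. A
+          # hypothetical `nova usage --start 2013-08-01 --end 2013-09-01`
+          # overrides both bounds.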
(start.strftime(dateformat), + end.strftime(dateformat))) + + if getattr(usage, 'total_vcpus_usage', None): + simplify_usage(usage) + utils.print_list([usage], rows) + else: + print('None') + + +@utils.arg('pk_filename', + metavar='', + nargs='?', + default='pk.pem', + help='Filename for the private key [Default: pk.pem]') +@utils.arg('cert_filename', + metavar='', + nargs='?', + default='cert.pem', + help='Filename for the X.509 certificate [Default: cert.pem]') +def do_x509_create_cert(cs, args): + """Create x509 cert for a user in tenant.""" + + if os.path.exists(args.pk_filename): + raise exceptions.CommandError("Unable to write privatekey - %s exists." + % args.pk_filename) + if os.path.exists(args.cert_filename): + raise exceptions.CommandError("Unable to write x509 cert - %s exists." + % args.cert_filename) + + certs = cs.certs.create() + + try: + old_umask = os.umask(0o377) + with open(args.pk_filename, 'w') as private_key: + private_key.write(certs.private_key) + print("Wrote private key to %s" % args.pk_filename) + finally: + os.umask(old_umask) + + with open(args.cert_filename, 'w') as cert: + cert.write(certs.data) + print("Wrote x509 certificate to %s" % args.cert_filename) + + +@utils.arg('filename', + metavar='', + nargs='?', + default='cacert.pem', + help='Filename to write the x509 root cert.') +def do_x509_get_root_cert(cs, args): + """Fetch the x509 root cert.""" + if os.path.exists(args.filename): + raise exceptions.CommandError("Unable to write x509 root cert - \ + %s exists." % args.filename) + + with open(args.filename, 'w') as cert: + cacert = cs.certs.get() + cert.write(cacert.data) + print("Wrote x509 root cert to %s" % args.filename) + + +@utils.arg('--hypervisor', metavar='', default=None, + help='type of hypervisor.') +def do_agent_list(cs, args): + """List all builds.""" + result = cs.agents.list(args.hypervisor) + columns = ["Agent_id", "Hypervisor", "OS", "Architecture", "Version", + 'Md5hash', 'Url'] + utils.print_list(result, columns) + + +@utils.arg('os', metavar='', help='type of os.') +@utils.arg('architecture', metavar='', + help='type of architecture') +@utils.arg('version', metavar='', help='version') +@utils.arg('url', metavar='', help='url') +@utils.arg('md5hash', metavar='', help='md5 hash') +@utils.arg('hypervisor', metavar='', default='xen', + help='type of hypervisor.') +def do_agent_create(cs, args): + """Create new agent build.""" + result = cs.agents.create(args.os, args.architecture, + args.version, args.url, + args.md5hash, args.hypervisor) + utils.print_dict(result._info.copy()) + + +@utils.arg('id', metavar='', help='id of the agent-build') +def do_agent_delete(cs, args): + """Delete existing agent build.""" + cs.agents.delete(args.id) + + +@utils.arg('id', metavar='', help='id of the agent-build') +@utils.arg('version', metavar='', help='version') +@utils.arg('url', metavar='', help='url') +@utils.arg('md5hash', metavar='', help='md5hash') +def do_agent_modify(cs, args): + """Modify existing agent build.""" + result = cs.agents.update(args.id, args.version, + args.url, args.md5hash) + utils.print_dict(result._info) + + +def _find_aggregate(cs, aggregate): + """Get a aggregate by name or ID.""" + return utils.find_resource(cs.aggregates, aggregate) + + +def do_aggregate_list(cs, args): + """Print a list of all aggregates.""" + aggregates = cs.aggregates.list() + columns = ['Id', 'Name', 'Availability Zone'] + utils.print_list(aggregates, columns) + + +@utils.arg('name', metavar='', help='Name of aggregate.') 
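+# A sketch of the aggregate workflow these commands implement, assuming the
+# usual novaclient shell mapping of do_* functions to subcommands
+# (names and values illustrative):
+#     nova aggregate-create rack1 nova-az1
+#     nova aggregate-add-host rack1 compute-03
+#     nova aggregate-set-metadata rack1 ssd=true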
+@utils.arg('availability_zone', + metavar='', + default=None, + nargs='?', + help='The availability zone of the aggregate (optional).') +def do_aggregate_create(cs, args): + """Create a new aggregate with the specified details.""" + aggregate = cs.aggregates.create(args.name, args.availability_zone) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', + help='Name or ID of aggregate to delete.') +def do_aggregate_delete(cs, args): + """Delete the aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + cs.aggregates.delete(aggregate) + print("Aggregate %s has been successfully deleted." % aggregate.id) + + +@utils.arg('aggregate', metavar='', + help='Name or ID of aggregate to update.') +@utils.arg('name', metavar='', help='Name of aggregate.') +@utils.arg('availability_zone', + metavar='', + nargs='?', + default=None, + help='The availability zone of the aggregate.') +def do_aggregate_update(cs, args): + """Update the aggregate's name and optionally availability zone.""" + aggregate = _find_aggregate(cs, args.aggregate) + updates = {"name": args.name} + if args.availability_zone: + updates["availability_zone"] = args.availability_zone + + aggregate = cs.aggregates.update(aggregate.id, updates) + print("Aggregate %s has been successfully updated." % aggregate.id) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', + help='Name or ID of aggregate to update.') +@utils.arg('metadata', + metavar='', + nargs='+', + action='append', + default=[], + help='Metadata to add/update to aggregate') +def do_aggregate_set_metadata(cs, args): + """Update the metadata associated with the aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + metadata = _extract_metadata(args) + aggregate = cs.aggregates.set_metadata(aggregate.id, metadata) + print("Aggregate %s has been successfully updated." % aggregate.id) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', help='Name or ID of aggregate.') +@utils.arg('host', metavar='', help='The host to add to the aggregate.') +def do_aggregate_add_host(cs, args): + """Add the host to the specified aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + aggregate = cs.aggregates.add_host(aggregate.id, args.host) + print("Aggregate %s has been successfully updated." % aggregate.id) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', help='Name or ID of aggregate.') +@utils.arg('host', metavar='', + help='The host to remove from the aggregate.') +def do_aggregate_remove_host(cs, args): + """Remove the specified host from the specified aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + aggregate = cs.aggregates.remove_host(aggregate.id, args.host) + print("Aggregate %s has been successfully updated." 
% aggregate.id)
+    _print_aggregate_details(aggregate)
+
+
+@utils.arg('aggregate', metavar='', help='Name or ID of aggregate.')
+def do_aggregate_details(cs, args):
+    """Show details of the specified aggregate."""
+    aggregate = _find_aggregate(cs, args.aggregate)
+    _print_aggregate_details(aggregate)
+
+
+def _print_aggregate_details(aggregate):
+    columns = ['Id', 'Name', 'Availability Zone', 'Hosts', 'Metadata']
+    utils.print_list([aggregate], columns)
+
+
+@utils.arg('server', metavar='', help='Name or ID of server.')
+@utils.arg('host', metavar='', default=None, nargs='?',
+           help='Destination host name.')
+@utils.arg('--block-migrate',
+           action='store_true',
+           dest='block_migrate',
+           default=False,
+           help='True in case of block migration. '
+                '(Default=False: live migration)')
+@utils.arg('--block_migrate',
+           action='store_true',
+           help=argparse.SUPPRESS)
+@utils.arg('--disk-over-commit',
+           action='store_true',
+           dest='disk_over_commit',
+           default=False,
+           help='Allow overcommit. (Default=False)')
+@utils.arg('--disk_over_commit',
+           action='store_true',
+           help=argparse.SUPPRESS)
+def do_live_migration(cs, args):
+    """Migrate a running instance to a new machine."""
+    _find_server(cs, args.server).live_migrate(args.host,
+                                               args.block_migrate,
+                                               args.disk_over_commit)
+
+
+@utils.arg('server', metavar='', help='Name or ID of server.')
+@utils.arg('--active', action='store_const', dest='state',
+           default='error', const='active',
+           help='Request the instance be reset to "active" state instead '
+                'of "error" state (the default).')
+def do_reset_state(cs, args):
+    """Reset the state of an instance."""
+    _find_server(cs, args.server).reset_state(args.state)
+
+
+@utils.arg('server', metavar='', help='Name or ID of server.')
+def do_reset_network(cs, args):
+    """Reset the network of an instance."""
+    _find_server(cs, args.server).reset_network()
+
+
+@utils.arg('--host', metavar='', default=None,
+           help='Name of host.')
+@utils.arg('--binary', metavar='', default=None,
+           help='Service binary.')
+def do_service_list(cs, args):
+    """Show a list of all running services. Filter by host & binary."""
+    result = cs.services.list(host=args.host, binary=args.binary)
+    columns = ["Binary", "Host", "Zone", "Status", "State", "Updated_at"]
+    # NOTE(sulo): we check whether the response has disabled_reason
+    # so as not to add the column when the extension is not enabled.
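+    # (This probe inspects the first row, so it assumes the filtered service
+    # list is non-empty; an empty result would raise IndexError here.)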
+ if hasattr(result[0], 'disabled_reason'): + columns.append("Disabled Reason") + utils.print_list(result, columns) + + +@utils.arg('host', metavar='', help='Name of host.') +@utils.arg('binary', metavar='', help='Service binary.') +def do_service_enable(cs, args): + """Enable the service.""" + result = cs.services.enable(args.host, args.binary) + utils.print_list([result], ['Host', 'Binary', 'Status']) + + +@utils.arg('host', metavar='', help='Name of host.') +@utils.arg('binary', metavar='', help='Service binary.') +@utils.arg('--reason', metavar='', + help='Reason for disabling service.') +def do_service_disable(cs, args): + """Disable the service.""" + if args.reason: + result = cs.services.disable_log_reason(args.host, args.binary, + args.reason) + utils.print_list([result], ['Host', 'Binary', 'Status', + 'Disabled Reason']) + else: + result = cs.services.disable(args.host, args.binary) + utils.print_list([result], ['Host', 'Binary', 'Status']) + + +@utils.arg('fixed_ip', metavar='', help='Fixed IP Address.') +def do_fixed_ip_get(cs, args): + """Retrieve info on a fixed ip.""" + result = cs.fixed_ips.get(args.fixed_ip) + utils.print_list([result], ['address', 'cidr', 'hostname', 'host']) + + +@utils.arg('fixed_ip', metavar='', help='Fixed IP Address.') +def do_fixed_ip_reserve(cs, args): + """Reserve a fixed IP.""" + cs.fixed_ips.reserve(args.fixed_ip) + + +@utils.arg('fixed_ip', metavar='', help='Fixed IP Address.') +def do_fixed_ip_unreserve(cs, args): + """Unreserve a fixed IP.""" + cs.fixed_ips.unreserve(args.fixed_ip) + + +@utils.arg('host', metavar='', help='Name of host.') +def do_host_describe(cs, args): + """Describe a specific host.""" + result = cs.hosts.get(args.host) + columns = ["HOST", "PROJECT", "cpu", "memory_mb", "disk_gb"] + utils.print_list(result, columns) + + +@utils.arg('--zone', metavar='', default=None, + help='Filters the list, returning only those ' + 'hosts in the availability zone .') +def do_host_list(cs, args): + """List all hosts by service.""" + columns = ["host_name", "service", "zone"] + result = cs.hosts.list(args.zone) + utils.print_list(result, columns) + + +@utils.arg('host', metavar='', help='Name of host.') +@utils.arg('--status', metavar='', default=None, dest='status', + help='Either enable or disable a host.') +@utils.arg('--maintenance', + metavar='', + default=None, + dest='maintenance', + help='Either put or resume host to/from maintenance.') +def do_host_update(cs, args): + """Update host settings.""" + updates = {} + columns = ["HOST"] + if args.status: + updates['status'] = args.status + columns.append("status") + if args.maintenance: + updates['maintenance_mode'] = args.maintenance + columns.append("maintenance_mode") + result = cs.hosts.update(args.host, updates) + utils.print_list([result], columns) + + +@utils.arg('host', metavar='', help='Name of host.') +@utils.arg('--action', metavar='', dest='action', + choices=['startup', 'shutdown', 'reboot'], + help='A power action: startup, reboot, or shutdown.') +def do_host_action(cs, args): + """Perform a power action on a host.""" + result = cs.hosts.host_action(args.host, args.action) + utils.print_list([result], ['HOST', 'power_action']) + + +@utils.arg('--combine', + dest='combine', + action="store_true", + default=False, + help='Generate a single report for all services.') +def do_coverage_start(cs, args): + """Start Nova coverage reporting.""" + cs.coverage.start(combine=args.combine) + print("Coverage collection started") + + +def do_coverage_stop(cs, args): + """Stop Nova 
coverage reporting.""" + out = cs.coverage.stop() + print("Coverage data file path: %s" % out[-1]['path']) + + +@utils.arg('filename', metavar='', help='report filename') +@utils.arg('--html', + dest='html', + action="store_true", + default=False, + help='Generate HTML reports instead of text ones.') +@utils.arg('--xml', + dest='xml', + action="store_true", + default=False, + help='Generate XML reports instead of text ones.') +def do_coverage_report(cs, args): + """Generate coverage report.""" + if args.html == True and args.xml == True: + raise exceptions.CommandError("--html and --xml must not be " + "specified together.") + cov = cs.coverage.report(args.filename, xml=args.xml, html=args.html) + print("Report path: %s" % cov[-1]['path']) + + +def do_coverage_reset(cs, args): + """Reset coverage data.""" + cs.coverage.reset() + print("Coverage data reset") + + +def _find_hypervisor(cs, hypervisor): + """Get a hypervisor by name or ID.""" + return utils.find_resource(cs.hypervisors, hypervisor) + + +@utils.arg('--matching', metavar='', default=None, + help='List hypervisors matching the given .') +def do_hypervisor_list(cs, args): + """List hypervisors.""" + columns = ['ID', 'Hypervisor hostname'] + if args.matching: + utils.print_list(cs.hypervisors.search(args.matching), columns) + else: + # Since we're not outputting detail data, choose + # detailed=False for server-side efficiency + utils.print_list(cs.hypervisors.list(False), columns) + + +@utils.arg('hostname', metavar='', + help='The hypervisor hostname (or pattern) to search for.') +def do_hypervisor_servers(cs, args): + """List instances belonging to specific hypervisors.""" + hypers = cs.hypervisors.search(args.hostname, servers=True) + + class InstanceOnHyper(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + # Massage the result into a list to be displayed + instances = [] + for hyper in hypers: + hyper_host = hyper.hypervisor_hostname + hyper_id = hyper.id + if hasattr(hyper, 'servers'): + instances.extend([InstanceOnHyper(id=serv['uuid'], + name=serv['name'], + hypervisor_hostname=hyper_host, + hypervisor_id=hyper_id) + for serv in hyper.servers]) + + # Output the data + utils.print_list(instances, ['ID', 'Name', 'Hypervisor ID', + 'Hypervisor Hostname']) + + +@utils.arg('hypervisor', + metavar='', + help='Name or ID of the hypervisor to show the details of.') +def do_hypervisor_show(cs, args): + """Display the details of the specified hypervisor.""" + hyper = _find_hypervisor(cs, args.hypervisor) + + # Build up the dict + info = hyper._info.copy() + info['service_id'] = info['service']['id'] + info['service_host'] = info['service']['host'] + del info['service'] + + utils.print_dict(info) + + +@utils.arg('hypervisor', + metavar='', + help='Name or ID of the hypervisor to show the uptime of.') +def do_hypervisor_uptime(cs, args): + """Display the uptime of the specified hypervisor.""" + hyper = _find_hypervisor(cs, args.hypervisor) + hyper = cs.hypervisors.uptime(hyper) + + # Output the uptime information + utils.print_dict(hyper._info.copy()) + + +def do_hypervisor_stats(cs, args): + """Get hypervisor statistics over all compute nodes.""" + stats = cs.hypervisors.statistics() + utils.print_dict(stats._info.copy()) + + +def ensure_service_catalog_present(cs): + if not hasattr(cs.client, 'service_catalog'): + # Turn off token caching and re-auth + cs.client.unauthenticate() + cs.client.use_token_cache(False) + cs.client.authenticate() + + +def do_endpoints(cs, _args): + """Discover endpoints that get 
returned from the authenticate services.""" + ensure_service_catalog_present(cs) + catalog = cs.client.service_catalog.catalog + for e in catalog['access']['serviceCatalog']: + utils.print_dict(e['endpoints'][0], e['name']) + + +@utils.arg('--wrap', dest='wrap', metavar='', default=64, + help='wrap PKI tokens to a specified length, or 0 to disable') +def do_credentials(cs, _args): + """Show user credentials returned from auth.""" + ensure_service_catalog_present(cs) + catalog = cs.client.service_catalog.catalog + utils.print_dict(catalog['access']['user'], "User Credentials", + wrap=int(_args.wrap)) + utils.print_dict(catalog['access']['token'], "Token", wrap=int(_args.wrap)) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--port', + dest='port', + action='store', + type=int, + default=22, + help='Optional flag to indicate which port to use for ssh. ' + '(Default=22)') +@utils.arg('--private', + dest='private', + action='store_true', + default=False, + help='Optional flag to indicate whether to use private address ' + 'attached to an instance. (Default=False)') +@utils.arg('--ipv6', + dest='ipv6', + action='store_true', + default=False, + help='Optional flag to indicate whether to use an IPv6 address ' + 'attached to an instance. (Defaults to IPv4 address)') +@utils.arg('--login', metavar='', help='Login to use.', default="root") +@utils.arg('-i', '--identity', + dest='identity', + help='Private key file, same as the -i option to the ssh command.', + default='') +@utils.arg('--extra-opts', + dest='extra', + help='Extra options to pass to ssh. see: man ssh', + default='') +def do_ssh(cs, args): + """SSH into a server.""" + addresses = _find_server(cs, args.server).addresses + address_type = "private" if args.private else "public" + version = 6 if args.ipv6 else 4 + + if address_type not in addresses: + print("ERROR: No %s addresses found for '%s'." % (address_type, + args.server)) + return + + ip_address = None + for address in addresses[address_type]: + if address['version'] == version: + ip_address = address['addr'] + break + + identity = '-i %s' % args.identity if len(args.identity) else '' + + if ip_address: + os.system("ssh -%d -p%d %s %s@%s %s" % (version, args.port, identity, + args.login, ip_address, + args.extra)) + else: + pretty_version = "IPv%d" % version + print("ERROR: No %s %s address found." 
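+              # The happy path above shells out to something like (values
+              # illustrative):
+              #     ssh -4 -p22 -i mykey root@10.0.0.5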
% (address_type, + pretty_version)) + return + + +_quota_resources = ['instances', 'cores', 'ram', 'volumes', 'gigabytes', + 'floating_ips', 'fixed_ips', 'metadata_items', + 'injected_files', 'injected_file_content_bytes', + 'injected_file_path_bytes', 'key_pairs', + 'security_groups', 'security_group_rules'] + + +def _quota_show(quotas): + class FormattedQuota(object): + def __init__(self, key, value): + setattr(self, 'quota', key) + setattr(self, 'limit', value) + + quota_list = [] + for resource in _quota_resources: + try: + quota = FormattedQuota(resource, getattr(quotas, resource)) + quota_list.append(quota) + except AttributeError: + pass + columns = ['Quota', 'Limit'] + utils.print_list(quota_list, columns) + + +def _quota_update(manager, identifier, args): + updates = {} + for resource in _quota_resources: + val = getattr(args, resource, None) + if val is not None: + updates[resource] = val + + if updates: + # default value of force is None to make sure this client + # will be compatibile with old nova server + force_update = getattr(args, 'force', None) + user_id = getattr(args, 'user', None) + if isinstance(manager, quotas.QuotaSetManager): + manager.update(identifier, force=force_update, user_id=user_id, + **updates) + else: + manager.update(identifier, **updates) + + +@utils.arg('--tenant', + metavar='', + default=None, + help='ID of tenant to list the quotas for.') +@utils.arg('--user', + metavar='', + default=None, + help='ID of user to list the quotas for.') +def do_quota_show(cs, args): + """List the quotas for a tenant/user.""" + + if not args.tenant: + _quota_show(cs.quotas.get(cs.client.tenant_id, user_id=args.user)) + else: + _quota_show(cs.quotas.get(args.tenant, user_id=args.user)) + + +@utils.arg('--tenant', + metavar='', + default=None, + help='ID of tenant to list the default quotas for.') +def do_quota_defaults(cs, args): + """List the default quotas for a tenant.""" + + if not args.tenant: + _quota_show(cs.quotas.defaults(cs.client.tenant_id)) + else: + _quota_show(cs.quotas.defaults(args.tenant)) + + +@utils.arg('tenant', + metavar='', + help='ID of tenant to set the quotas for.') +@utils.arg('--user', + metavar='', + default=None, + help='ID of user to set the quotas for.') +@utils.arg('--instances', + metavar='', + type=int, default=None, + help='New value for the "instances" quota.') +@utils.arg('--cores', + metavar='', + type=int, default=None, + help='New value for the "cores" quota.') +@utils.arg('--ram', + metavar='', + type=int, default=None, + help='New value for the "ram" quota.') +@utils.arg('--volumes', + metavar='', + type=int, default=None, + help='New value for the "volumes" quota.') +@utils.arg('--gigabytes', + metavar='', + type=int, default=None, + help='New value for the "gigabytes" quota.') +@utils.arg('--floating-ips', + metavar='', + type=int, + default=None, + help='New value for the "floating-ips" quota.') +@utils.arg('--floating_ips', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--fixed-ips', + metavar='', + type=int, + default=None, + help='New value for the "fixed-ips" quota.') +@utils.arg('--metadata-items', + metavar='', + type=int, + default=None, + help='New value for the "metadata-items" quota.') +@utils.arg('--metadata_items', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-files', + metavar='', + type=int, + default=None, + help='New value for the "injected-files" quota.') +@utils.arg('--injected_files', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-file-content-bytes', + metavar='', + 
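+           # A typical invocation of the command built from this decorator
+           # stack (tenant ID and values illustrative):
+           #     nova quota-update --cores 40 --ram 128000 f7ac731c...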
type=int, + default=None, + help='New value for the "injected-file-content-bytes" quota.') +@utils.arg('--injected_file_content_bytes', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-file-path-bytes', + metavar='<injected-file-path-bytes>', + type=int, + default=None, + help='New value for the "injected-file-path-bytes" quota.') +@utils.arg('--key-pairs', + metavar='<key-pairs>', + type=int, + default=None, + help='New value for the "key-pairs" quota.') +@utils.arg('--security-groups', + metavar='<security-groups>', + type=int, + default=None, + help='New value for the "security-groups" quota.') +@utils.arg('--security-group-rules', + metavar='<security-group-rules>', + type=int, + default=None, + help='New value for the "security-group-rules" quota.') +@utils.arg('--force', + dest='force', + action="store_true", + default=None, + help='Force the quota update even if the already used' + ' and reserved amounts exceed the new quota') +def do_quota_update(cs, args): + """Update the quotas for a tenant/user.""" + + _quota_update(cs.quotas, args.tenant, args) + + +@utils.arg('--tenant', + metavar='<tenant-id>', + help='ID of tenant to delete quota for.') +@utils.arg('--user', + metavar='<user-id>', + help='ID of user to delete quota for.') +def do_quota_delete(cs, args): + """Delete quota for a tenant/user so their quota will revert + back to the default. + """ + + cs.quotas.delete(args.tenant, user_id=args.user) + + +@utils.arg('class_name', + metavar='<class>', + help='Name of quota class to list the quotas for.') +def do_quota_class_show(cs, args): + """List the quotas for a quota class.""" + + _quota_show(cs.quota_classes.get(args.class_name)) + + +@utils.arg('class_name', + metavar='<class>', + help='Name of quota class to set the quotas for.') +@utils.arg('--instances', + metavar='<instances>', + type=int, default=None, + help='New value for the "instances" quota.') +@utils.arg('--cores', + metavar='<cores>', + type=int, default=None, + help='New value for the "cores" quota.') +@utils.arg('--ram', + metavar='<ram>', + type=int, default=None, + help='New value for the "ram" quota.') +@utils.arg('--volumes', + metavar='<volumes>', + type=int, default=None, + help='New value for the "volumes" quota.') +@utils.arg('--gigabytes', + metavar='<gigabytes>', + type=int, default=None, + help='New value for the "gigabytes" quota.') +@utils.arg('--floating-ips', + metavar='<floating-ips>', + type=int, + default=None, + help='New value for the "floating-ips" quota.') +@utils.arg('--floating_ips', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--metadata-items', + metavar='<metadata-items>', + type=int, + default=None, + help='New value for the "metadata-items" quota.') +@utils.arg('--metadata_items', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-files', + metavar='<injected-files>', + type=int, + default=None, + help='New value for the "injected-files" quota.') +@utils.arg('--injected_files', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-file-content-bytes', + metavar='<injected-file-content-bytes>', + type=int, + default=None, + help='New value for the "injected-file-content-bytes" quota.') +@utils.arg('--injected_file_content_bytes', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-file-path-bytes', + metavar='<injected-file-path-bytes>', + type=int, + default=None, + help='New value for the "injected-file-path-bytes" quota.') +@utils.arg('--key-pairs', + metavar='<key-pairs>', + type=int, + default=None, + help='New value for the "key-pairs" quota.') +@utils.arg('--security-groups', + metavar='<security-groups>', + type=int, + default=None, + help='New value for the "security-groups" quota.') +@utils.arg('--security-group-rules', + metavar='<security-group-rules>', + type=int, + default=None, + help='New value for the 
"security-group-rules" quota.') +def do_quota_class_update(cs, args): + """Update the quotas for a quota class.""" + + _quota_update(cs.quota_classes, args.class_name, args) + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +@utils.arg('host', metavar='<host>', help='Name or ID of target host.') +@utils.arg('--password', + dest='password', + metavar='<password>', + default=None, + help="Set the provided password on the evacuated instance. Not applicable " + "with the on-shared-storage flag.") +@utils.arg('--on-shared-storage', + dest='on_shared_storage', + action="store_true", + default=False, + help='Specifies whether instance files are located on shared storage') +def do_evacuate(cs, args): + """Evacuate a server from a failed host to the specified one.""" + server = _find_server(cs, args.server) + + res = server.evacuate(args.host, args.on_shared_storage, args.password)[1] + if type(res) is dict: + utils.print_dict(res) + + +def _print_interfaces(interfaces): + columns = ['Port State', 'Port ID', 'Net ID', 'IP addresses', + 'MAC Addr'] + + class FormattedInterface(object): + def __init__(self, interface): + for col in columns: + key = col.lower().replace(" ", "_") + if hasattr(interface, key): + setattr(self, key, getattr(interface, key)) + self.ip_addresses = ",".join([fip['ip_address'] + for fip in interface.fixed_ips]) + utils.print_list([FormattedInterface(i) for i in interfaces], columns) + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +def do_interface_list(cs, args): + """List interfaces attached to an instance.""" + server = _find_server(cs, args.server) + + res = server.interface_list() + if type(res) is list: + _print_interfaces(res) + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +@utils.arg('--port-id', metavar='<port_id>', help='Port ID.', dest="port_id") +@utils.arg('--net-id', metavar='<net_id>', help='Network ID.', + default=None, dest="net_id") +@utils.arg('--fixed-ip', metavar='<fixed_ip>', help='Requested fixed IP.', + default=None, dest="fixed_ip") +def do_interface_attach(cs, args): + """Attach a network interface to an instance.""" + server = _find_server(cs, args.server) + + res = server.interface_attach(args.port_id, args.net_id, args.fixed_ip) + if type(res) is dict: + utils.print_dict(res) + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +@utils.arg('port_id', metavar='<port_id>', help='Port ID.') +def do_interface_detach(cs, args): + """Detach a network interface from an instance.""" + server = _find_server(cs, args.server) + + res = server.interface_detach(args.port_id) + if type(res) is dict: + utils.print_dict(res) + + +def _treeizeAvailabilityZone(zone): + """Build a tree view for availability zones.""" + AvailabilityZone = availability_zones.AvailabilityZone + + az = AvailabilityZone(zone.manager, + copy.deepcopy(zone._info), zone._loaded) + result = [] + + # Zone tree view item + az.zoneName = zone.zoneName + az.zoneState = ('available' + if zone.zoneState['available'] else 'not available') + az._info['zoneName'] = az.zoneName + az._info['zoneState'] = az.zoneState + result.append(az) + + if zone.hosts is not None: + for (host, services) in zone.hosts.items(): + # Host tree view item + az = AvailabilityZone(zone.manager, + copy.deepcopy(zone._info), zone._loaded) + az.zoneName = '|- %s' % host + az.zoneState = '' + az._info['zoneName'] = az.zoneName + az._info['zoneState'] = az.zoneState + result.append(az) + + for (svc, state) in services.items(): + # Service tree view item + az = AvailabilityZone(zone.manager, + copy.deepcopy(zone._info), 
zone._loaded) + az.zoneName = '| |- %s' % svc + az.zoneState = '%s %s %s' % ( + 'enabled' if state['active'] else 'disabled', + ':-)' if state['available'] else 'XXX', + state['updated_at']) + az._info['zoneName'] = az.zoneName + az._info['zoneState'] = az.zoneState + result.append(az) + return result + + +@utils.service_type('compute') +def do_availability_zone_list(cs, _args): + """List all the availability zones.""" + try: + availability_zones = cs.availability_zones.list() + except exceptions.Forbidden as e: # the policy probably forbids a detailed list + try: + availability_zones = cs.availability_zones.list(detailed=False) + except Exception: + raise e + + result = [] + for zone in availability_zones: + result += _treeizeAvailabilityZone(zone) + _translate_availability_zone_keys(result) + utils.print_list(result, ['Name', 'Status'], + sortby_index=None) diff --git a/awx/lib/site-packages/novaclient/v1_1/usage.py b/awx/lib/site-packages/novaclient/v1_1/usage.py new file mode 100644 index 0000000000..880434046e --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/usage.py @@ -0,0 +1,48 @@ +""" +Usage interface. +""" + +from novaclient import base + + +class Usage(base.Resource): + """ + Usage contains information about a tenant's physical resource usage. + """ + def __repr__(self): + return "<ComputeUsage>" + + +class UsageManager(base.ManagerWithFind): + """ + Manage :class:`Usage` resources. + """ + resource_class = Usage + + def list(self, start, end, detailed=False): + """ + Get usage for all tenants + + :param start: :class:`datetime.datetime` Start date + :param end: :class:`datetime.datetime` End date + :param detailed: Whether to include information about each + instance whose usage is part of the report + :rtype: list of :class:`Usage`. + """ + return self._list( + "/os-simple-tenant-usage?start=%s&end=%s&detailed=%s" % + (start.isoformat(), end.isoformat(), int(bool(detailed))), + "tenant_usages") + + def get(self, tenant_id, start, end): + """ + Get usage for a specific tenant. + + :param tenant_id: Tenant ID to fetch usage for + :param start: :class:`datetime.datetime` Start date + :param end: :class:`datetime.datetime` End date + :rtype: :class:`Usage` + """ + return self._get("/os-simple-tenant-usage/%s?start=%s&end=%s" % + (tenant_id, start.isoformat(), end.isoformat()), + "tenant_usage") diff --git a/awx/lib/site-packages/novaclient/v1_1/virtual_interfaces.py b/awx/lib/site-packages/novaclient/v1_1/virtual_interfaces.py new file mode 100644 index 0000000000..9c04e8d866 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/virtual_interfaces.py @@ -0,0 +1,33 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Virtual Interfaces (1.1 extension). 
+""" + +from novaclient import base + + +class VirtualInterface(base.Resource): + def __repr__(self): + return "<VirtualInterface>" + + +class VirtualInterfaceManager(base.ManagerWithFind): + resource_class = VirtualInterface + + def list(self, instance_id): + return self._list('/servers/%s/os-virtual-interfaces' % instance_id, + 'virtual_interfaces') diff --git a/awx/lib/site-packages/novaclient/v1_1/volume_snapshots.py b/awx/lib/site-packages/novaclient/v1_1/volume_snapshots.py new file mode 100644 index 0000000000..b30a60a826 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/volume_snapshots.py @@ -0,0 +1,90 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Volume snapshot interface (1.1 extension). +""" + +from novaclient import base + + +class Snapshot(base.Resource): + """ + A Snapshot is a point-in-time snapshot of an OpenStack volume. + """ + NAME_ATTR = 'display_name' + + def __repr__(self): + return "<Snapshot: %s>" % self.id + + def delete(self): + """ + Delete this snapshot. + """ + self.manager.delete(self) + + +class SnapshotManager(base.ManagerWithFind): + """ + Manage :class:`Snapshot` resources. + """ + resource_class = Snapshot + + def create(self, volume_id, force=False, + display_name=None, display_description=None): + + """ + Create a snapshot of the given volume. + + :param volume_id: The ID of the volume to snapshot. + :param force: If force is True, create a snapshot even if the volume is + attached to an instance. Default is False. + :param display_name: Name of the snapshot + :param display_description: Description of the snapshot + :rtype: :class:`Snapshot` + """ + body = {'snapshot': {'volume_id': volume_id, + 'force': force, + 'display_name': display_name, + 'display_description': display_description}} + return self._create('/snapshots', body, 'snapshot') + + def get(self, snapshot_id): + """ + Get a snapshot. + + :param snapshot_id: The ID of the snapshot to get. + :rtype: :class:`Snapshot` + """ + return self._get("/snapshots/%s" % snapshot_id, "snapshot") + + def list(self, detailed=True): + """ + Get a list of all snapshots. + + :rtype: list of :class:`Snapshot` + """ + if detailed is True: + return self._list("/snapshots/detail", "snapshots") + else: + return self._list("/snapshots", "snapshots") + + def delete(self, snapshot): + """ + Delete a snapshot. + + :param snapshot: The :class:`Snapshot` to delete. + """ + self._delete("/snapshots/%s" % base.getid(snapshot)) diff --git a/awx/lib/site-packages/novaclient/v1_1/volume_types.py b/awx/lib/site-packages/novaclient/v1_1/volume_types.py new file mode 100644 index 0000000000..3d1c7f5316 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/volume_types.py @@ -0,0 +1,77 @@ +# Copyright (c) 2011 Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Volume Type interface. +""" + +from novaclient import base + + +class VolumeType(base.Resource): + """ + A Volume Type is the type of volume to be created. + """ + def __repr__(self): + return "<VolumeType: %s>" % self.name + + +class VolumeTypeManager(base.ManagerWithFind): + """ + Manage :class:`VolumeType` resources. + """ + resource_class = VolumeType + + def list(self): + """ + Get a list of all volume types. + + :rtype: list of :class:`VolumeType`. + """ + return self._list("/types", "volume_types") + + def get(self, volume_type): + """ + Get a specific volume type. + + :param volume_type: The ID of the :class:`VolumeType` to get. + :rtype: :class:`VolumeType` + """ + return self._get("/types/%s" % base.getid(volume_type), "volume_type") + + def delete(self, volume_type): + """ + Delete a specific volume_type. + + :param volume_type: The ID of the :class:`VolumeType` to delete. + """ + self._delete("/types/%s" % base.getid(volume_type)) + + def create(self, name): + """ + Create a volume type. + + :param name: Descriptive name of the volume type + :rtype: :class:`VolumeType` + """ + + body = { + "volume_type": { + "name": name, + } + } + + return self._create("/types", body, "volume_type") diff --git a/awx/lib/site-packages/novaclient/v1_1/volumes.py b/awx/lib/site-packages/novaclient/v1_1/volumes.py new file mode 100644 index 0000000000..1c47920152 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v1_1/volumes.py @@ -0,0 +1,165 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Volume interface (1.1 extension). +""" + +import six + +from novaclient import base +from novaclient.openstack.common.py3kcompat import urlutils + + +class Volume(base.Resource): + """ + A volume is extra block-level storage attached to OpenStack instances. + """ + NAME_ATTR = 'display_name' + + def __repr__(self): + return "<Volume: %s>" % self.id + + def delete(self): + """ + Delete this volume. + """ + self.manager.delete(self) + + +class VolumeManager(base.ManagerWithFind): + """ + Manage :class:`Volume` resources. + """ + resource_class = Volume + + def create(self, size, snapshot_id=None, + display_name=None, display_description=None, + volume_type=None, availability_zone=None, + imageRef=None): + """ + Create a volume. 
+ + :param size: Size of volume in GB + :param snapshot_id: ID of the snapshot + :param display_name: Name of the volume + :param display_description: Description of the volume + :param volume_type: Type of volume + :param availability_zone: Availability Zone for volume + :param imageRef: reference to an image stored in glance + :rtype: :class:`Volume` + """ + body = {'volume': {'size': size, + 'snapshot_id': snapshot_id, + 'display_name': display_name, + 'display_description': display_description, + 'volume_type': volume_type, + 'availability_zone': availability_zone, + 'imageRef': imageRef}} + return self._create('/volumes', body, 'volume') + + def get(self, volume_id): + """ + Get a volume. + + :param volume_id: The ID of the volume to get. + :rtype: :class:`Volume` + """ + return self._get("/volumes/%s" % volume_id, "volume") + + def list(self, detailed=True, search_opts=None): + """ + Get a list of all volumes. + + :rtype: list of :class:`Volume` + """ + search_opts = search_opts or {} + + qparams = dict((k, v) for (k, v) in six.iteritems(search_opts) if v) + + query_string = '?%s' % urlutils.urlencode(qparams) if qparams else '' + + if detailed is True: + return self._list("/volumes/detail%s" % query_string, "volumes") + else: + return self._list("/volumes%s" % query_string, "volumes") + + def delete(self, volume): + """ + Delete a volume. + + :param volume: The :class:`Volume` to delete. + """ + self._delete("/volumes/%s" % base.getid(volume)) + + def create_server_volume(self, server_id, volume_id, device): + """ + Attach a volume identified by the volume ID to the given server ID + + :param server_id: The ID of the server + :param volume_id: The ID of the volume to attach. + :param device: The device name + :rtype: :class:`Volume` + """ + body = {'volumeAttachment': {'volumeId': volume_id, + 'device': device}} + return self._create("/servers/%s/os-volume_attachments" % server_id, + body, "volumeAttachment") + + def update_server_volume(self, server_id, attachment_id, new_volume_id): + """ + Update the volume identified by the attachment ID that is attached to + the given server ID + + :param server_id: The ID of the server + :param attachment_id: The ID of the attachment + :param new_volume_id: The ID of the new volume to attach + :rtype: :class:`Volume` + """ + body = {'volumeAttachment': {'volumeId': new_volume_id}} + return self._update("/servers/%s/os-volume_attachments/%s" % + (server_id, attachment_id,), body, "volumeAttachment") + + def get_server_volume(self, server_id, attachment_id): + """ + Get the volume identified by the attachment ID that is attached to + the given server ID + + :param server_id: The ID of the server + :param attachment_id: The ID of the attachment + :rtype: :class:`Volume` + """ + return self._get("/servers/%s/os-volume_attachments/%s" % (server_id, + attachment_id,), "volumeAttachment") + + def get_server_volumes(self, server_id): + """ + Get a list of all the attached volumes for the given server ID + + :param server_id: The ID of the server + :rtype: list of :class:`Volume` + """ + return self._list("/servers/%s/os-volume_attachments" % server_id, + "volumeAttachments") + + def delete_server_volume(self, server_id, attachment_id): + """ + Detach a volume identified by the attachment ID from the given server + + :param server_id: The ID of the server + :param attachment_id: The ID of the attachment + """ + self._delete("/servers/%s/os-volume_attachments/%s" % + (server_id, attachment_id,)) diff --git 
a/awx/lib/site-packages/novaclient/v3/__init__.py b/awx/lib/site-packages/novaclient/v3/__init__.py new file mode 100644 index 0000000000..a442be1095 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v3/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2012 OpenStack Foundation +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient.v3.client import Client # noqa diff --git a/awx/lib/site-packages/novaclient/v3/client.py b/awx/lib/site-packages/novaclient/v3/client.py new file mode 100644 index 0000000000..8a8d814f92 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v3/client.py @@ -0,0 +1,98 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from novaclient import client + + +class Client(object): + """ + Top-level object to access the OpenStack Compute API. + + Create an instance with your creds:: + + >>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL) + + Then call methods on its managers:: + + >>> client.servers.list() + ... + >>> client.flavors.list() + ... + + """ + + # FIXME(jesse): project_id isn't required to authenticate + def __init__(self, username, password, project_id, auth_url=None, + insecure=False, timeout=None, proxy_tenant_id=None, + proxy_token=None, region_name=None, + endpoint_type='publicURL', extensions=None, + service_type='compute', service_name=None, + volume_service_name=None, timings=False, + bypass_url=None, os_cache=False, no_cache=True, + http_log_debug=False, auth_system='keystone', + auth_plugin=None, + cacert=None, tenant_id=None): + #TODO(bnemec): Add back in v3 extensions + + # Add in any extensions... 
+ if extensions: + for extension in extensions: + if extension.manager_class: + setattr(self, extension.name, + extension.manager_class(self)) + + self.client = client.HTTPClient(username, + password, + projectid=project_id, + tenant_id=tenant_id, + auth_url=auth_url, + insecure=insecure, + timeout=timeout, + auth_system=auth_system, + auth_plugin=auth_plugin, + proxy_token=proxy_token, + proxy_tenant_id=proxy_tenant_id, + region_name=region_name, + endpoint_type=endpoint_type, + service_type=service_type, + service_name=service_name, + volume_service_name=volume_service_name, + timings=timings, + bypass_url=bypass_url, + os_cache=os_cache, + http_log_debug=http_log_debug, + cacert=cacert) + + def set_management_url(self, url): + self.client.set_management_url(url) + + def get_timings(self): + return self.client.get_timings() + + def reset_timings(self): + self.client.reset_timings() + + def authenticate(self): + """ + Authenticate against the server. + + Normally this is called automatically when you first access the API, + but you can call this method to force authentication right now. + + Returns on success; raises :exc:`exceptions.Unauthorized` if the + credentials are wrong. + """ + self.client.authenticate() diff --git a/awx/lib/site-packages/novaclient/v3/shell.py b/awx/lib/site-packages/novaclient/v3/shell.py new file mode 100644 index 0000000000..8debf11511 --- /dev/null +++ b/awx/lib/site-packages/novaclient/v3/shell.py @@ -0,0 +1,3252 @@ +# Copyright 2010 Jacob Kaplan-Moss + +# Copyright 2011 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import print_function + +import argparse +import copy +import datetime +import getpass +import locale +import os +import sys +import time + +from novaclient import exceptions +from novaclient.openstack.common import strutils +from novaclient.openstack.common import timeutils +from novaclient.openstack.common import uuidutils +from novaclient import utils +from novaclient.v1_1 import availability_zones +from novaclient.v1_1 import quotas +from novaclient.v1_1 import servers + + +def _key_value_pairing(text): + try: + (k, v) = text.split('=', 1) + return (k, v) + except ValueError: + msg = "%r is not in the format of key=value" % text + raise argparse.ArgumentTypeError(msg) + + +def _match_image(cs, wanted_properties): + image_list = cs.images.list() + images_matched = [] + match = set(wanted_properties) + for img in image_list: + try: + if match == match.intersection(set(img.metadata.items())): + images_matched.append(img) + except AttributeError: + pass + return images_matched + + +def _boot(cs, args, reservation_id=None, min_count=None, max_count=None): + """Boot a new server.""" + if min_count is None: + min_count = 1 + if max_count is None: + max_count = min_count + if min_count > max_count: + raise exceptions.CommandError("min_instances should be <= " + "max_instances") + if not min_count or not max_count: + raise exceptions.CommandError("neither min_instances nor max_instances" + " should be 0") + + if args.image: + image = _find_image(cs, args.image) + else: + image = None + + if not image and args.image_with: + images = _match_image(cs, args.image_with) + if images: + # TODO(harlowja): log a warning that we + # are selecting the first of many? + image = images[0] + + if not image and not args.block_device_mapping: + raise exceptions.CommandError("you need to specify an Image ID " + "or a block device mapping " + "or provide a set of properties to match" + " against an image") + if not args.flavor: + raise exceptions.CommandError("you need to specify a Flavor ID") + + if args.num_instances is not None: + if args.num_instances <= 1: + raise exceptions.CommandError("num_instances should be > 1") + max_count = args.num_instances + + flavor = _find_flavor(cs, args.flavor) + + meta = dict(v.split('=', 1) for v in args.meta) + + files = {} + for f in args.files: + try: + dst, src = f.split('=', 1) + files[dst] = open(src) + except IOError as e: + raise exceptions.CommandError("Can't open '%s': %s" % (src, e)) + except ValueError as e: + raise exceptions.CommandError("Invalid file argument '%s'. File " + "arguments must be of the form '--file <dst=src>'" % f) + + # use the os-keypair extension + key_name = None + if args.key_name is not None: + key_name = args.key_name + + if args.user_data: + try: + userdata = open(args.user_data) + except IOError as e: + raise exceptions.CommandError("Can't open '%s': %s" % + (args.user_data, e)) + else: + userdata = None + + if args.availability_zone: + availability_zone = args.availability_zone + else: + availability_zone = None + + if args.security_groups: + security_groups = args.security_groups.split(',') + else: + security_groups = None + + block_device_mapping = {} + for bdm in args.block_device_mapping: + device_name, mapping = bdm.split('=', 1) + block_device_mapping[device_name] = mapping + + nics = [] + for nic_str in args.nics: + err_msg = ("Invalid nic argument '%s'. Nic arguments must be of the " + "form --nic <net-id=net-uuid,v4-fixed-ip=ip-addr," + "port-id=port-uuid>, with at minimum net-id or port-id " + "specified." 
% nic_str) + nic_info = {"net-id": "", "v4-fixed-ip": "", "port-id": ""} + + for kv_str in nic_str.split(","): + try: + k, v = kv_str.split("=", 1) + except ValueError as e: + raise exceptions.CommandError(err_msg) + + if k in nic_info: + nic_info[k] = v + else: + raise exceptions.CommandError(err_msg) + + if not nic_info['net-id'] and not nic_info['port-id']: + raise exceptions.CommandError(err_msg) + + nics.append(nic_info) + + hints = {} + if args.scheduler_hints: + for hint in args.scheduler_hints: + key, _sep, value = hint.partition('=') + # NOTE(vish): multiple copies of the same hint will + # result in a list of values + if key in hints: + if isinstance(hints[key], basestring): + hints[key] = [hints[key]] + hints[key] += [value] + else: + hints[key] = value + boot_args = [args.name, image, flavor] + + if str(args.config_drive).lower() in ("true", "1"): + config_drive = True + elif str(args.config_drive).lower() in ("false", "0", "", "none"): + config_drive = None + else: + config_drive = args.config_drive + + boot_kwargs = dict( + meta=meta, + files=files, + key_name=key_name, + reservation_id=reservation_id, + min_count=min_count, + max_count=max_count, + userdata=userdata, + availability_zone=availability_zone, + security_groups=security_groups, + block_device_mapping=block_device_mapping, + nics=nics, + scheduler_hints=hints, + config_drive=config_drive) + + return boot_args, boot_kwargs + + +@utils.arg('--flavor', + default=None, + metavar='<flavor>', + help="Flavor ID (see 'nova flavor-list').") +@utils.arg('--image', + default=None, + metavar='<image>', + help="Image ID (see 'nova image-list'). ") +@utils.arg('--image-with', + default=[], + type=_key_value_pairing, + action='append', + metavar='<key=value>', + help="Image metadata property (see 'nova image-show'). ") +@utils.arg('--num-instances', + default=None, + type=int, + metavar='<number>', + help="Boot multiple instances at a time (limited by quota).") +@utils.arg('--meta', + metavar="<key=value>", + action='append', + default=[], + help="Record arbitrary key/value metadata to /meta.js " + "on the new server. Can be specified multiple times.") +@utils.arg('--file', + metavar="<dst-path=src-path>", + action='append', + dest='files', + default=[], + help="Store arbitrary files from <src-path> locally to <dst-path> " + "on the new server. 
You may store up to 5 files.") +@utils.arg('--key-name', + metavar='<key-name>', + help="Key name of keypair that should be created earlier with \ + the command keypair-add") +@utils.arg('--key_name', + help=argparse.SUPPRESS) +@utils.arg('name', metavar='<name>', help='Name for the new server.') +@utils.arg('--user-data', + default=None, + metavar='<user-data>', + help="User data file to pass to be exposed by the metadata server.") +@utils.arg('--user_data', + help=argparse.SUPPRESS) +@utils.arg('--availability-zone', + default=None, + metavar='<availability-zone>', + help="The availability zone for instance placement.") +@utils.arg('--availability_zone', + help=argparse.SUPPRESS) +@utils.arg('--security-groups', + default=None, + metavar='<security-groups>', + help="Comma separated list of security group names.") +@utils.arg('--security_groups', + help=argparse.SUPPRESS) +@utils.arg('--block-device-mapping', + metavar="<dev-name=mapping>", + action='append', + default=[], + help="Block device mapping in the format " + "<dev-name>=<id>:<type>:<size(GB)>:<delete-on-terminate>.") +@utils.arg('--block_device_mapping', + action='append', + help=argparse.SUPPRESS) +@utils.arg('--hint', + action='append', + dest='scheduler_hints', + default=[], + metavar='<key=value>', + help="Send arbitrary key/value pairs to the scheduler for custom use.") +@utils.arg('--nic', + metavar="<net-id=net-uuid,v4-fixed-ip=ip-addr,port-id=port-uuid>", + action='append', + dest='nics', + default=[], + help="Create a NIC on the server. " + "Specify option multiple times to create multiple NICs. " + "net-id: attach NIC to network with this UUID " + "(required if no port-id), " + "v4-fixed-ip: IPv4 fixed address for NIC (optional), " + "port-id: attach NIC to port with this UUID " + "(required if no net-id)") +@utils.arg('--config-drive', + metavar="<value>", + dest='config_drive', + default=False, + help="Enable config drive") +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance builds so progress can be reported.') +def do_boot(cs, args): + """Boot a new server.""" + boot_args, boot_kwargs = _boot(cs, args) + + extra_boot_kwargs = utils.get_resource_manager_extra_kwargs(do_boot, args) + boot_kwargs.update(extra_boot_kwargs) + + server = cs.servers.create(*boot_args, **boot_kwargs) + + # Keep any information (like adminPass) returned by create + info = server._info + server = cs.servers.get(info['id']) + info.update(server._info) + + flavor = info.get('flavor', {}) + flavor_id = flavor.get('id', '') + info['flavor'] = _find_flavor(cs, flavor_id).name + + image = info.get('image', {}) + if image: + image_id = image.get('id', '') + info['image'] = _find_image(cs, image_id).name + else: # Booting from volume + info['image'] = "Attempt to boot from volume - no image supplied" + + info.pop('links', None) + info.pop('addresses', None) + + utils.print_dict(info) + + if args.poll: + _poll_for_status(cs.servers.get, info['id'], 'building', ['active']) + + +def do_cloudpipe_list(cs, _args): + """Print a list of all cloudpipe instances.""" + cloudpipes = cs.cloudpipe.list() + columns = ['Project Id', "Public IP", "Public Port", "Internal IP"] + utils.print_list(cloudpipes, columns) + + +@utils.arg('project', metavar='<project_id>', help='Name of the project.') +def do_cloudpipe_create(cs, args): + """Create a cloudpipe instance for the given project.""" + cs.cloudpipe.create(args.project) + + +@utils.arg('address', metavar='<ip address>', help='New IP Address.') +@utils.arg('port', metavar='<port>', help='New Port.') +def do_cloudpipe_configure(cs, args): + """Update the VPN IP/port of a cloudpipe instance.""" + cs.cloudpipe.update(args.address, args.port) + + +def _poll_for_status(poll_fn, obj_id, action, 
final_ok_states, + poll_period=5, show_progress=True, + status_field="status", silent=False): + """Block while an action is being performed, periodically printing + progress. + """ + def print_progress(progress): + if show_progress: + msg = ('\rInstance %(action)s... %(progress)s%% complete' + % dict(action=action, progress=progress)) + else: + msg = '\rInstance %(action)s...' % dict(action=action) + + sys.stdout.write(msg) + sys.stdout.flush() + + if not silent: + print() + + while True: + obj = poll_fn(obj_id) + + status = getattr(obj, status_field) + + if status: + status = status.lower() + + progress = getattr(obj, 'progress', None) or 0 + if status in final_ok_states: + if not silent: + print_progress(100) + print("\nFinished") + break + elif status == "error": + if not silent: + print("\nError %s instance" % action) + break + + if not silent: + print_progress(progress) + + time.sleep(poll_period) + + +def _translate_keys(collection, convert): + for item in collection: + keys = item.__dict__.keys() + for from_key, to_key in convert: + if from_key in keys and to_key not in keys: + setattr(item, to_key, item._info[from_key]) + + +def _translate_extended_states(collection): + power_states = [ + 'NOSTATE', # 0x00 + 'Running', # 0x01 + '', # 0x02 + 'Paused', # 0x03 + 'Shutdown', # 0x04 + '', # 0x05 + 'Crashed', # 0x06 + 'Suspended' # 0x07 + ] + + for item in collection: + try: + setattr(item, 'power_state', + power_states[getattr(item, 'power_state')] + ) + except AttributeError: + setattr(item, 'power_state', "N/A") + try: + getattr(item, 'task_state') + except AttributeError: + setattr(item, 'task_state', "N/A") + + +def _translate_flavor_keys(collection): + _translate_keys(collection, [('ram', 'memory_mb')]) + + +def _print_flavor_extra_specs(flavor): + try: + return flavor.get_keys() + except exceptions.NotFound: + return "N/A" + + +def _print_flavor_list(flavors, show_extra_specs=False): + _translate_flavor_keys(flavors) + + headers = [ + 'ID', + 'Name', + 'Memory_MB', + 'Disk', + 'Ephemeral', + 'Swap', + 'VCPUs', + 'RXTX_Factor', + 'Is_Public', + ] + + if show_extra_specs: + formatters = {'extra_specs': _print_flavor_extra_specs} + headers.append('extra_specs') + else: + formatters = {} + + utils.print_list(flavors, headers, formatters) + + +@utils.arg('--extra-specs', + dest='extra_specs', + action='store_true', + default=False, + help='Get extra-specs of each flavor.') +@utils.arg('--all', + dest='all', + action='store_true', + default=False, + help='Display all flavors (Admin only).') +def do_flavor_list(cs, args): + """Print a list of available 'flavors' (sizes of servers).""" + if args.all: + flavors = cs.flavors.list(is_public=None) + else: + flavors = cs.flavors.list() + _print_flavor_list(flavors, args.extra_specs) + + +@utils.arg('flavor', + metavar='<flavor>', + help="Name or ID of the flavor to delete") +def do_flavor_delete(cs, args): + """Delete a specific flavor.""" + flavor = _find_flavor(cs, args.flavor) + cs.flavors.delete(flavor) + _print_flavor_list([flavor]) + + +@utils.arg('flavor', + metavar='<flavor>', + help="Name or ID of flavor") +def do_flavor_show(cs, args): + """Show details about the given flavor.""" + flavor = _find_flavor(cs, args.flavor) + _print_flavor(flavor) + + +@utils.arg('name', + metavar='<name>', + help="Name of the new flavor") +@utils.arg('id', + metavar='<id>', + help="Unique ID (integer or UUID) for the new flavor." 
+ " If specifying 'auto', a UUID will be generated as id") +@utils.arg('ram', + metavar='', + help="Memory size in MB") +@utils.arg('disk', + metavar='', + help="Disk size in GB") +@utils.arg('--ephemeral', + metavar='', + help="Ephemeral space size in GB (default 0)", + default=0) +@utils.arg('vcpus', + metavar='', + help="Number of vcpus") +@utils.arg('--swap', + metavar='', + help="Swap space size in MB (default 0)", + default=0) +@utils.arg('--rxtx-factor', + metavar='', + help="RX/TX factor (default 1)", + default=1.0) +@utils.arg('--is-public', + metavar='', + help="Make flavor accessible to the public (default true)", + type=utils.bool_from_str, + default=True) +def do_flavor_create(cs, args): + """Create a new flavor""" + f = cs.flavors.create(args.name, args.ram, args.vcpus, args.disk, args.id, + args.ephemeral, args.swap, args.rxtx_factor, + args.is_public) + _print_flavor_list([f]) + + +@utils.arg('flavor', + metavar='', + help="Name or ID of flavor") +@utils.arg('action', + metavar='', + choices=['set', 'unset'], + help="Actions: 'set' or 'unset'") +@utils.arg('metadata', + metavar='', + nargs='+', + action='append', + default=[], + help='Extra_specs to set/unset (only key is necessary on unset)') +def do_flavor_key(cs, args): + """Set or unset extra_spec for a flavor.""" + flavor = _find_flavor(cs, args.flavor) + keypair = _extract_metadata(args) + + if args.action == 'set': + flavor.set_keys(keypair) + elif args.action == 'unset': + flavor.unset_keys(keypair.keys()) + + +@utils.arg('--flavor', + metavar='', + help="Filter results by flavor name or ID.") +@utils.arg('--tenant', metavar='', + help='Filter results by tenant ID.') +def do_flavor_access_list(cs, args): + """Print access information about the given flavor.""" + if args.flavor and args.tenant: + raise exceptions.CommandError("Unable to filter results by " + "both --flavor and --tenant.") + elif args.flavor: + flavor = _find_flavor(cs, args.flavor) + if flavor.is_public: + raise exceptions.CommandError("Failed to get access list " + "for public flavor type.") + kwargs = {'flavor': flavor} + elif args.tenant: + kwargs = {'tenant': args.tenant} + else: + raise exceptions.CommandError("Unable to get all access lists. 
" + "Specify --flavor or --tenant") + + try: + access_list = cs.flavor_access.list(**kwargs) + except NotImplementedError as e: + raise exceptions.CommandError("%s" % str(e)) + + columns = ['Flavor_ID', 'Tenant_ID'] + utils.print_list(access_list, columns) + + +@utils.arg('flavor', + metavar='', + help="Filter results by flavor name or ID.") +@utils.arg('tenant', metavar='', + help='Filter results by tenant ID.') +def do_flavor_access_add(cs, args): + """Add flavor access for the given tenant.""" + flavor = _find_flavor(cs, args.flavor) + access_list = cs.flavor_access.add_tenant_access(flavor, args.tenant) + columns = ['Flavor_ID', 'Tenant_ID'] + utils.print_list(access_list, columns) + + +@utils.arg('flavor', + metavar='', + help="Filter results by flavor name or ID.") +@utils.arg('tenant', metavar='', + help='Filter results by tenant ID.') +def do_flavor_access_remove(cs, args): + """Remove flavor access for the given tenant.""" + flavor = _find_flavor(cs, args.flavor) + access_list = cs.flavor_access.remove_tenant_access(flavor, args.tenant) + columns = ['Flavor_ID', 'Tenant_ID'] + utils.print_list(access_list, columns) + + +@utils.arg('project_id', metavar='', + help='The ID of the project.') +def do_scrub(cs, args): + """Delete data associated with the project.""" + networks_list = cs.networks.list() + networks_list = [network for network in networks_list + if getattr(network, 'project_id', '') == args.project_id] + search_opts = {'all_tenants': 1} + groups = cs.security_groups.list(search_opts) + groups = [group for group in groups + if group.tenant_id == args.project_id] + for network in networks_list: + cs.networks.disassociate(network) + for group in groups: + cs.security_groups.delete(group) + + +def do_network_list(cs, _args): + """Print a list of available networks.""" + network_list = cs.networks.list() + columns = ['ID', 'Label', 'Cidr'] + utils.print_list(network_list, columns) + + +@utils.arg('network', + metavar='', + help="uuid or label of network") +def do_network_show(cs, args): + """Show details about the given network.""" + network = utils.find_resource(cs.networks, args.network) + utils.print_dict(network._info) + + +@utils.arg('--host-only', + dest='host_only', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=0) +@utils.arg('--project-only', + dest='project_only', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=0) +@utils.arg('network', + metavar='', + help="uuid of network") +def do_network_disassociate(cs, args): + """Disassociate host and/or project from the given network.""" + if args.host_only: + cs.networks.disassociate(args.network, True, False) + elif args.project_only: + cs.networks.disassociate(args.network, False, True) + else: + cs.networks.disassociate(args.network, True, True) + + +@utils.arg('network', + metavar='', + help="uuid of network") +@utils.arg('host', + metavar='', + help="Name of host") +def do_network_associate_host(cs, args): + """Associate host with network.""" + cs.networks.associate_host(args.network, args.host) + + +@utils.arg('network', + metavar='', + help="uuid of network") +def do_network_associate_project(cs, args): + """Associate project with network.""" + cs.networks.associate_project(args.network) + + +def _filter_network_create_options(args): + valid_args = ['label', 'cidr', 'vlan_start', 'vpn_start', 'cidr_v6', + 'gateway', 'gateway_v6', 'bridge', 'bridge_interface', + 'multi_host', 'dns1', 'dns2', 'uuid', 'fixed_cidr', + 'project_id', 'priority'] + kwargs = {} + for k, v in 
args.__dict__.items(): + if k in valid_args and v is not None: + kwargs[k] = v + + return kwargs + + +@utils.arg('label', + metavar='<network_label>', + help="Label for network") +@utils.arg('--fixed-range-v4', + dest='cidr', + metavar='<x.x.x.x/yy>', + help="IPv4 subnet (ex: 10.0.0.0/8)") +@utils.arg('--fixed-range-v6', + dest="cidr_v6", + help='IPv6 subnet (ex: fe80::/64)') +@utils.arg('--vlan', + dest='vlan_start', + metavar='<vlan id>', + help="vlan id") +@utils.arg('--vpn', + dest='vpn_start', + metavar='<vpn start>', + help="vpn start") +@utils.arg('--gateway', + dest="gateway", + help='gateway') +@utils.arg('--gateway-v6', + dest="gateway_v6", + help='IPv6 gateway') +@utils.arg('--bridge', + dest="bridge", + metavar='<bridge>', + help='VIFs on this network are connected to this bridge') +@utils.arg('--bridge-interface', + dest="bridge_interface", + metavar='<bridge interface>', + help='the bridge is connected to this interface') +@utils.arg('--multi-host', + dest="multi_host", + metavar="<'T'|'F'>", + help='Multi host') +@utils.arg('--dns1', + dest="dns1", + metavar="<DNS Address>", help='First DNS') +@utils.arg('--dns2', + dest="dns2", + metavar="<DNS Address>", + help='Second DNS') +@utils.arg('--uuid', + dest="uuid", + metavar="<network uuid>", + help='Network UUID') +@utils.arg('--fixed-cidr', + dest="fixed_cidr", + metavar='<x.x.x.x/yy>', + help='IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)') +@utils.arg('--project-id', + dest="project_id", + metavar="<project id>", + help='Project ID') +@utils.arg('--priority', + dest="priority", + metavar="<number>", + help='Network interface priority') +def do_network_create(cs, args): + """Create a network.""" + + if not (args.cidr or args.cidr_v6): + raise exceptions.CommandError( + "Must specify either fixed_range_v4 or fixed_range_v6") + kwargs = _filter_network_create_options(args) + if args.multi_host is not None: + kwargs['multi_host'] = bool(args.multi_host == 'T' or + strutils.bool_from_string(args.multi_host)) + + cs.networks.create(**kwargs) + + +@utils.arg('--limit', + dest="limit", + metavar="<limit>", + help='Number of images to return per request.') +def do_image_list(cs, _args): + """Print a list of available images to boot from.""" + limit = _args.limit + image_list = cs.images.list(limit=limit) + + def parse_server_name(image): + try: + return image.server['id'] + except (AttributeError, KeyError): + return '' + + fmts = {'Server': parse_server_name} + utils.print_list(image_list, ['ID', 'Name', 'Status', 'Server'], + fmts, sortby_index=1) + + +@utils.arg('image', + metavar='<image>', + help="Name or ID of image") +@utils.arg('action', + metavar='<action>', + choices=['set', 'delete'], + help="Actions: 'set' or 'delete'") +@utils.arg('metadata', + metavar='<key=value>', + nargs='+', + action='append', + default=[], + help='Metadata to add/update or delete (only key is necessary on delete)') +def do_image_meta(cs, args): + """Set or delete metadata on an image.""" + image = _find_image(cs, args.image) + metadata = _extract_metadata(args) + + if args.action == 'set': + cs.images.set_meta(image, metadata) + elif args.action == 'delete': + cs.images.delete_meta(image, metadata.keys()) + + +def _extract_metadata(args): + metadata = {} + for metadatum in args.metadata[0]: + # Only the key is passed on 'delete', + # so the argument doesn't have to contain '=' + if metadatum.find('=') > -1: + (key, value) = metadatum.split('=', 1) + else: + key = metadatum + value = None + + metadata[key] = value + return metadata + + +def _print_image(image): + info = image._info.copy() + + # ignore links, we don't need to present those + info.pop('links') + + # try to replace the server entity with just its id + server = info.pop('server', 
None) + try: + info['server'] = server['id'] + except (KeyError, TypeError): + pass + + # break up metadata and display each on its own row + metadata = info.pop('metadata', {}) + try: + for key, value in metadata.items(): + _key = 'metadata %s' % key + info[_key] = value + except AttributeError: + pass + + utils.print_dict(info) + + +def _print_flavor(flavor): + info = flavor._info.copy() + # ignore links, we don't need to present those + info.pop('links') + info.update({"extra_specs": _print_flavor_extra_specs(flavor)}) + utils.print_dict(info) + + +@utils.arg('image', + metavar='', + help="Name or ID of image") +def do_image_show(cs, args): + """Show details about the given image.""" + image = _find_image(cs, args.image) + _print_image(image) + + +@utils.arg('image', metavar='', nargs='+', + help='Name or ID of image(s).') +def do_image_delete(cs, args): + """Delete specified image(s).""" + for image in args.image: + try: + _find_image(cs, image).delete() + except Exception as e: + print("Delete for image %s failed: %s" % (image, e)) + + +@utils.arg('--reservation-id', + dest='reservation_id', + metavar='', + default=None, + help='Only return instances that match reservation-id.') +@utils.arg('--reservation_id', + help=argparse.SUPPRESS) +@utils.arg('--ip', + dest='ip', + metavar='', + default=None, + help='Search with regular expression match by IP address (Admin only).') +@utils.arg('--ip6', + dest='ip6', + metavar='', + default=None, + help='Search with regular expression match by IPv6 address (Admin only).') +@utils.arg('--name', + dest='name', + metavar='', + default=None, + help='Search with regular expression match by name') +@utils.arg('--instance-name', + dest='instance_name', + metavar='', + default=None, + help='Search with regular expression match by instance name (Admin only).') +@utils.arg('--instance_name', + help=argparse.SUPPRESS) +@utils.arg('--status', + dest='status', + metavar='', + default=None, + help='Search by server status') +@utils.arg('--flavor', + dest='flavor', + metavar='', + default=None, + help='Search by flavor name or ID') +@utils.arg('--image', + dest='image', + metavar='', + default=None, + help='Search by image name or ID') +@utils.arg('--host', + dest='host', + metavar='', + default=None, + help='Search instances by hostname to which they are assigned ' + '(Admin only).') +@utils.arg('--all-tenants', + dest='all_tenants', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=int(utils.bool_from_str(os.environ.get("ALL_TENANTS", 'false'))), + help='Display information from all tenants (Admin only).') +@utils.arg('--all_tenants', + nargs='?', + type=int, + const=1, + help=argparse.SUPPRESS) +@utils.arg('--tenant', + #nova db searches by project_id + dest='tenant', + metavar='', + nargs='?', + help='Display information from single tenant (Admin only).') +@utils.arg('--fields', + default=None, + metavar='', + help='Comma-separated list of fields to display. 
' + 'Use the show command to see which fields are available.') +def do_list(cs, args): + """List active servers.""" + imageid = None + flavorid = None + if args.image: + imageid = _find_image(cs, args.image).id + if args.flavor: + flavorid = _find_flavor(cs, args.flavor).id + search_opts = { + 'all_tenants': args.all_tenants, + 'reservation_id': args.reservation_id, + 'ip': args.ip, + 'ip6': args.ip6, + 'name': args.name, + 'image': imageid, + 'flavor': flavorid, + 'status': args.status, + 'tenant_id': args.tenant, + 'host': args.host, + 'instance_name': args.instance_name} + + filters = {'flavor': lambda f: f['id'], + 'security_groups': utils._format_security_groups} + + formatters = {} + field_titles = [] + if args.fields: + for field in args.fields.split(','): + field_title, formatter = utils._make_field_formatter(field, + filters) + field_titles.append(field_title) + formatters[field_title] = formatter + + id_col = 'ID' + + servers = cs.servers.list(search_opts=search_opts) + convert = [('OS-EXT-SRV-ATTR:host', 'host'), + ('OS-EXT-STS:task_state', 'task_state'), + ('OS-EXT-SRV-ATTR:instance_name', 'instance_name'), + ('OS-EXT-STS:power_state', 'power_state'), + ('hostId', 'host_id')] + _translate_keys(servers, convert) + _translate_extended_states(servers) + if field_titles: + columns = [id_col] + field_titles + else: + columns = [ + id_col, + 'Name', + 'Status', + 'Task State', + 'Power State', + 'Networks' + ] + formatters['Networks'] = utils._format_servers_list_networks + utils.print_list(servers, columns, + formatters, sortby_index=1) + + +@utils.arg('--hard', + dest='reboot_type', + action='store_const', + const=servers.REBOOT_HARD, + default=servers.REBOOT_SOFT, + help='Perform a hard reboot (instead of a soft one).') +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance is rebooting.') +def do_reboot(cs, args): + """Reboot a server.""" + server = _find_server(cs, args.server) + server.reboot(args.reboot_type) + + if args.poll: + _poll_for_status(cs.servers.get, server.id, 'rebooting', ['active'], + show_progress=False) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('image', metavar='', help="Name or ID of new image.") +@utils.arg('--rebuild-password', + dest='rebuild_password', + metavar='', + default=False, + help="Set the provided password on the rebuild instance.") +@utils.arg('--rebuild_password', + help=argparse.SUPPRESS) +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance rebuilds so progress can be reported.') +@utils.arg('--minimal', + dest='minimal', + action="store_true", + default=False, + help='Skips flavor/image lookups when showing instances') +def do_rebuild(cs, args): + """Shutdown, re-image, and re-boot a server.""" + server = _find_server(cs, args.server) + image = _find_image(cs, args.image) + + if args.rebuild_password is not False: + _password = args.rebuild_password + else: + _password = None + + kwargs = utils.get_resource_manager_extra_kwargs(do_rebuild, args) + server.rebuild(image, _password, **kwargs) + _print_server(cs, args) + + if args.poll: + _poll_for_status(cs.servers.get, server.id, 'rebuilding', ['active']) + + +@utils.arg('server', metavar='', + help='Name (old name) or ID of server.') +@utils.arg('name', metavar='', help='New name for the server.') +def do_rename(cs, args): + """Rename a server.""" + _find_server(cs, 
args.server).update(name=args.name) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('flavor', metavar='', help="Name or ID of new flavor.") +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance resizes so progress can be reported.') +def do_resize(cs, args): + """Resize a server.""" + server = _find_server(cs, args.server) + flavor = _find_flavor(cs, args.flavor) + kwargs = utils.get_resource_manager_extra_kwargs(do_resize, args) + server.resize(flavor, **kwargs) + if args.poll: + _poll_for_status(cs.servers.get, server.id, 'resizing', + ['active', 'verify_resize']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_resize_confirm(cs, args): + """Confirm a previous resize.""" + _find_server(cs, args.server).confirm_resize() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_resize_revert(cs, args): + """Revert a previous resize (and return to the previous VM).""" + _find_server(cs, args.server).revert_resize() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance migrates so progress can be reported.') +def do_migrate(cs, args): + """Migrate a server. The new host will be selected by the scheduler.""" + server = _find_server(cs, args.server) + server.migrate() + + if args.poll: + _poll_for_status(cs.servers.get, server.id, 'migrating', + ['active', 'verify_resize']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_pause(cs, args): + """Pause a server.""" + _find_server(cs, args.server).pause() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_unpause(cs, args): + """Unpause a server.""" + _find_server(cs, args.server).unpause() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_stop(cs, args): + """Stop a server.""" + _find_server(cs, args.server).stop() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_start(cs, args): + """Start a server.""" + _find_server(cs, args.server).start() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_lock(cs, args): + """Lock a server.""" + _find_server(cs, args.server).lock() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_unlock(cs, args): + """Unlock a server.""" + _find_server(cs, args.server).unlock() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_suspend(cs, args): + """Suspend a server.""" + _find_server(cs, args.server).suspend() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_resume(cs, args): + """Resume a server.""" + _find_server(cs, args.server).resume() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_rescue(cs, args): + """Rescue a server.""" + utils.print_dict(_find_server(cs, args.server).rescue()[1]) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_unrescue(cs, args): + """Unrescue a server.""" + _find_server(cs, args.server).unrescue() + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_diagnostics(cs, args): + """Retrieve server diagnostics.""" + server = _find_server(cs, args.server) + utils.print_dict(cs.servers.diagnostics(server)[1]) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_root_password(cs, args): + """ + Change the root password for a server. 
+ """ + server = _find_server(cs, args.server) + p1 = getpass.getpass('New password: ') + p2 = getpass.getpass('Again: ') + if p1 != p2: + raise exceptions.CommandError("Passwords do not match.") + server.change_password(p1) + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +@utils.arg('name', metavar='<name>', help='Name of snapshot.') +@utils.arg('--poll', + dest='poll', + action="store_true", + default=False, + help='Blocks while instance snapshots so progress can be reported.') +def do_image_create(cs, args): + """Create a new image by taking a snapshot of a running server.""" + server = _find_server(cs, args.server) + image_uuid = cs.servers.create_image(server, args.name) + + if args.poll: + _poll_for_status(cs.images.get, image_uuid, 'snapshotting', + ['active']) + + # NOTE(sirp): A race-condition exists between when the image finishes + # uploading and when the server's `task_state` is cleared. To account + # for this, we need to poll a second time to ensure the `task_state` is + # cleared before returning, ensuring that a snapshot taken immediately + # after this function returns will succeed. + # + # A better long-term solution will be to separate 'snapshotting' and + # 'image-uploading' in Nova and clear the task-state once the VM + # snapshot is complete but before the upload begins. + task_state_field = "OS-EXT-STS:task_state" + if hasattr(server, task_state_field): + _poll_for_status(cs.servers.get, server.id, 'image_snapshot', + [None], status_field=task_state_field, + show_progress=False, silent=True) + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +@utils.arg('name', metavar='<name>', help='Name of the backup image.') +@utils.arg('backup_type', metavar='<backup-type>', + help='The backup type, like "daily" or "weekly".') +@utils.arg('rotation', metavar='<rotation>', + help='Int parameter representing how many backups to keep around.') +def do_backup(cs, args): + """Back up an instance by creating a 'backup' type snapshot.""" + _find_server(cs, args.server).backup(args.name, + args.backup_type, + args.rotation) + + +@utils.arg('server', + metavar='<server>', + help="Name or ID of server") +@utils.arg('action', + metavar='<action>', + choices=['set', 'delete'], + help="Actions: 'set' or 'delete'") +@utils.arg('metadata', + metavar='<key=value>', + nargs='+', + action='append', + default=[], + help='Metadata to set or delete (only key is necessary on delete)') +def do_meta(cs, args): + """Set or delete metadata on a server.""" + server = _find_server(cs, args.server) + metadata = _extract_metadata(args) + + if args.action == 'set': + cs.servers.set_meta(server, metadata) + elif args.action == 'delete': + cs.servers.delete_meta(server, metadata.keys()) + + +def _print_server(cs, args): + # By default when searching via name we will do a + # findall(name=blah) and do a REST /details, which is not the same + # as a .get() and doesn't return information about flavors and + # images. This fixes it: we redo the call with the id, which does a + # .get() and fetches all information. 
+ server = _find_server(cs, args.server) + + networks = server.networks + info = server._info.copy() + for network_label, address_list in networks.items(): + info['%s network' % network_label] = ', '.join(address_list) + + flavor = info.get('flavor', {}) + flavor_id = flavor.get('id', '') + if args.minimal: + info['flavor'] = flavor_id + else: + info['flavor'] = '%s (%s)' % (_find_flavor(cs, flavor_id).name, + flavor_id) + + image = info.get('image', {}) + if image: + image_id = image.get('id', '') + if args.minimal: + info['image'] = image_id + else: + try: + info['image'] = '%s (%s)' % (_find_image(cs, image_id).name, + image_id) + except Exception: + info['image'] = '%s (%s)' % ("Image not found", image_id) + else: # Booted from volume + info['image'] = "Attempt to boot from volume - no image supplied" + + info.pop('links', None) + info.pop('addresses', None) + + utils.print_dict(info) + + +@utils.arg('--minimal', + dest='minimal', + action="store_true", + default=False, + help='Skips flavor/image lookups when showing instances') +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_show(cs, args): + """Show details about the given server.""" + _print_server(cs, args) + + +@utils.arg('server', metavar='', nargs='+', + help='Name or ID of server(s).') +def do_delete(cs, args): + """Immediately shut down and delete specified server(s).""" + failure_count = 0 + + for server in args.server: + try: + _find_server(cs, server).delete() + except Exception as e: + failure_count += 1 + print(e) + + if failure_count == len(args.server): + raise exceptions.CommandError("Unable to delete any of the specified " + "servers.") + + +def _find_server(cs, server): + """Get a server by name or ID.""" + return utils.find_resource(cs.servers, server) + + +def _find_image(cs, image): + """Get an image by name or ID.""" + return utils.find_resource(cs.images, image) + + +def _find_flavor(cs, flavor): + """Get a flavor by name, ID, or RAM size.""" + try: + return utils.find_resource(cs.flavors, flavor) + except exceptions.NotFound: + return cs.flavors.find(ram=flavor) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('network_id', + metavar='', + help='Network ID.') +def do_add_fixed_ip(cs, args): + """Add new IP address on a network to server.""" + server = _find_server(cs, args.server) + server.add_fixed_ip(args.network_id) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('address', metavar='
', help='IP Address.') +def do_remove_fixed_ip(cs, args): + """Remove an IP address from a server.""" + server = _find_server(cs, args.server) + server.remove_fixed_ip(args.address) + + +def _find_volume(cs, volume): + """Get a volume by name or ID.""" + return utils.find_resource(cs.volumes, volume) + + +def _find_volume_snapshot(cs, snapshot): + """Get a volume snapshot by name or ID.""" + return utils.find_resource(cs.volume_snapshots, snapshot) + + +def _print_volume(volume): + utils.print_dict(volume._info) + + +def _print_volume_snapshot(snapshot): + utils.print_dict(snapshot._info) + + +def _translate_volume_keys(collection): + _translate_keys(collection, + [('displayName', 'display_name'), + ('volumeType', 'volume_type')]) + + +def _translate_volume_snapshot_keys(collection): + _translate_keys(collection, + [('displayName', 'display_name'), + ('volumeId', 'volume_id')]) + + +def _translate_availability_zone_keys(collection): + _translate_keys(collection, + [('zoneName', 'name'), ('zoneState', 'status')]) + + +@utils.arg('--all-tenants', + dest='all_tenants', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=int(utils.bool_from_str(os.environ.get("ALL_TENANTS", 'false'))), + help='Display information from all tenants (Admin only).') +@utils.arg('--all_tenants', + nargs='?', + type=int, + const=1, + help=argparse.SUPPRESS) +@utils.service_type('volume') +def do_volume_list(cs, args): + """List all the volumes.""" + search_opts = {'all_tenants': args.all_tenants} + volumes = cs.volumes.list(search_opts=search_opts) + _translate_volume_keys(volumes) + + # Create a list of servers to which the volume is attached + for vol in volumes: + servers = [s.get('server_id') for s in vol.attachments] + setattr(vol, 'attached_to', ','.join(map(str, servers))) + utils.print_list(volumes, ['ID', 'Status', 'Display Name', + 'Size', 'Volume Type', 'Attached to']) + + +@utils.arg('volume', metavar='', help='Name or ID of the volume.') +@utils.service_type('volume') +def do_volume_show(cs, args): + """Show details about a volume.""" + volume = _find_volume(cs, args.volume) + _print_volume(volume) + + +@utils.arg('size', + metavar='', + type=int, + help='Size of volume in GB') +@utils.arg('--snapshot-id', + metavar='', + default=None, + help='Optional snapshot id to create the volume from. (Default=None)') +@utils.arg('--snapshot_id', + help=argparse.SUPPRESS) +@utils.arg('--image-id', + metavar='', + help='Optional image id to create the volume from. (Default=None)', + default=None) +@utils.arg('--display-name', + metavar='', + default=None, + help='Optional volume name. (Default=None)') +@utils.arg('--display_name', + help=argparse.SUPPRESS) +@utils.arg('--display-description', + metavar='', + default=None, + help='Optional volume description. (Default=None)') +@utils.arg('--display_description', + help=argparse.SUPPRESS) +@utils.arg('--volume-type', + metavar='', + default=None, + help='Optional volume type. (Default=None)') +@utils.arg('--volume_type', + help=argparse.SUPPRESS) +@utils.arg('--availability-zone', metavar='', + help='Optional Availability Zone for volume. 
(Default=None)', + default=None) +@utils.service_type('volume') +def do_volume_create(cs, args): + """Add a new volume.""" + volume = cs.volumes.create(args.size, + args.snapshot_id, + args.display_name, + args.display_description, + args.volume_type, + args.availability_zone, + imageRef=args.image_id) + _print_volume(volume) + + +@utils.arg('volume', + metavar='', + help='Name or ID of the volume to delete.') +@utils.service_type('volume') +def do_volume_delete(cs, args): + """Remove a volume.""" + volume = _find_volume(cs, args.volume) + volume.delete() + + +@utils.arg('server', + metavar='', + help='Name or ID of server.') +@utils.arg('volume', + metavar='', + help='ID of the volume to attach.') +@utils.arg('device', metavar='', + help='Name of the device e.g. /dev/vdb. ' + 'Use "auto" for autoassign (if supported)') +def do_volume_attach(cs, args): + """Attach a volume to a server.""" + if args.device == 'auto': + args.device = None + + volume = cs.volumes.create_server_volume(_find_server(cs, args.server).id, + args.volume, + args.device) + _print_volume(volume) + + +@utils.arg('server', + metavar='', + help='Name or ID of server.') +@utils.arg('attachment_id', + metavar='', + help='Attachment ID of the volume.') +def do_volume_detach(cs, args): + """Detach a volume from a server.""" + cs.volumes.delete_server_volume(_find_server(cs, args.server).id, + args.attachment_id) + + +@utils.service_type('volume') +def do_volume_snapshot_list(cs, _args): + """List all the snapshots.""" + snapshots = cs.volume_snapshots.list() + _translate_volume_snapshot_keys(snapshots) + utils.print_list(snapshots, ['ID', 'Volume ID', 'Status', 'Display Name', + 'Size']) + + +@utils.arg('snapshot', + metavar='', + help='Name or ID of the snapshot.') +@utils.service_type('volume') +def do_volume_snapshot_show(cs, args): + """Show details about a snapshot.""" + snapshot = _find_volume_snapshot(cs, args.snapshot) + _print_volume_snapshot(snapshot) + + +@utils.arg('volume_id', + metavar='', + help='ID of the volume to snapshot') +@utils.arg('--force', + metavar='', + help='Optional flag to indicate whether to snapshot a volume even if its ' + 'attached to an instance. (Default=False)', + default=False) +@utils.arg('--display-name', + metavar='', + default=None, + help='Optional snapshot name. (Default=None)') +@utils.arg('--display_name', + help=argparse.SUPPRESS) +@utils.arg('--display-description', + metavar='', + default=None, + help='Optional snapshot description. 
(Default=None)') +@utils.arg('--display_description', + help=argparse.SUPPRESS) +@utils.service_type('volume') +def do_volume_snapshot_create(cs, args): + """Add a new snapshot.""" + snapshot = cs.volume_snapshots.create(args.volume_id, + args.force, + args.display_name, + args.display_description) + _print_volume_snapshot(snapshot) + + +@utils.arg('snapshot', + metavar='', + help='Name or ID of the snapshot to delete.') +@utils.service_type('volume') +def do_volume_snapshot_delete(cs, args): + """Remove a snapshot.""" + snapshot = _find_volume_snapshot(cs, args.snapshot) + snapshot.delete() + + +def _print_volume_type_list(vtypes): + utils.print_list(vtypes, ['ID', 'Name']) + + +@utils.service_type('volume') +def do_volume_type_list(cs, args): + """Print a list of available 'volume types'.""" + vtypes = cs.volume_types.list() + _print_volume_type_list(vtypes) + + +@utils.arg('name', + metavar='', + help="Name of the new flavor") +@utils.service_type('volume') +def do_volume_type_create(cs, args): + """Create a new volume type.""" + vtype = cs.volume_types.create(args.name) + _print_volume_type_list([vtype]) + + +@utils.arg('id', + metavar='', + help="Unique ID of the volume type to delete") +@utils.service_type('volume') +def do_volume_type_delete(cs, args): + """Delete a specific flavor""" + cs.volume_types.delete(args.id) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('console_type', + metavar='', + help='Type of vnc console ("novnc" or "xvpvnc").') +def do_get_vnc_console(cs, args): + """Get a vnc console to a server.""" + server = _find_server(cs, args.server) + data = server.get_vnc_console(args.console_type) + + class VNCConsole: + def __init__(self, console_dict): + self.type = console_dict['type'] + self.url = console_dict['url'] + + utils.print_list([VNCConsole(data['console'])], ['Type', 'Url']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('console_type', + metavar='', + help='Type of spice console ("spice-html5").') +def do_get_spice_console(cs, args): + """Get a spice console to a server.""" + server = _find_server(cs, args.server) + data = server.get_spice_console(args.console_type) + + class SPICEConsole: + def __init__(self, console_dict): + self.type = console_dict['type'] + self.url = console_dict['url'] + + utils.print_list([SPICEConsole(data['console'])], ['Type', 'Url']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('private_key', + metavar='', + help='Private key (used locally to decrypt password).') +def do_get_password(cs, args): + """Get password for a server.""" + server = _find_server(cs, args.server) + data = server.get_password(args.private_key) + print(data) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_clear_password(cs, args): + """Clear password for a server.""" + server = _find_server(cs, args.server) + server.clear_password() + + +def _print_floating_ip_list(floating_ips): + utils.print_list(floating_ips, ['Ip', 'Instance Id', 'Fixed Ip', 'Pool']) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--length', + metavar='', + default=None, + help='Length in lines to tail.') +def do_console_log(cs, args): + """Get console log output of a server.""" + server = _find_server(cs, args.server) + data = server.get_console_output(length=args.length) + print(data) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('address', metavar='
<address>', help='IP Address.')
+@utils.arg('--fixed-address',
+           metavar='<fixed_address>',
+           default=None,
+           help='Fixed IP Address to associate with.')
+def do_add_floating_ip(cs, args):
+    """Add a floating IP address to a server."""
+    server = _find_server(cs, args.server)
+    server.add_floating_ip(args.address, args.fixed_address)
+
+
+@utils.arg('server', metavar='<server>', help='Name or ID of server.')
+@utils.arg('address', metavar='<address>', help='IP Address.')
+def do_remove_floating_ip(cs, args):
+    """Remove a floating IP address from a server."""
+    server = _find_server(cs, args.server)
+    server.remove_floating_ip(args.address)
+
+
+@utils.arg('server', metavar='<server>', help='Name or ID of server.')
+@utils.arg('secgroup', metavar='<secgroup>', help='Name of Security Group.')
+def do_add_secgroup(cs, args):
+    """Add a Security Group to a server."""
+    server = _find_server(cs, args.server)
+    server.add_security_group(args.secgroup)
+
+
+@utils.arg('server', metavar='<server>', help='Name or ID of server.')
+@utils.arg('secgroup', metavar='<secgroup>', help='Name of Security Group.')
+def do_remove_secgroup(cs, args):
+    """Remove a Security Group from a server."""
+    server = _find_server(cs, args.server)
+    server.remove_security_group(args.secgroup)
+
+
+@utils.arg('pool',
+           metavar='<floating-ip-pool>',
+           help='Name of Floating IP Pool. (Optional)',
+           nargs='?',
+           default=None)
+def do_floating_ip_create(cs, args):
+    """Allocate a floating IP for the current tenant."""
+    _print_floating_ip_list([cs.floating_ips.create(pool=args.pool)])
+
+
+@utils.arg('address', metavar='<address>
', help='IP of Floating Ip.') +def do_floating_ip_delete(cs, args): + """De-allocate a floating IP.""" + floating_ips = cs.floating_ips.list() + for floating_ip in floating_ips: + if floating_ip.ip == args.address: + return cs.floating_ips.delete(floating_ip.id) + raise exceptions.CommandError("Floating ip %s not found." % args.address) + + +def do_floating_ip_list(cs, _args): + """List floating ips for this tenant.""" + _print_floating_ip_list(cs.floating_ips.list()) + + +def do_floating_ip_pool_list(cs, _args): + """List all floating ip pools.""" + utils.print_list(cs.floating_ip_pools.list(), ['name']) + + +@utils.arg('--host', dest='host', metavar='', default=None, + help='Filter by host') +def do_floating_ip_bulk_list(cs, args): + """List all floating ips.""" + utils.print_list(cs.floating_ips_bulk.list(args.host), ['project_id', + 'address', + 'instance_uuid', + 'pool', + 'interface']) + + +@utils.arg('ip_range', metavar='', help='Address range to create') +@utils.arg('--pool', dest='pool', metavar='', default=None, + help='Pool for new Floating IPs') +@utils.arg('--interface', metavar='', default=None, + help='Interface for new Floating IPs') +def do_floating_ip_bulk_create(cs, args): + """Bulk create floating ips by range.""" + cs.floating_ips_bulk.create(args.ip_range, args.pool, args.interface) + + +@utils.arg('ip_range', metavar='', help='Address range to delete') +def do_floating_ip_bulk_delete(cs, args): + """Bulk delete floating ips by range.""" + cs.floating_ips_bulk.delete(args.ip_range) + + +def _print_dns_list(dns_entries): + utils.print_list(dns_entries, ['ip', 'name', 'domain']) + + +def _print_domain_list(domain_entries): + utils.print_list(domain_entries, ['domain', 'scope', + 'project', 'availability_zone']) + + +def do_dns_domains(cs, args): + """Print a list of available dns domains.""" + domains = cs.dns_domains.domains() + _print_domain_list(domains) + + +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('--ip', metavar='', help='ip address', default=None) +@utils.arg('--name', metavar='', help='DNS name', default=None) +def do_dns_list(cs, args): + """List current DNS entries for domain and ip or domain and name.""" + if not (args.ip or args.name): + raise exceptions.CommandError( + "You must specify either --ip or --name") + if args.name: + entry = cs.dns_entries.get(args.domain, args.name) + _print_dns_list([entry]) + else: + entries = cs.dns_entries.get_for_ip(args.domain, + ip=args.ip) + _print_dns_list(entries) + + +@utils.arg('ip', metavar='', help='ip address') +@utils.arg('name', metavar='', help='DNS name') +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('--type', metavar='', help='dns type (e.g. 
"A")', default='A') +def do_dns_create(cs, args): + """Create a DNS entry for domain, name and ip.""" + cs.dns_entries.create(args.domain, args.name, args.ip, args.type) + + +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('name', metavar='', help='DNS name') +def do_dns_delete(cs, args): + """Delete the specified DNS entry.""" + cs.dns_entries.delete(args.domain, args.name) + + +@utils.arg('domain', metavar='', help='DNS domain') +def do_dns_delete_domain(cs, args): + """Delete the specified DNS domain.""" + cs.dns_domains.delete(args.domain) + + +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('--availability-zone', + metavar='', + default=None, + help='Limit access to this domain to instances ' + 'in the specified availability zone.') +@utils.arg('--availability_zone', + help=argparse.SUPPRESS) +def do_dns_create_private_domain(cs, args): + """Create the specified DNS domain.""" + cs.dns_domains.create_private(args.domain, + args.availability_zone) + + +@utils.arg('domain', metavar='', help='DNS domain') +@utils.arg('--project', metavar='', + help='Limit access to this domain to users ' + 'of the specified project.', + default=None) +def do_dns_create_public_domain(cs, args): + """Create the specified DNS domain.""" + cs.dns_domains.create_public(args.domain, + args.project) + + +def _print_secgroup_rules(rules): + class FormattedRule: + def __init__(self, obj): + items = (obj if isinstance(obj, dict) else obj._info).items() + for k, v in items: + if k == 'ip_range': + v = v.get('cidr') + elif k == 'group': + k = 'source_group' + v = v.get('name') + if v is None: + v = '' + + setattr(self, k, v) + + rules = [FormattedRule(rule) for rule in rules] + utils.print_list(rules, ['IP Protocol', 'From Port', 'To Port', + 'IP Range', 'Source Group']) + + +def _print_secgroups(secgroups): + utils.print_list(secgroups, ['Id', 'Name', 'Description']) + + +def _get_secgroup(cs, secgroup): + # Check secgroup is an ID + if uuidutils.is_uuid_like(strutils.safe_encode(secgroup)): + try: + return cs.security_groups.get(secgroup) + except exceptions.NotFound: + pass + + # Check secgroup as a name + match_found = False + for s in cs.security_groups.list(): + encoding = (locale.getpreferredencoding() or + sys.stdin.encoding or + 'UTF-8') + s.name = s.name.encode(encoding) + if secgroup == s.name: + if match_found != False: + msg = ("Multiple security group matches found for name" + " '%s', use an ID to be more specific." % secgroup) + raise exceptions.NoUniqueMatch(msg) + match_found = s + if match_found is False: + raise exceptions.CommandError("Secgroup ID or name '%s' not found." 
+ % secgroup) + return match_found + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +@utils.arg('ip_proto', + metavar='', + help='IP protocol (icmp, tcp, udp).') +@utils.arg('from_port', + metavar='', + help='Port at start of range.') +@utils.arg('to_port', + metavar='', + help='Port at end of range.') +@utils.arg('cidr', metavar='', help='CIDR for address range.') +def do_secgroup_add_rule(cs, args): + """Add a rule to a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + rule = cs.security_group_rules.create(secgroup.id, + args.ip_proto, + args.from_port, + args.to_port, + args.cidr) + _print_secgroup_rules([rule]) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +@utils.arg('ip_proto', + metavar='', + help='IP protocol (icmp, tcp, udp).') +@utils.arg('from_port', + metavar='', + help='Port at start of range.') +@utils.arg('to_port', + metavar='', + help='Port at end of range.') +@utils.arg('cidr', metavar='', help='CIDR for address range.') +def do_secgroup_delete_rule(cs, args): + """Delete a rule from a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + for rule in secgroup.rules: + if (rule['ip_protocol'] and + rule['ip_protocol'].upper() == args.ip_proto.upper() and + rule['from_port'] == int(args.from_port) and + rule['to_port'] == int(args.to_port) and + rule['ip_range']['cidr'] == args.cidr): + _print_secgroup_rules([rule]) + return cs.security_group_rules.delete(rule['id']) + + raise exceptions.CommandError("Rule not found") + + +@utils.arg('name', metavar='', help='Name of security group.') +@utils.arg('description', metavar='', + help='Description of security group.') +def do_secgroup_create(cs, args): + """Create a security group.""" + secgroup = cs.security_groups.create(args.name, args.description) + _print_secgroups([secgroup]) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +@utils.arg('name', metavar='', help='Name of security group.') +@utils.arg('description', metavar='', + help='Description of security group.') +def do_secgroup_update(cs, args): + """Update a security group.""" + sg = _get_secgroup(cs, args.secgroup) + secgroup = cs.security_groups.update(sg, args.name, args.description) + _print_secgroups([secgroup]) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +def do_secgroup_delete(cs, args): + """Delete a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + cs.security_groups.delete(secgroup) + _print_secgroups([secgroup]) + + +@utils.arg('--all-tenants', + dest='all_tenants', + metavar='<0|1>', + nargs='?', + type=int, + const=1, + default=int(utils.bool_from_str(os.environ.get("ALL_TENANTS", 'false'))), + help='Display information from all tenants (Admin only).') +@utils.arg('--all_tenants', + nargs='?', + type=int, + const=1, + help=argparse.SUPPRESS) +def do_secgroup_list(cs, args): + """List security groups for the current tenant.""" + search_opts = {'all_tenants': args.all_tenants} + columns = ['Id', 'Name', 'Description'] + if args.all_tenants: + columns.append('Tenant_ID') + groups = cs.security_groups.list(search_opts=search_opts) + utils.print_list(groups, columns) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +def do_secgroup_list_rules(cs, args): + """List rules for a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + _print_secgroup_rules(secgroup.rules) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of 
security group.') +@utils.arg('source_group', + metavar='', + help='ID or name of source group.') +@utils.arg('ip_proto', + metavar='', + help='IP protocol (icmp, tcp, udp).') +@utils.arg('from_port', + metavar='', + help='Port at start of range.') +@utils.arg('to_port', + metavar='', + help='Port at end of range.') +def do_secgroup_add_group_rule(cs, args): + """Add a source group rule to a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + source_group = _get_secgroup(cs, args.source_group) + params = {} + params['group_id'] = source_group.id + + if args.ip_proto or args.from_port or args.to_port: + if not (args.ip_proto and args.from_port and args.to_port): + raise exceptions.CommandError("ip_proto, from_port, and to_port" + " must be specified together") + params['ip_protocol'] = args.ip_proto.upper() + params['from_port'] = args.from_port + params['to_port'] = args.to_port + + rule = cs.security_group_rules.create(secgroup.id, **params) + _print_secgroup_rules([rule]) + + +@utils.arg('secgroup', + metavar='', + help='ID or name of security group.') +@utils.arg('source_group', + metavar='', + help='ID or name of source group.') +@utils.arg('ip_proto', + metavar='', + help='IP protocol (icmp, tcp, udp).') +@utils.arg('from_port', + metavar='', + help='Port at start of range.') +@utils.arg('to_port', + metavar='', + help='Port at end of range.') +def do_secgroup_delete_group_rule(cs, args): + """Delete a source group rule from a security group.""" + secgroup = _get_secgroup(cs, args.secgroup) + source_group = _get_secgroup(cs, args.source_group) + params = {} + params['group_name'] = source_group.name + + if args.ip_proto or args.from_port or args.to_port: + if not (args.ip_proto and args.from_port and args.to_port): + raise exceptions.CommandError("ip_proto, from_port, and to_port" + " must be specified together") + params['ip_protocol'] = args.ip_proto.upper() + params['from_port'] = int(args.from_port) + params['to_port'] = int(args.to_port) + + for rule in secgroup.rules: + if (rule.get('ip_protocol').upper() == params.get( + 'ip_protocol').upper() and + rule.get('from_port') == params.get('from_port') and + rule.get('to_port') == params.get('to_port') and + rule.get('group', {}).get('name') == + params.get('group_name')): + return cs.security_group_rules.delete(rule['id']) + + raise exceptions.CommandError("Rule not found") + + +@utils.arg('name', metavar='', help='Name of key.') +@utils.arg('--pub-key', + metavar='', + default=None, + help='Path to a public ssh key.') +@utils.arg('--pub_key', + help=argparse.SUPPRESS) +def do_keypair_add(cs, args): + """Create a new key pair for use with instances.""" + name = args.name + pub_key = args.pub_key + + if pub_key: + try: + with open(os.path.expanduser(pub_key)) as f: + pub_key = f.read() + except IOError as e: + raise exceptions.CommandError("Can't open or read '%s': %s" % + (pub_key, e)) + + keypair = cs.keypairs.create(name, pub_key) + + if not pub_key: + private_key = keypair.private_key + print(private_key) + + +@utils.arg('name', metavar='', help='Keypair name to delete.') +def do_keypair_delete(cs, args): + """Delete keypair given by its name.""" + name = args.name + cs.keypairs.delete(name) + + +def do_keypair_list(cs, args): + """Print a list of keypairs for a user""" + keypairs = cs.keypairs.list() + columns = ['Name', 'Fingerprint'] + utils.print_list(keypairs, columns) + + +def _print_keypair(keypair): + kp = keypair._info.copy() + pk = kp.pop('public_key') + utils.print_dict(kp) + print("Public key: %s" % 
pk) + + +@utils.arg('keypair', + metavar='', + help="Name or ID of keypair") +def do_keypair_show(cs, args): + """Show details about the given keypair.""" + keypair = cs.keypairs.get(args.keypair) + _print_keypair(keypair) + + +@utils.arg('--tenant', + #nova db searches by project_id + dest='tenant', + metavar='', + nargs='?', + help='Display information from single tenant (Admin only).') +@utils.arg('--reserved', + dest='reserved', + action='store_true', + default=False, + help='Include reservations count.') +def do_absolute_limits(cs, args): + """Print a list of absolute limits for a user""" + limits = cs.limits.get(args.reserved, args.tenant).absolute + columns = ['Name', 'Value'] + utils.print_list(limits, columns) + + +def do_rate_limits(cs, args): + """Print a list of rate limits for a user""" + limits = cs.limits.get().rate + columns = ['Verb', 'URI', 'Value', 'Remain', 'Unit', 'Next_Available'] + utils.print_list(limits, columns) + + +@utils.arg('--start', metavar='', + help='Usage range start date ex 2012-01-20 (default: 4 weeks ago)', + default=None) +@utils.arg('--end', metavar='', + help='Usage range end date, ex 2012-01-20 (default: tomorrow) ', + default=None) +def do_usage_list(cs, args): + """List usage data for all tenants.""" + dateformat = "%Y-%m-%d" + rows = ["Tenant ID", "Instances", "RAM MB-Hours", "CPU Hours", + "Disk GB-Hours"] + + now = timeutils.utcnow() + + if args.start: + start = datetime.datetime.strptime(args.start, dateformat) + else: + start = now - datetime.timedelta(weeks=4) + + if args.end: + end = datetime.datetime.strptime(args.end, dateformat) + else: + end = now + datetime.timedelta(days=1) + + def simplify_usage(u): + simplerows = map(lambda x: x.lower().replace(" ", "_"), rows) + + setattr(u, simplerows[0], u.tenant_id) + setattr(u, simplerows[1], "%d" % len(u.server_usages)) + setattr(u, simplerows[2], "%.2f" % u.total_memory_mb_usage) + setattr(u, simplerows[3], "%.2f" % u.total_vcpus_usage) + setattr(u, simplerows[4], "%.2f" % u.total_local_gb_usage) + + usage_list = cs.usage.list(start, end, detailed=True) + + print("Usage from %s to %s:" % (start.strftime(dateformat), + end.strftime(dateformat))) + + for usage in usage_list: + simplify_usage(usage) + + utils.print_list(usage_list, rows) + + +@utils.arg('--start', metavar='', + help='Usage range start date ex 2012-01-20 (default: 4 weeks ago)', + default=None) +@utils.arg('--end', metavar='', + help='Usage range end date, ex 2012-01-20 (default: tomorrow) ', + default=None) +@utils.arg('--tenant', metavar='', + default=None, + help='UUID or name of tenant to get usage for.') +def do_usage(cs, args): + """Show usage data for a single tenant.""" + dateformat = "%Y-%m-%d" + rows = ["Instances", "RAM MB-Hours", "CPU Hours", "Disk GB-Hours"] + + now = timeutils.utcnow() + + if args.start: + start = datetime.datetime.strptime(args.start, dateformat) + else: + start = now - datetime.timedelta(weeks=4) + + if args.end: + end = datetime.datetime.strptime(args.end, dateformat) + else: + end = now + datetime.timedelta(days=1) + + def simplify_usage(u): + simplerows = map(lambda x: x.lower().replace(" ", "_"), rows) + + setattr(u, simplerows[0], "%d" % len(u.server_usages)) + setattr(u, simplerows[1], "%.2f" % u.total_memory_mb_usage) + setattr(u, simplerows[2], "%.2f" % u.total_vcpus_usage) + setattr(u, simplerows[3], "%.2f" % u.total_local_gb_usage) + + if args.tenant: + usage = cs.usage.get(args.tenant, start, end) + else: + usage = cs.usage.get(cs.client.tenant_id, start, end) + + print("Usage from %s 
to %s:" % (start.strftime(dateformat), + end.strftime(dateformat))) + + if getattr(usage, 'total_vcpus_usage', None): + simplify_usage(usage) + utils.print_list([usage], rows) + else: + print('None') + + +@utils.arg('pk_filename', + metavar='', + nargs='?', + default='pk.pem', + help='Filename for the private key [Default: pk.pem]') +@utils.arg('cert_filename', + metavar='', + nargs='?', + default='cert.pem', + help='Filename for the X.509 certificate [Default: cert.pem]') +def do_x509_create_cert(cs, args): + """Create x509 cert for a user in tenant.""" + + if os.path.exists(args.pk_filename): + raise exceptions.CommandError("Unable to write privatekey - %s exists." + % args.pk_filename) + if os.path.exists(args.cert_filename): + raise exceptions.CommandError("Unable to write x509 cert - %s exists." + % args.cert_filename) + + certs = cs.certs.create() + + try: + old_umask = os.umask(0o377) + with open(args.pk_filename, 'w') as private_key: + private_key.write(certs.private_key) + print("Wrote private key to %s" % args.pk_filename) + finally: + os.umask(old_umask) + + with open(args.cert_filename, 'w') as cert: + cert.write(certs.data) + print("Wrote x509 certificate to %s" % args.cert_filename) + + +@utils.arg('filename', + metavar='', + nargs='?', + default='cacert.pem', + help='Filename to write the x509 root cert.') +def do_x509_get_root_cert(cs, args): + """Fetch the x509 root cert.""" + if os.path.exists(args.filename): + raise exceptions.CommandError("Unable to write x509 root cert - \ + %s exists." % args.filename) + + with open(args.filename, 'w') as cert: + cacert = cs.certs.get() + cert.write(cacert.data) + print("Wrote x509 root cert to %s" % args.filename) + + +@utils.arg('--hypervisor', metavar='', default=None, + help='type of hypervisor.') +def do_agent_list(cs, args): + """List all builds.""" + result = cs.agents.list(args.hypervisor) + columns = ["Agent_id", "Hypervisor", "OS", "Architecture", "Version", + 'Md5hash', 'Url'] + utils.print_list(result, columns) + + +@utils.arg('os', metavar='', help='type of os.') +@utils.arg('architecture', metavar='', + help='type of architecture') +@utils.arg('version', metavar='', help='version') +@utils.arg('url', metavar='', help='url') +@utils.arg('md5hash', metavar='', help='md5 hash') +@utils.arg('hypervisor', metavar='', default='xen', + help='type of hypervisor.') +def do_agent_create(cs, args): + """Create new agent build.""" + result = cs.agents.create(args.os, args.architecture, + args.version, args.url, + args.md5hash, args.hypervisor) + utils.print_dict(result._info.copy()) + + +@utils.arg('id', metavar='', help='id of the agent-build') +def do_agent_delete(cs, args): + """Delete existing agent build.""" + cs.agents.delete(args.id) + + +@utils.arg('id', metavar='', help='id of the agent-build') +@utils.arg('version', metavar='', help='version') +@utils.arg('url', metavar='', help='url') +@utils.arg('md5hash', metavar='', help='md5hash') +def do_agent_modify(cs, args): + """Modify existing agent build.""" + result = cs.agents.update(args.id, args.version, + args.url, args.md5hash) + utils.print_dict(result._info) + + +def _find_aggregate(cs, aggregate): + """Get a aggregate by name or ID.""" + return utils.find_resource(cs.aggregates, aggregate) + + +def do_aggregate_list(cs, args): + """Print a list of all aggregates.""" + aggregates = cs.aggregates.list() + columns = ['Id', 'Name', 'Availability Zone'] + utils.print_list(aggregates, columns) + + +@utils.arg('name', metavar='', help='Name of aggregate.') 
+@utils.arg('availability_zone', + metavar='', + default=None, + nargs='?', + help='The availability zone of the aggregate (optional).') +def do_aggregate_create(cs, args): + """Create a new aggregate with the specified details.""" + aggregate = cs.aggregates.create(args.name, args.availability_zone) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', + help='Name or ID of aggregate to delete.') +def do_aggregate_delete(cs, args): + """Delete the aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + cs.aggregates.delete(aggregate) + print("Aggregate %s has been successfully deleted." % aggregate.id) + + +@utils.arg('aggregate', metavar='', + help='Name or ID of aggregate to update.') +@utils.arg('name', metavar='', help='Name of aggregate.') +@utils.arg('availability_zone', + metavar='', + nargs='?', + default=None, + help='The availability zone of the aggregate.') +def do_aggregate_update(cs, args): + """Update the aggregate's name and optionally availability zone.""" + aggregate = _find_aggregate(cs, args.aggregate) + updates = {"name": args.name} + if args.availability_zone: + updates["availability_zone"] = args.availability_zone + + aggregate = cs.aggregates.update(aggregate.id, updates) + print("Aggregate %s has been successfully updated." % aggregate.id) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', + help='Name or ID of aggregate to update.') +@utils.arg('metadata', + metavar='', + nargs='+', + action='append', + default=[], + help='Metadata to add/update to aggregate') +def do_aggregate_set_metadata(cs, args): + """Update the metadata associated with the aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + metadata = _extract_metadata(args) + aggregate = cs.aggregates.set_metadata(aggregate.id, metadata) + print("Aggregate %s has been successfully updated." % aggregate.id) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', help='Name or ID of aggregate.') +@utils.arg('host', metavar='', help='The host to add to the aggregate.') +def do_aggregate_add_host(cs, args): + """Add the host to the specified aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + aggregate = cs.aggregates.add_host(aggregate.id, args.host) + print("Aggregate %s has been successfully updated." % aggregate.id) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', help='Name or ID of aggregate.') +@utils.arg('host', metavar='', + help='The host to remove from the aggregate.') +def do_aggregate_remove_host(cs, args): + """Remove the specified host from the specified aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + aggregate = cs.aggregates.remove_host(aggregate.id, args.host) + print("Aggregate %s has been successfully updated." 
% aggregate.id) + _print_aggregate_details(aggregate) + + +@utils.arg('aggregate', metavar='', help='Name or ID of aggregate.') +def do_aggregate_details(cs, args): + """Show details of the specified aggregate.""" + aggregate = _find_aggregate(cs, args.aggregate) + _print_aggregate_details(aggregate) + + +def _print_aggregate_details(aggregate): + columns = ['Id', 'Name', 'Availability Zone', 'Hosts', 'Metadata'] + utils.print_list([aggregate], columns) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('host', metavar='', default=None, nargs='?', + help='destination host name.') +@utils.arg('--block-migrate', + action='store_true', + dest='block_migrate', + default=False, + help='True in case of block_migration.\ + (Default=False:live_migration)') +@utils.arg('--block_migrate', + action='store_true', + help=argparse.SUPPRESS) +@utils.arg('--disk-over-commit', + action='store_true', + dest='disk_over_commit', + default=False, + help='Allow overcommit.(Default=False)') +@utils.arg('--disk_over_commit', + action='store_true', + help=argparse.SUPPRESS) +def do_live_migration(cs, args): + """Migrate running instance to a new machine.""" + _find_server(cs, args.server).live_migrate(args.host, + args.block_migrate, + args.disk_over_commit) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--active', action='store_const', dest='state', + default='error', const='active', + help='Request the instance be reset to "active" state instead ' + 'of "error" state (the default).') +def do_reset_state(cs, args): + """Reset the state of an instance.""" + _find_server(cs, args.server).reset_state(args.state) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_reset_network(cs, args): + """Reset network of an instance.""" + _find_server(cs, args.server).reset_network() + + +@utils.arg('--host', metavar='', default=None, + help='Name of host.') +@utils.arg('--binary', metavar='', default=None, + help='Service binary.') +def do_service_list(cs, args): + """Show a list of all running services. Filter by host & binary.""" + result = cs.services.list(host=args.host, binary=args.binary) + columns = ["Binary", "Host", "Zone", "Status", "State", "Updated_at"] + # NOTE(sulo): we check if the response has disabled_reason + # so as not to add the column when the extended ext is not enabled. 
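Note: the `hasattr(result[0], ...)` probe that follows assumes the service list is non-empty; an empty result would raise IndexError. A more defensive form of the same column-selection logic, shown as an illustrative sketch rather than a change to the vendored file:

def service_columns(services):
    """Pick display columns based on what the API actually returned."""
    columns = ["Binary", "Host", "Zone", "Status", "State", "Updated_at"]
    # Only show the extension column when the response carries it, and
    # never index into an empty result set.
    if services and hasattr(services[0], 'disabled_reason'):
        columns.append("Disabled Reason")
    return columns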
+ if hasattr(result[0], 'disabled_reason'): + columns.append("Disabled Reason") + utils.print_list(result, columns) + + +@utils.arg('host', metavar='', help='Name of host.') +@utils.arg('binary', metavar='', help='Service binary.') +def do_service_enable(cs, args): + """Enable the service.""" + result = cs.services.enable(args.host, args.binary) + utils.print_list([result], ['Host', 'Binary', 'Status']) + + +@utils.arg('host', metavar='', help='Name of host.') +@utils.arg('binary', metavar='', help='Service binary.') +@utils.arg('--reason', metavar='', + help='Reason for disabling service.') +def do_service_disable(cs, args): + """Disable the service.""" + if args.reason: + result = cs.services.disable_log_reason(args.host, args.binary, + args.reason) + utils.print_list([result], ['Host', 'Binary', 'Status', + 'Disabled Reason']) + else: + result = cs.services.disable(args.host, args.binary) + utils.print_list([result], ['Host', 'Binary', 'Status']) + + +@utils.arg('fixed_ip', metavar='', help='Fixed IP Address.') +def do_fixed_ip_get(cs, args): + """Retrieve info on a fixed ip.""" + result = cs.fixed_ips.get(args.fixed_ip) + utils.print_list([result], ['address', 'cidr', 'hostname', 'host']) + + +@utils.arg('fixed_ip', metavar='', help='Fixed IP Address.') +def do_fixed_ip_reserve(cs, args): + """Reserve a fixed IP.""" + cs.fixed_ips.reserve(args.fixed_ip) + + +@utils.arg('fixed_ip', metavar='', help='Fixed IP Address.') +def do_fixed_ip_unreserve(cs, args): + """Unreserve a fixed IP.""" + cs.fixed_ips.unreserve(args.fixed_ip) + + +@utils.arg('host', metavar='', help='Name of host.') +def do_host_describe(cs, args): + """Describe a specific host.""" + result = cs.hosts.get(args.host) + columns = ["HOST", "PROJECT", "cpu", "memory_mb", "disk_gb"] + utils.print_list(result, columns) + + +@utils.arg('--zone', metavar='', default=None, + help='Filters the list, returning only those ' + 'hosts in the availability zone .') +def do_host_list(cs, args): + """List all hosts by service.""" + columns = ["host_name", "service", "zone"] + result = cs.hosts.list(args.zone) + utils.print_list(result, columns) + + +@utils.arg('host', metavar='', help='Name of host.') +@utils.arg('--status', metavar='', default=None, dest='status', + help='Either enable or disable a host.') +@utils.arg('--maintenance', + metavar='', + default=None, + dest='maintenance', + help='Either put or resume host to/from maintenance.') +def do_host_update(cs, args): + """Update host settings.""" + updates = {} + columns = ["HOST"] + if args.status: + updates['status'] = args.status + columns.append("status") + if args.maintenance: + updates['maintenance_mode'] = args.maintenance + columns.append("maintenance_mode") + result = cs.hosts.update(args.host, updates) + utils.print_list([result], columns) + + +@utils.arg('host', metavar='', help='Name of host.') +@utils.arg('--action', metavar='', dest='action', + choices=['startup', 'shutdown', 'reboot'], + help='A power action: startup, reboot, or shutdown.') +def do_host_action(cs, args): + """Perform a power action on a host.""" + result = cs.hosts.host_action(args.host, args.action) + utils.print_list([result], ['HOST', 'power_action']) + + +@utils.arg('--combine', + dest='combine', + action="store_true", + default=False, + help='Generate a single report for all services.') +def do_coverage_start(cs, args): + """Start Nova coverage reporting.""" + cs.coverage.start(combine=args.combine) + print("Coverage collection started") + + +def do_coverage_stop(cs, args): + """Stop Nova 
coverage reporting.""" + out = cs.coverage.stop() + print("Coverage data file path: %s" % out[-1]['path']) + + +@utils.arg('filename', metavar='', help='report filename') +@utils.arg('--html', + dest='html', + action="store_true", + default=False, + help='Generate HTML reports instead of text ones.') +@utils.arg('--xml', + dest='xml', + action="store_true", + default=False, + help='Generate XML reports instead of text ones.') +def do_coverage_report(cs, args): + """Generate coverage report.""" + if args.html == True and args.xml == True: + raise exceptions.CommandError("--html and --xml must not be " + "specified together.") + cov = cs.coverage.report(args.filename, xml=args.xml, html=args.html) + print("Report path: %s" % cov[-1]['path']) + + +def do_coverage_reset(cs, args): + """Reset coverage data.""" + cs.coverage.reset() + print("Coverage data reset") + + +def _find_hypervisor(cs, hypervisor): + """Get a hypervisor by name or ID.""" + return utils.find_resource(cs.hypervisors, hypervisor) + + +@utils.arg('--matching', metavar='', default=None, + help='List hypervisors matching the given .') +def do_hypervisor_list(cs, args): + """List hypervisors.""" + columns = ['ID', 'Hypervisor hostname'] + if args.matching: + utils.print_list(cs.hypervisors.search(args.matching), columns) + else: + # Since we're not outputting detail data, choose + # detailed=False for server-side efficiency + utils.print_list(cs.hypervisors.list(False), columns) + + +@utils.arg('hostname', metavar='', + help='The hypervisor hostname (or pattern) to search for.') +def do_hypervisor_servers(cs, args): + """List instances belonging to specific hypervisors.""" + hypers = cs.hypervisors.search(args.hostname, servers=True) + + class InstanceOnHyper(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + # Massage the result into a list to be displayed + instances = [] + for hyper in hypers: + hyper_host = hyper.hypervisor_hostname + hyper_id = hyper.id + if hasattr(hyper, 'servers'): + instances.extend([InstanceOnHyper(id=serv['uuid'], + name=serv['name'], + hypervisor_hostname=hyper_host, + hypervisor_id=hyper_id) + for serv in hyper.servers]) + + # Output the data + utils.print_list(instances, ['ID', 'Name', 'Hypervisor ID', + 'Hypervisor Hostname']) + + +@utils.arg('hypervisor', + metavar='', + help='Name or ID of the hypervisor to show the details of.') +def do_hypervisor_show(cs, args): + """Display the details of the specified hypervisor.""" + hyper = _find_hypervisor(cs, args.hypervisor) + + # Build up the dict + info = hyper._info.copy() + info['service_id'] = info['service']['id'] + info['service_host'] = info['service']['host'] + del info['service'] + + utils.print_dict(info) + + +@utils.arg('hypervisor', + metavar='', + help='Name or ID of the hypervisor to show the uptime of.') +def do_hypervisor_uptime(cs, args): + """Display the uptime of the specified hypervisor.""" + hyper = _find_hypervisor(cs, args.hypervisor) + hyper = cs.hypervisors.uptime(hyper) + + # Output the uptime information + utils.print_dict(hyper._info.copy()) + + +def do_hypervisor_stats(cs, args): + """Get hypervisor statistics over all compute nodes.""" + stats = cs.hypervisors.statistics() + utils.print_dict(stats._info.copy()) + + +def ensure_service_catalog_present(cs): + if not hasattr(cs.client, 'service_catalog'): + # Turn off token caching and re-auth + cs.client.unauthenticate() + cs.client.use_token_cache(False) + cs.client.authenticate() + + +def do_endpoints(cs, _args): + """Discover endpoints that get 
returned from the authenticate services.""" + ensure_service_catalog_present(cs) + catalog = cs.client.service_catalog.catalog + for e in catalog['access']['serviceCatalog']: + utils.print_dict(e['endpoints'][0], e['name']) + + +@utils.arg('--wrap', dest='wrap', metavar='', default=64, + help='wrap PKI tokens to a specified length, or 0 to disable') +def do_credentials(cs, _args): + """Show user credentials returned from auth.""" + ensure_service_catalog_present(cs) + catalog = cs.client.service_catalog.catalog + utils.print_dict(catalog['access']['user'], "User Credentials", + wrap=int(_args.wrap)) + utils.print_dict(catalog['access']['token'], "Token", wrap=int(_args.wrap)) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--port', + dest='port', + action='store', + type=int, + default=22, + help='Optional flag to indicate which port to use for ssh. ' + '(Default=22)') +@utils.arg('--private', + dest='private', + action='store_true', + default=False, + help='Optional flag to indicate whether to use private address ' + 'attached to an instance. (Default=False)') +@utils.arg('--ipv6', + dest='ipv6', + action='store_true', + default=False, + help='Optional flag to indicate whether to use an IPv6 address ' + 'attached to an instance. (Defaults to IPv4 address)') +@utils.arg('--login', metavar='', help='Login to use.', default="root") +@utils.arg('-i', '--identity', + dest='identity', + help='Private key file, same as the -i option to the ssh command.', + default='') +@utils.arg('--extra-opts', + dest='extra', + help='Extra options to pass to ssh. see: man ssh', + default='') +def do_ssh(cs, args): + """SSH into a server.""" + addresses = _find_server(cs, args.server).addresses + address_type = "private" if args.private else "public" + version = 6 if args.ipv6 else 4 + + if address_type not in addresses: + print("ERROR: No %s addresses found for '%s'." % (address_type, + args.server)) + return + + ip_address = None + for address in addresses[address_type]: + if address['version'] == version: + ip_address = address['addr'] + break + + identity = '-i %s' % args.identity if len(args.identity) else '' + + if ip_address: + os.system("ssh -%d -p%d %s %s@%s %s" % (version, args.port, identity, + args.login, ip_address, + args.extra)) + else: + pretty_version = "IPv%d" % version + print("ERROR: No %s %s address found." 
% (address_type, + pretty_version)) + return + + +_quota_resources = ['instances', 'cores', 'ram', 'volumes', 'gigabytes', + 'floating_ips', 'fixed_ips', 'metadata_items', + 'injected_files', 'key_pairs', + 'injected_file_content_bytes', 'injected_file_path_bytes', + 'security_groups', 'security_group_rules'] + + +def _quota_show(quotas): + quota_dict = {} + for resource in _quota_resources: + try: + quota_dict[resource] = getattr(quotas, resource) + except AttributeError: + pass + utils.print_dict(quota_dict) + + +def _quota_update(manager, identifier, args): + updates = {} + for resource in _quota_resources: + val = getattr(args, resource, None) + if val is not None: + updates[resource] = val + + if updates: + # default value of force is None to make sure this client + # will be compatibile with old nova server + force_update = getattr(args, 'force', None) + if isinstance(manager, quotas.QuotaSetManager): + manager.update(identifier, force=force_update, **updates) + else: + manager.update(identifier, **updates) + + +@utils.arg('--tenant', + metavar='', + default=None, + help='ID of tenant to list the quotas for.') +def do_quota_show(cs, args): + """List the quotas for a tenant.""" + + if not args.tenant: + _quota_show(cs.quotas.get(cs.client.tenant_id)) + else: + _quota_show(cs.quotas.get(args.tenant)) + + +@utils.arg('--tenant', + metavar='', + default=None, + help='ID of tenant to list the default quotas for.') +def do_quota_defaults(cs, args): + """List the default quotas for a tenant.""" + + if not args.tenant: + _quota_show(cs.quotas.defaults(cs.client.tenant_id)) + else: + _quota_show(cs.quotas.defaults(args.tenant)) + + +@utils.arg('tenant', + metavar='', + help='ID of tenant to set the quotas for.') +@utils.arg('--instances', + metavar='', + type=int, default=None, + help='New value for the "instances" quota.') +@utils.arg('--cores', + metavar='', + type=int, default=None, + help='New value for the "cores" quota.') +@utils.arg('--ram', + metavar='', + type=int, default=None, + help='New value for the "ram" quota.') +@utils.arg('--volumes', + metavar='', + type=int, default=None, + help='New value for the "volumes" quota.') +@utils.arg('--gigabytes', + metavar='', + type=int, default=None, + help='New value for the "gigabytes" quota.') +@utils.arg('--floating-ips', + metavar='', + type=int, + default=None, + help='New value for the "floating-ips" quota.') +@utils.arg('--floating_ips', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--fixed-ips', + metavar='', + type=int, + default=None, + help='New value for the "fixed-ips" quota.') +@utils.arg('--metadata-items', + metavar='', + type=int, + default=None, + help='New value for the "metadata-items" quota.') +@utils.arg('--metadata_items', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-files', + metavar='', + type=int, + default=None, + help='New value for the "injected-files" quota.') +@utils.arg('--injected_files', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-file-content-bytes', + metavar='', + type=int, + default=None, + help='New value for the "injected-file-content-bytes" quota.') +@utils.arg('--injected_file_content_bytes', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-file-path-bytes', + metavar='', + type=int, + default=None, + help='New value for the "injected-file-path-bytes" quota.') +@utils.arg('--key-pairs', + metavar='', + type=int, + default=None, + help='New value for the "key-pairs" quota.') +@utils.arg('--security-groups', + metavar='', + type=int, + default=None, 
+ help='New value for the "security-groups" quota.') +@utils.arg('--security-group-rules', + metavar='', + type=int, + default=None, + help='New value for the "security-group-rules" quota.') +@utils.arg('--force', + dest='force', + action="store_true", + default=None, + help='Whether force update the quota even if the already used' + ' and reserved exceeds the new quota') +def do_quota_update(cs, args): + """Update the quotas for a tenant.""" + + _quota_update(cs.quotas, args.tenant, args) + + +@utils.arg('--tenant', + metavar='', + help='ID of tenant to delete quota for.') +def do_quota_delete(cs, args): + """Delete quota for a tenant so their quota will revert back to default.""" + + cs.quotas.delete(args.tenant) + + +@utils.arg('class_name', + metavar='', + help='Name of quota class to list the quotas for.') +def do_quota_class_show(cs, args): + """List the quotas for a quota class.""" + + _quota_show(cs.quota_classes.get(args.class_name)) + + +@utils.arg('class_name', + metavar='', + help='Name of quota class to set the quotas for.') +@utils.arg('--instances', + metavar='', + type=int, default=None, + help='New value for the "instances" quota.') +@utils.arg('--cores', + metavar='', + type=int, default=None, + help='New value for the "cores" quota.') +@utils.arg('--ram', + metavar='', + type=int, default=None, + help='New value for the "ram" quota.') +@utils.arg('--volumes', + metavar='', + type=int, default=None, + help='New value for the "volumes" quota.') +@utils.arg('--gigabytes', + metavar='', + type=int, default=None, + help='New value for the "gigabytes" quota.') +@utils.arg('--floating-ips', + metavar='', + type=int, + default=None, + help='New value for the "floating-ips" quota.') +@utils.arg('--floating_ips', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--metadata-items', + metavar='', + type=int, + default=None, + help='New value for the "metadata-items" quota.') +@utils.arg('--metadata_items', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-files', + metavar='', + type=int, + default=None, + help='New value for the "injected-files" quota.') +@utils.arg('--injected_files', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-file-content-bytes', + metavar='', + type=int, + default=None, + help='New value for the "injected-file-content-bytes" quota.') +@utils.arg('--injected_file_content_bytes', + type=int, + help=argparse.SUPPRESS) +@utils.arg('--injected-file-path-bytes', + metavar='', + type=int, + default=None, + help='New value for the "injected-file-path-bytes" quota.') +@utils.arg('--key-pairs', + metavar='', + type=int, + default=None, + help='New value for the "key-pairs" quota.') +@utils.arg('--security-groups', + metavar='', + type=int, + default=None, + help='New value for the "security-groups" quota.') +@utils.arg('--security-group-rules', + metavar='', + type=int, + default=None, + help='New value for the "security-group-rules" quota.') +def do_quota_class_update(cs, args): + """Update the quotas for a quota class.""" + + _quota_update(cs.quota_classes, args.class_name, args) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('host', metavar='', help='Name or ID of target host.') +@utils.arg('--password', + dest='password', + metavar='', + default=None, + help="Set the provided password on the evacuated instance. 
Not applicable " + "with on-shared-storage flag") +@utils.arg('--on-shared-storage', + dest='on_shared_storage', + action="store_true", + default=False, + help='Specifies whether instance files located on shared storage') +def do_evacuate(cs, args): + """Evacuate server from failed host to specified one.""" + server = _find_server(cs, args.server) + + res = server.evacuate(args.host, args.on_shared_storage, args.password)[1] + if type(res) is dict: + utils.print_dict(res) + + +def _print_interfaces(interfaces): + columns = ['Port State', 'Port ID', 'Net ID', 'IP addresses', + 'MAC Addr'] + + class FormattedInterface(object): + def __init__(self, interface): + for col in columns: + key = col.lower().replace(" ", "_") + if hasattr(interface, key): + setattr(self, key, getattr(interface, key)) + self.ip_addresses = ",".join([fip['ip_address'] + for fip in interface.fixed_ips]) + utils.print_list([FormattedInterface(i) for i in interfaces], columns) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +def do_interface_list(cs, args): + """List interfaces attached to an instance.""" + server = _find_server(cs, args.server) + + res = server.interface_list() + if type(res) is list: + _print_interfaces(res) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('--port-id', metavar='', help='Port ID.', dest="port_id") +@utils.arg('--net-id', metavar='', help='Network ID', + default=None, dest="net_id") +@utils.arg('--fixed-ip', metavar='', help='Requested fixed IP.', + default=None, dest="fixed_ip") +def do_interface_attach(cs, args): + """Attach a network interface to an instance.""" + server = _find_server(cs, args.server) + + res = server.interface_attach(args.port_id, args.net_id, args.fixed_ip) + if type(res) is dict: + utils.print_dict(res) + + +@utils.arg('server', metavar='', help='Name or ID of server.') +@utils.arg('port_id', metavar='', help='Port ID.') +def do_interface_detach(cs, args): + """Detach a network interface from an instance.""" + server = _find_server(cs, args.server) + + res = server.interface_detach(args.port_id) + if type(res) is dict: + utils.print_dict(res) + + +def _treeizeAvailabilityZone(zone): + """Build a tree view for availability zones.""" + AvailabilityZone = availability_zones.AvailabilityZone + + az = AvailabilityZone(zone.manager, + copy.deepcopy(zone._info), zone._loaded) + result = [] + + # Zone tree view item + az.zoneName = zone.zoneName + az.zoneState = ('available' + if zone.zoneState['available'] else 'not available') + az._info['zoneName'] = az.zoneName + az._info['zoneState'] = az.zoneState + result.append(az) + + if zone.hosts is not None: + for (host, services) in zone.hosts.items(): + # Host tree view item + az = AvailabilityZone(zone.manager, + copy.deepcopy(zone._info), zone._loaded) + az.zoneName = '|- %s' % host + az.zoneState = '' + az._info['zoneName'] = az.zoneName + az._info['zoneState'] = az.zoneState + result.append(az) + + for (svc, state) in services.items(): + # Service tree view item + az = AvailabilityZone(zone.manager, + copy.deepcopy(zone._info), zone._loaded) + az.zoneName = '| |- %s' % svc + az.zoneState = '%s %s %s' % ( + 'enabled' if state['active'] else 'disabled', + ':-)' if state['available'] else 'XXX', + state['updated_at']) + az._info['zoneName'] = az.zoneName + az._info['zoneState'] = az.zoneState + result.append(az) + return result + + +@utils.service_type('compute') +def do_availability_zone_list(cs, _args): + """List all the availability zones.""" + try: + availability_zones 
= cs.availability_zones.list() + except exceptions.Forbidden as e: # the policy probably forbids this + try: + availability_zones = cs.availability_zones.list(detailed=False) + except Exception: + raise e + + result = [] + for zone in availability_zones: + result += _treeizeAvailabilityZone(zone) + _translate_availability_zone_keys(result) + utils.print_list(result, ['Name', 'Status'], + sortby_index=None) diff --git a/awx/lib/site-packages/os_diskconfig_python_novaclient_ext/__init__.py b/awx/lib/site-packages/os_diskconfig_python_novaclient_ext/__init__.py new file mode 100644 index 0000000000..32dc899e9f --- /dev/null +++ b/awx/lib/site-packages/os_diskconfig_python_novaclient_ext/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Disk Config extension +""" + +from novaclient import utils +from novaclient.v1_1 import servers +from novaclient.v1_1 import shell + +API_DISK_CONFIG = "OS-DCF:diskConfig" + + +def add_args(): + utils.add_arg(shell.do_boot, + '--disk-config', + default=None, + metavar='<auto|manual>', + help="Whether to expand primary partition to fill disk." + " This overrides the value inherited from the image.") + + +def bind_args_to_resource_manager(args): + def add_disk_config(args): + return dict(disk_config=args.disk_config) + + utils.add_resource_manager_extra_kwargs_hook( + shell.do_boot, add_disk_config) + + +def add_modify_body_hook(): + def modify_body_for_create(body, **kwargs): + disk_config = kwargs.get('disk_config') + if disk_config: + disk_config = disk_config.upper() + + if disk_config in ('AUTO', 'MANUAL'): + body["server"][API_DISK_CONFIG] = disk_config + else: + raise Exception("Unrecognized disk_config '%s'" % disk_config) + + servers.ServerManager.add_hook( + 'modify_body_for_create', modify_body_for_create) + + +def __pre_parse_args__(): + add_args() + + +def __post_parse_args__(args): + bind_args_to_resource_manager(args) + add_modify_body_hook() diff --git a/awx/lib/site-packages/os_networksv2_python_novaclient_ext.py b/awx/lib/site-packages/os_networksv2_python_novaclient_ext.py new file mode 100644 index 0000000000..0cd51aa46e --- /dev/null +++ b/awx/lib/site-packages/os_networksv2_python_novaclient_ext.py @@ -0,0 +1,77 @@ +# Copyright 2011 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
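A note on the disk-config extension above: novaclient calls the module's __pre_parse_args__ hook before building the CLI parser and __post_parse_args__ after parsing, so a single boot flag flows through three small hooks into the request body. The sketch below traces that flow for a hypothetical "--disk-config auto"; it is illustrative only (the body dict and server name are not taken from this diff):

# Illustrative sketch, not part of the vendored files.
#   nova boot demo --image IMG --flavor FLV --disk-config auto
body = {'server': {'name': 'demo'}}   # body assembled by do_boot
disk_config = 'auto'                  # value parsed from --disk-config
disk_config = disk_config.upper()     # done inside modify_body_for_create()
if disk_config in ('AUTO', 'MANUAL'):
    body['server']['OS-DCF:diskConfig'] = disk_config   # -> 'AUTO'
else:
    # invalid values raise before any API request is sent
    raise Exception("Unrecognized disk_config '%s'" % disk_config)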
+ +from novaclient import base +from novaclient import utils + + +class Network(base.Resource): + def delete(self): + self.manager.delete(network=self) + + +class NetworkManager(base.ManagerWithFind): + resource_class = base.Resource + + def list(self): + return self._list('/os-networksv2', 'networks') + + def get(self, network): + return self._get('/os-networksv2/%s' % base.getid(network), 'network') + + def delete(self, network): + self._delete('/os-networksv2/%s' % base.getid(network)) + + def create(self, label, cidr): + body = {'network': {'label': label, 'cidr': cidr}} + return self._create('/os-networksv2', body, 'network') + + +@utils.arg('network_id', metavar='<network_id>', help='ID of network') +def do_network(cs, args): + """ + Show a network + """ + network = cs.os_networksv2_python_novaclient_ext.get(args.network_id) + utils.print_dict(network._info) + + +def do_network_list(cs, args): + """ + List networks + """ + networks = cs.os_networksv2_python_novaclient_ext.list() + utils.print_list(networks, ['ID', 'Label', 'CIDR']) + + +@utils.arg('label', metavar='<network_label>', + help='Network label (ex. my_new_network)') +@utils.arg('cidr', metavar='<cidr>', + help='IP block to allocate from (ex. 172.16.0.0/24 or ' + '2001:DB8::/64)') +def do_network_create(cs, args): + """ + Create a network + """ + network = cs.os_networksv2_python_novaclient_ext.create(args.label, + args.cidr) + utils.print_dict(network._info) + + +@utils.arg('network_id', metavar='<network_id>', help='ID of network') +def do_network_delete(cs, args): + """ + Delete a network + """ + cs.os_networksv2_python_novaclient_ext.delete(args.network_id) diff --git a/awx/lib/site-packages/pbr/__init__.py b/awx/lib/site-packages/pbr/__init__.py new file mode 100644 index 0000000000..f745a135ae --- /dev/null +++ b/awx/lib/site-packages/pbr/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/awx/lib/site-packages/pbr/core.py b/awx/lib/site-packages/pbr/core.py new file mode 100644 index 0000000000..f622ad000b --- /dev/null +++ b/awx/lib/site-packages/pbr/core.py @@ -0,0 +1,127 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (C) 2013 Association of Universities for Research in Astronomy +# (AURA) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1.
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of AURA and its representatives may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + +from distutils import core +from distutils import errors +import os +import sys +import warnings + +from setuptools import dist + +from pbr import util + + +core.Distribution = dist._get_unpatched(core.Distribution) +if sys.version_info[0] == 3: + string_type = str + integer_types = (int,) +else: + string_type = basestring + integer_types = (int, long) + + +def pbr(dist, attr, value): + """Implements the actual pbr setup() keyword. When used, this should be + the only keyword in your setup() aside from `setup_requires`. + + If given as a string, the value of pbr is assumed to be the relative path + to the setup.cfg file to use. Otherwise, if it evaluates to true, it + simply assumes that pbr should be used, and the default 'setup.cfg' is + used. + + This works by reading the setup.cfg file, parsing out the supported + metadata and command options, and using them to rebuild the + `DistributionMetadata` object and set the newly added command options. + + The reason for doing things this way is that a custom `Distribution` class + will not play nicely with setup_requires; however, this implementation may + not work well with distributions that do use a `Distribution` subclass. + """ + + if not value: + return + if isinstance(value, string_type): + path = os.path.abspath(value) + else: + path = os.path.abspath('setup.cfg') + if not os.path.exists(path): + raise errors.DistutilsFileError( + 'The setup.cfg file %s does not exist.' 
% path) + + # Converts the setup.cfg file to setup() arguments + try: + attrs = util.cfg_to_args(path) + except Exception: + e = sys.exc_info()[1] + raise errors.DistutilsSetupError( + 'Error parsing %s: %s: %s' % (path, e.__class__.__name__, e)) + + # Repeat some of the Distribution initialization code with the newly + # provided attrs + if attrs: + # Skips 'options' and 'licence' support which are rarely used; may add + # back in later if demanded + for key, val in attrs.items(): + if hasattr(dist.metadata, 'set_' + key): + getattr(dist.metadata, 'set_' + key)(val) + elif hasattr(dist.metadata, key): + setattr(dist.metadata, key, val) + elif hasattr(dist, key): + setattr(dist, key, val) + else: + msg = 'Unknown distribution option: %s' % repr(key) + warnings.warn(msg) + + # Re-finalize the underlying Distribution + core.Distribution.finalize_options(dist) + + # This bit comes out of distribute/setuptools + if isinstance(dist.metadata.version, integer_types + (float,)): + # Some people apparently take "version number" too literally :) + dist.metadata.version = str(dist.metadata.version) + + # This bit of hackery is necessary so that the Distribution will ignore + # normally unsupported command options (namely pre-hooks and post-hooks). + # dist.command_options is normally a dict mapping command names to dicts of + # their options. Now it will be a defaultdict that returns IgnoreDicts for + # each command's options, so we can pass through the unsupported options. + ignore = ['pre_hook.*', 'post_hook.*'] + dist.command_options = util.DefaultGetDict(lambda: util.IgnoreDict(ignore)) diff --git a/awx/lib/site-packages/pbr/extra_files.py b/awx/lib/site-packages/pbr/extra_files.py new file mode 100644 index 0000000000..a72db0c133 --- /dev/null +++ b/awx/lib/site-packages/pbr/extra_files.py @@ -0,0 +1,35 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from distutils import errors +import os + +_extra_files = [] + + +def get_extra_files(): + global _extra_files + return _extra_files + + +def set_extra_files(extra_files): + # Let's do a sanity check + for filename in extra_files: + if not os.path.exists(filename): + raise errors.DistutilsFileError( + '%s from the extra_files option in setup.cfg does not ' + 'exist' % filename) + global _extra_files + _extra_files[:] = extra_files[:] diff --git a/awx/lib/site-packages/pbr/find_package.py b/awx/lib/site-packages/pbr/find_package.py new file mode 100644 index 0000000000..2319c060dc --- /dev/null +++ b/awx/lib/site-packages/pbr/find_package.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import setuptools + + +def smart_find_packages(package_list): + """Run find_packages the way we intend.""" + packages = [] + for pkg in package_list.strip().split("\n"): + pkg_path = pkg.replace('.', os.path.sep) + packages.append(pkg) + packages.extend(['%s.%s' % (pkg, f) + for f in setuptools.find_packages(pkg_path)]) + return "\n".join(set(packages)) diff --git a/awx/lib/site-packages/pbr/hooks/__init__.py b/awx/lib/site-packages/pbr/hooks/__init__.py new file mode 100644 index 0000000000..b35cd4255a --- /dev/null +++ b/awx/lib/site-packages/pbr/hooks/__init__.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from pbr.hooks import backwards +from pbr.hooks import commands +from pbr.hooks import files +from pbr.hooks import metadata + + +def setup_hook(config): + """Filter config parsed from a setup.cfg to inject our defaults.""" + metadata_config = metadata.MetadataConfig(config) + metadata_config.run() + backwards.BackwardsCompatConfig(config).run() + commands.CommandsConfig(config).run() + files.FilesConfig(config, metadata_config.get_name()).run() diff --git a/awx/lib/site-packages/pbr/hooks/backwards.py b/awx/lib/site-packages/pbr/hooks/backwards.py new file mode 100644 index 0000000000..d9183b3fd7 --- /dev/null +++ b/awx/lib/site-packages/pbr/hooks/backwards.py @@ -0,0 +1,34 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
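For orientation, setup_hook above is the single entry point pbr exposes to the setup.cfg machinery: it runs the metadata, backwards-compat, commands, and files hooks in that order, each mutating the parsed config in place. A small illustration of the find_package helper those hooks rely on, run from the site-packages tree this diff populates (illustrative; output order is unspecified because the result round-trips through set()):

# Illustrative only: smart_find_packages() expands each listed top-level
# package into itself plus every subpackage setuptools.find_packages()
# sees beneath it, returned as a newline-separated string.
from pbr import find_package

print(find_package.smart_find_packages("pbr"))
# For the tree added in this diff the output would contain:
#   pbr
#   pbr.hooks
#   pbr.tests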
+ +from pbr.hooks import base +from pbr import packaging + + +class BackwardsCompatConfig(base.BaseConfig): + + section = 'backwards_compat' + + def hook(self): + self.config['include_package_data'] = 'True' + packaging.append_text_list( + self.config, 'dependency_links', + packaging.parse_dependency_links()) + packaging.append_text_list( + self.config, 'tests_require', + packaging.parse_requirements( + packaging.TEST_REQUIREMENTS_FILES)) diff --git a/awx/lib/site-packages/pbr/hooks/base.py b/awx/lib/site-packages/pbr/hooks/base.py new file mode 100644 index 0000000000..925573a78c --- /dev/null +++ b/awx/lib/site-packages/pbr/hooks/base.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class BaseConfig(object): + + section = None + + def __init__(self, config): + self._global_config = config + self.config = self._global_config.get(self.section, dict()) + self.pbr_config = config.get('pbr', dict()) + + def run(self): + self.hook() + self.save() + + def hook(self): + pass + + def save(self): + self._global_config[self.section] = self.config diff --git a/awx/lib/site-packages/pbr/hooks/commands.py b/awx/lib/site-packages/pbr/hooks/commands.py new file mode 100644 index 0000000000..b4206edd13 --- /dev/null +++ b/awx/lib/site-packages/pbr/hooks/commands.py @@ -0,0 +1,63 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
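All of the hook classes in this package follow the BaseConfig lifecycle shown above: pull one section out of the parsed setup.cfg dict, mutate it in hook(), and write it back in save(). A minimal sketch of driving one of them directly, with the config-dict shape inferred from the code above:

# Illustrative only: run() is hook() followed by save(), per BaseConfig.
from pbr.hooks import backwards

config = {'backwards_compat': {}, 'pbr': {}}
backwards.BackwardsCompatConfig(config).run()

# config['backwards_compat'] now carries include_package_data = 'True',
# plus dependency_links and tests_require appended from whatever
# requirements files exist in the current directory (empty if none do).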
+ +import os + +from setuptools.command import easy_install + +from pbr.hooks import base +from pbr import packaging + + +class CommandsConfig(base.BaseConfig): + + section = 'global' + + def __init__(self, config): + super(CommandsConfig, self).__init__(config) + self.commands = self.config.get('commands', "") + + def save(self): + self.config['commands'] = self.commands + super(CommandsConfig, self).save() + + def add_command(self, command): + self.commands = "%s\n%s" % (self.commands, command) + + def hook(self): + self.add_command('pbr.packaging.LocalEggInfo') + self.add_command('pbr.packaging.LocalSDist') + self.add_command('pbr.packaging.LocalInstallScripts') + if os.name != 'nt': + easy_install.get_script_args = packaging.override_get_script_args + + if packaging.have_sphinx(): + self.add_command('pbr.packaging.LocalBuildDoc') + self.add_command('pbr.packaging.LocalBuildLatex') + + if os.path.exists('.testr.conf') and packaging.have_testr(): + # There is a .testr.conf file. We want to use it. + self.add_command('pbr.packaging.TestrTest') + elif self.config.get('nosetests', False) and packaging.have_nose(): + # We seem to still have nose configured + self.add_command('pbr.packaging.NoseTest') + + use_egg = packaging.get_boolean_option( + self.pbr_config, 'use-egg', 'PBR_USE_EGG') + # We always want non-egg install unless explicitly requested + if 'manpages' in self.pbr_config or not use_egg: + self.add_command('pbr.packaging.LocalInstall') diff --git a/awx/lib/site-packages/pbr/hooks/files.py b/awx/lib/site-packages/pbr/hooks/files.py new file mode 100644 index 0000000000..ba24aace99 --- /dev/null +++ b/awx/lib/site-packages/pbr/hooks/files.py @@ -0,0 +1,101 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import sys + +from pbr import find_package +from pbr.hooks import base + + +def get_manpath(): + manpath = 'share/man' + if os.path.exists(os.path.join(sys.prefix, 'man')): + # This works around a bug with install where it expects every node + # in the relative data directory to be an actual directory, since at + # least Debian derivatives (and probably other platforms as well) + # like to symlink Unixish /usr/local/man to /usr/local/share/man. 
+ manpath = 'man' + return manpath + + +def get_man_section(section): + return os.path.join(get_manpath(), 'man%s' % section) + + +class FilesConfig(base.BaseConfig): + + section = 'files' + + def __init__(self, config, name): + super(FilesConfig, self).__init__(config) + self.name = name + self.data_files = self.config.get('data_files', '') + + def save(self): + self.config['data_files'] = self.data_files + super(FilesConfig, self).save() + + def expand_globs(self): + finished = [] + for line in self.data_files.split("\n"): + if line.rstrip().endswith('*') and '=' in line: + (target, source_glob) = line.split('=') + source_prefix = source_glob.strip()[:-1] + target = target.strip() + if not target.endswith(os.path.sep): + target += os.path.sep + for (dirpath, dirnames, fnames) in os.walk(source_prefix): + finished.append( + "%s = " % dirpath.replace(source_prefix, target)) + finished.extend( + [" %s" % os.path.join(dirpath, f) for f in fnames]) + else: + finished.append(line) + + self.data_files = "\n".join(finished) + + def add_man_path(self, man_path): + self.data_files = "%s\n%s =" % (self.data_files, man_path) + + def add_man_page(self, man_page): + self.data_files = "%s\n %s" % (self.data_files, man_page) + + def get_man_sections(self): + man_sections = dict() + manpages = self.pbr_config['manpages'] + for manpage in manpages.split(): + section_number = manpage.strip()[-1] + section = man_sections.get(section_number, list()) + section.append(manpage.strip()) + man_sections[section_number] = section + return man_sections + + def hook(self): + package = self.config.get('packages', self.name).strip() + if os.path.isdir(package): + self.config['packages'] = find_package.smart_find_packages(package) + + self.expand_globs() + + if 'manpages' in self.pbr_config: + man_sections = self.get_man_sections() + for (section, pages) in man_sections.items(): + manpath = get_man_section(section) + self.add_man_path(manpath) + for page in pages: + self.add_man_page(page) diff --git a/awx/lib/site-packages/pbr/hooks/metadata.py b/awx/lib/site-packages/pbr/hooks/metadata.py new file mode 100644 index 0000000000..687e14901b --- /dev/null +++ b/awx/lib/site-packages/pbr/hooks/metadata.py @@ -0,0 +1,34 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
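The data_files handling in FilesConfig above is the subtle part: a line whose source ends in '*' is walked recursively and rewritten into one target-directory header per directory found, each followed by its files. A runnable sketch (the temporary tree, paths, and package name are all hypothetical):

# Illustrative only: exercising FilesConfig.expand_globs() on a tiny tree.
import os
import tempfile

from pbr.hooks import files

tmp = tempfile.mkdtemp()
os.makedirs(os.path.join(tmp, 'doc', 'api'))
open(os.path.join(tmp, 'doc', 'a.rst'), 'w').close()
open(os.path.join(tmp, 'doc', 'api', 'b.rst'), 'w').close()
os.chdir(tmp)

config = {'files': {'data_files': "share/doc/pkg = doc/*"}, 'pbr': {}}
fc = files.FilesConfig(config, 'pkg')
fc.expand_globs()
print(fc.data_files)
# share/doc/pkg/ =
#  doc/a.rst
# share/doc/pkg/api =
#  doc/api/b.rst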
+ +from pbr.hooks import base +from pbr import packaging + + +class MetadataConfig(base.BaseConfig): + + section = 'metadata' + + def hook(self): + self.config['version'] = packaging.get_version( + self.config['name'], self.config.get('version', None)) + packaging.append_text_list( + self.config, 'requires_dist', + packaging.parse_requirements()) + + def get_name(self): + return self.config['name'] diff --git a/awx/lib/site-packages/pbr/packaging.py b/awx/lib/site-packages/pbr/packaging.py new file mode 100644 index 0000000000..310ad390df --- /dev/null +++ b/awx/lib/site-packages/pbr/packaging.py @@ -0,0 +1,820 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2012-2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utilities with minimum-depends for use in setup.py +""" + +import email +import os +import re +import subprocess +import sys + +from distutils.command import install as du_install +import distutils.errors +from distutils import log +import pkg_resources +from setuptools.command import easy_install +from setuptools.command import egg_info +from setuptools.command import install +from setuptools.command import install_scripts +from setuptools.command import sdist + +try: + import cStringIO as io +except ImportError: + import io + +from pbr import extra_files + +log.set_verbosity(log.INFO) +TRUE_VALUES = ('true', '1', 'yes') +REQUIREMENTS_FILES = ('requirements.txt', 'tools/pip-requires') +TEST_REQUIREMENTS_FILES = ('test-requirements.txt', 'tools/test-requires') +# part of the standard library starting with 2.7 +# adding it to the requirements list screws distro installs +BROKEN_ON_27 = ('argparse', 'importlib') + + +def get_requirements_files(): + files = os.environ.get("PBR_REQUIREMENTS_FILES") + if files: + return tuple(f.strip() for f in files.split(',')) + return REQUIREMENTS_FILES + + +def append_text_list(config, key, text_list): + """Append a \n separated list to possibly existing value.""" + new_value = [] + current_value = config.get(key, "") + if current_value: + new_value.append(current_value) + new_value.extend(text_list) + config[key] = '\n'.join(new_value) + + +def _parse_mailmap(mailmap_info): + mapping = dict() + for l in mailmap_info: + try: + canonical_email, alias = re.match( + r'[^#]*?(<.+>).*(<.+>).*', l).groups() + except AttributeError: + continue + mapping[alias] = canonical_email + return mapping + + +def _wrap_in_quotes(values): + return ["'%s'" % value for value in values] + + +def _make_links_args(links): + return ["-f '%s'" % link for link in links] + + +def _pip_install(links, requires, root=None, option_dict=dict()): + if get_boolean_option( + option_dict, 'skip_pip_install', 'SKIP_PIP_INSTALL'): + return + root_cmd = "" + if root: + root_cmd = "--root=%s" % root + _run_shell_command( + "%s -m pip.__init__ install %s %s %s" % ( + sys.executable, + root_cmd, + " ".join(links), + " ".join(_wrap_in_quotes(requires))), + 
throw_on_error=True, buffer=False) + + +def read_git_mailmap(root_dir=None, mailmap='.mailmap'): + if not root_dir: + root_dir = _run_shell_command('git rev-parse --show-toplevel') + + mailmap = os.path.join(root_dir, mailmap) + if os.path.exists(mailmap): + return _parse_mailmap(open(mailmap, 'r').readlines()) + + return dict() + + +def canonicalize_emails(changelog, mapping): + """Takes in a string and an email alias mapping and replaces all + instances of the aliases in the string with their real email. + """ + for alias, email_address in mapping.items(): + changelog = changelog.replace(alias, email_address) + return changelog + + +def _any_existing(file_list): + return [f for f in file_list if os.path.exists(f)] + + +# Get requirements from the first file that exists +def get_reqs_from_files(requirements_files): + for requirements_file in _any_existing(requirements_files): + with open(requirements_file, 'r') as fil: + return fil.read().split('\n') + return [] + + +def parse_requirements(requirements_files=None): + + if requirements_files is None: + requirements_files = get_requirements_files() + + def egg_fragment(match): + # take a versioned egg fragment and return a + # versioned package requirement e.g. + # nova-1.2.3 becomes nova>=1.2.3 + return re.sub(r'([\w.]+)-([\w.-]+)', + r'\1>=\2', + match.group(1)) + + requirements = [] + for line in get_reqs_from_files(requirements_files): + # Ignore comments + if (not line.strip()) or line.startswith('#'): + continue + + # For the requirements list, we need to inject only the portion + # after egg= so that distutils knows the package it's looking for + # such as: + # -e git://github.com/openstack/nova/master#egg=nova + # -e git://github.com/openstack/nova/master#egg=nova-1.2.3 + if re.match(r'\s*-e\s+', line): + requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', + egg_fragment, + line)) + # such as: + # http://github.com/openstack/nova/zipball/master#egg=nova + # http://github.com/openstack/nova/zipball/master#egg=nova-1.2.3 + elif re.match(r'\s*https?:', line): + requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', + egg_fragment, + line)) + # -f lines are for index locations, and don't get used here + elif re.match(r'\s*-f\s+', line): + pass + elif line in BROKEN_ON_27 and sys.version_info >= (2, 7): + pass + else: + requirements.append(line) + + return requirements + + +def parse_dependency_links(requirements_files=None): + if requirements_files is None: + requirements_files = get_requirements_files() + dependency_links = [] + # dependency_links inject alternate locations to find packages listed + # in requirements + for line in get_reqs_from_files(requirements_files): + # skip comments and blank lines + if re.match(r'(\s*#)|(\s*$)', line): + continue + # lines with -e or -f need the whole line, minus the flag + if re.match(r'\s*-[ef]\s+', line): + dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line)) + # lines that are only urls can go in unmolested + elif re.match(r'\s*https?:', line): + dependency_links.append(line) + return dependency_links + + +def _run_shell_command(cmd, throw_on_error=False, buffer=True): + if buffer: + out_location = subprocess.PIPE + err_location = subprocess.PIPE + else: + out_location = None + err_location = None + + if os.name == 'nt': + output = subprocess.Popen(["cmd.exe", "/C", cmd], + stdout=out_location, + stderr=err_location) + else: + output = subprocess.Popen(["/bin/sh", "-c", cmd], + stdout=out_location, + stderr=err_location) + out = output.communicate() + if output.returncode and 
throw_on_error: + raise distutils.errors.DistutilsError( + "%s returned %d" % (cmd, output.returncode)) + if len(out) == 0 or not out[0] or not out[0].strip(): + return '' + return out[0].strip().decode('utf-8') + + +def _get_git_directory(): + return _run_shell_command("git rev-parse --git-dir", None) + + +def get_boolean_option(option_dict, option_name, env_name): + return ((option_name in option_dict + and option_dict[option_name][1].lower() in TRUE_VALUES) or + str(os.getenv(env_name)).lower() in TRUE_VALUES) + + +def write_git_changelog(git_dir=None, dest_dir=os.path.curdir, + option_dict=dict()): + """Write a changelog based on the git changelog.""" + should_skip = get_boolean_option(option_dict, 'skip_changelog', + 'SKIP_WRITE_GIT_CHANGELOG') + if not should_skip: + new_changelog = os.path.join(dest_dir, 'ChangeLog') + # If there's already a ChangeLog and it's not writable, just use it + if (os.path.exists(new_changelog) + and not os.access(new_changelog, os.W_OK)): + return + log.info('[pbr] Writing ChangeLog') + if git_dir is None: + git_dir = _get_git_directory() + if git_dir: + git_log_cmd = 'git --git-dir=%s log' % git_dir + changelog = _run_shell_command(git_log_cmd) + mailmap = read_git_mailmap() + with open(new_changelog, "wb") as changelog_file: + changelog_file.write(canonicalize_emails( + changelog, mailmap).encode('utf-8')) + + +def generate_authors(git_dir=None, dest_dir='.', option_dict=dict()): + """Create AUTHORS file using git commits.""" + should_skip = get_boolean_option(option_dict, 'skip_authors', + 'SKIP_GENERATE_AUTHORS') + if not should_skip: + old_authors = os.path.join(dest_dir, 'AUTHORS.in') + new_authors = os.path.join(dest_dir, 'AUTHORS') + # If there's already an AUTHORS file and it's not writable, just use it + if (os.path.exists(new_authors) + and not os.access(new_authors, os.W_OK)): + return + log.info('[pbr] Generating AUTHORS') + ignore_emails = '(jenkins@review|infra@lists)' + if git_dir is None: + git_dir = _get_git_directory() + if git_dir: + authors = [] + + # don't include jenkins email address in AUTHORS file + git_log_cmd = ("git --git-dir=" + git_dir + + " log --format='%aN <%aE>'" + " | egrep -v '" + ignore_emails + "'") + authors += _run_shell_command(git_log_cmd).split('\n') + + # get all co-authors from commit messages (note: --git-dir must + # precede the git subcommand, or git rejects it) + co_authors_cmd = ("git --git-dir=" + git_dir + " log" + " | grep -i Co-authored-by:") + co_authors = _run_shell_command(co_authors_cmd) + + co_authors = [signed.split(":", 1)[1].strip() + for signed in co_authors.split('\n') if signed] + + authors += co_authors + + # canonicalize emails, remove duplicates and sort + mailmap = read_git_mailmap(git_dir) + authors = canonicalize_emails('\n'.join(authors), mailmap) + authors = authors.split('\n') + authors = sorted(set(authors)) + + with open(new_authors, 'wb') as new_authors_fh: + if os.path.exists(old_authors): + with open(old_authors, "rb") as old_authors_fh: + new_authors_fh.write(old_authors_fh.read()) + new_authors_fh.write(('\n'.join(authors) + '\n') + .encode('utf-8')) + + +def _find_git_files(dirname='', git_dir=None): + """Behave like a file finder entrypoint plugin. + + We don't actually use the entrypoints system for this because it runs + at absurd times. We only want to do this when we are building an sdist.
+ """ + file_list = [] + if git_dir is None: + git_dir = _get_git_directory() + if git_dir: + log.info("[pbr] In git context, generating filelist from git") + git_ls_cmd = "git --git-dir=%s ls-files -z" % git_dir + file_list = _run_shell_command(git_ls_cmd) + file_list = file_list.split(b'\x00'.decode('utf-8')) + return [f for f in file_list if f] + + +_rst_template = """%(heading)s +%(underline)s + +.. automodule:: %(module)s + :members: + :undoc-members: + :show-inheritance: +""" + + +def _find_modules(arg, dirname, files): + for filename in files: + if filename.endswith('.py') and filename != '__init__.py': + arg["%s.%s" % (dirname.replace('/', '.'), + filename[:-3])] = True + + +class LocalInstall(install.install): + """Runs python setup.py install in a sensible manner. + + Force a non-egg installed in the manner of + single-version-externally-managed, which allows us to install manpages + and config files. + + Because non-egg installs bypass the depend processing machinery, we + need to do our own. Because easy_install is evil, just use pip to + process our requirements files directly, which means we don't have to + do crazy extra processing. + + Bypass installation if --single-version-externally-managed is given, + so that behavior for packagers remains the same. + """ + + command_name = 'install' + + def run(self): + option_dict = self.distribution.get_option_dict('pbr') + if (not self.single_version_externally_managed + and self.distribution.install_requires): + links = _make_links_args(self.distribution.dependency_links) + _pip_install( + links, self.distribution.install_requires, self.root, + option_dict=option_dict) + + return du_install.install.run(self) + + +def _newer_requires_files(egg_info_dir): + """Check to see if any of the requires files are newer than egg-info.""" + for target, sources in (('requires.txt', get_requirements_files()), + ('test-requires.txt', TEST_REQUIREMENTS_FILES)): + target_path = os.path.join(egg_info_dir, target) + for src in _any_existing(sources): + if (not os.path.exists(target_path) or + os.path.getmtime(target_path) + < os.path.getmtime(src)): + return True + return False + + +def _copy_test_requires_to(egg_info_dir): + """Copy the requirements file to egg-info/test-requires.txt.""" + with open(os.path.join(egg_info_dir, 'test-requires.txt'), 'w') as dest: + for source in _any_existing(TEST_REQUIREMENTS_FILES): + dest.write(open(source, 'r').read().rstrip('\n') + '\n') + + +class _PipInstallTestRequires(object): + """Mixin class to install test-requirements.txt before running tests.""" + + def install_test_requirements(self): + + links = _make_links_args( + parse_dependency_links(TEST_REQUIREMENTS_FILES)) + if self.distribution.tests_require: + option_dict = self.distribution.get_option_dict('pbr') + _pip_install( + links, self.distribution.tests_require, + option_dict=option_dict) + + def pre_run(self): + self.egg_name = pkg_resources.safe_name(self.distribution.get_name()) + self.egg_info = "%s.egg-info" % pkg_resources.to_filename( + self.egg_name) + if (not os.path.exists(self.egg_info) or + _newer_requires_files(self.egg_info)): + ei_cmd = self.get_finalized_command('egg_info') + ei_cmd.run() + self.install_test_requirements() + _copy_test_requires_to(self.egg_info) + +try: + from pbr import testr_command + + class TestrTest(testr_command.Testr, _PipInstallTestRequires): + """Make setup.py test do the right thing.""" + + command_name = 'test' + + def run(self): + self.pre_run() + # Can't use super - base class old-style class + 
testr_command.Testr.run(self) + + _have_testr = True + +except ImportError: + _have_testr = False + + +def have_testr(): + return _have_testr + +try: + from nose import commands + + class NoseTest(commands.nosetests, _PipInstallTestRequires): + """Fallback test runner if testr is a no-go.""" + + command_name = 'test' + + def run(self): + self.pre_run() + # Can't use super - base class old-style class + commands.nosetests.run(self) + + _have_nose = True + +except ImportError: + _have_nose = False + + +def have_nose(): + return _have_nose + + +_script_text = """# PBR Generated from %(group)r + +import sys + +from %(module_name)s import %(import_target)s + + +if __name__ == "__main__": + sys.exit(%(invoke_target)s()) +""" + + +def override_get_script_args( + dist, executable=os.path.normpath(sys.executable), is_wininst=False): + """Override entrypoints console_script.""" + header = easy_install.get_script_header("", executable, is_wininst) + for group in 'console_scripts', 'gui_scripts': + for name, ep in dist.get_entry_map(group).items(): + if not ep.attrs or len(ep.attrs) > 2: + raise ValueError("Script targets must be of the form " + "'func' or 'Class.class_method'.") + script_text = _script_text % dict( + group=group, + module_name=ep.module_name, + import_target=ep.attrs[0], + invoke_target='.'.join(ep.attrs), + ) + yield (name, header+script_text) + + +class LocalInstallScripts(install_scripts.install_scripts): + """Intercepts console scripts entry_points.""" + command_name = 'install_scripts' + + def run(self): + if os.name != 'nt': + get_script_args = override_get_script_args + else: + get_script_args = easy_install.get_script_args + + import distutils.command.install_scripts + + self.run_command("egg_info") + if self.distribution.scripts: + # run first to set up self.outfiles + distutils.command.install_scripts.install_scripts.run(self) + else: + self.outfiles = [] + if self.no_ep: + # don't install entry point scripts into .egg file! 
+ return + + ei_cmd = self.get_finalized_command("egg_info") + dist = pkg_resources.Distribution( + ei_cmd.egg_base, + pkg_resources.PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), + ei_cmd.egg_name, ei_cmd.egg_version, + ) + bs_cmd = self.get_finalized_command('build_scripts') + executable = getattr( + bs_cmd, 'executable', easy_install.sys_executable) + is_wininst = getattr( + self.get_finalized_command("bdist_wininst"), '_is_running', False + ) + for args in get_script_args(dist, executable, is_wininst): + self.write_script(*args) + + +class LocalManifestMaker(egg_info.manifest_maker): + """Add any files that are in git and some standard sensible files.""" + + def _add_pbr_defaults(self): + for template_line in [ + 'include AUTHORS', + 'include ChangeLog', + 'exclude .gitignore', + 'exclude .gitreview', + 'global-exclude *.pyc' + ]: + self.filelist.process_template_line(template_line) + + def add_defaults(self): + option_dict = self.distribution.get_option_dict('pbr') + + sdist.sdist.add_defaults(self) + self.filelist.append(self.template) + self.filelist.append(self.manifest) + self.filelist.extend(extra_files.get_extra_files()) + should_skip = get_boolean_option(option_dict, 'skip_git_sdist', + 'SKIP_GIT_SDIST') + if not should_skip: + rcfiles = _find_git_files() + if rcfiles: + self.filelist.extend(rcfiles) + elif os.path.exists(self.manifest): + self.read_manifest() + ei_cmd = self.get_finalized_command('egg_info') + self._add_pbr_defaults() + self.filelist.include_pattern("*", prefix=ei_cmd.egg_info) + + +class LocalEggInfo(egg_info.egg_info): + """Override the egg_info command to regenerate SOURCES.txt sensibly.""" + + command_name = 'egg_info' + + def find_sources(self): + """Generate SOURCES.txt only if there isn't one already. + + If we are in an sdist command, then we always want to update + SOURCES.txt. If we are not in an sdist command, then it doesn't + matter one flip, and is actually destructive. 
+ """ + manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") + if not os.path.exists(manifest_filename) or 'sdist' in sys.argv: + log.info("[pbr] Processing SOURCES.txt") + mm = LocalManifestMaker(self.distribution) + mm.manifest = manifest_filename + mm.run() + self.filelist = mm.filelist + else: + log.info("[pbr] Reusing existing SOURCES.txt") + self.filelist = egg_info.FileList() + for entry in open(manifest_filename, 'r').read().split('\n'): + self.filelist.append(entry) + + +class LocalSDist(sdist.sdist): + """Builds the ChangeLog and Authors files from VC first.""" + + command_name = 'sdist' + + def run(self): + option_dict = self.distribution.get_option_dict('pbr') + write_git_changelog(option_dict=option_dict) + generate_authors(option_dict=option_dict) + # sdist.sdist is an old style class, can't use super() + sdist.sdist.run(self) + +try: + from sphinx import apidoc + from sphinx import application + from sphinx import config + from sphinx import setup_command + + class LocalBuildDoc(setup_command.BuildDoc): + + command_name = 'build_sphinx' + builders = ['html', 'man'] + + def _get_source_dir(self): + option_dict = self.distribution.get_option_dict('build_sphinx') + if 'source_dir' in option_dict: + source_dir = os.path.join(option_dict['source_dir'][1], 'api') + else: + source_dir = 'doc/source/api' + if not os.path.exists(source_dir): + os.makedirs(source_dir) + return source_dir + + def generate_autoindex(self): + log.info("[pbr] Autodocumenting from %s" + % os.path.abspath(os.curdir)) + modules = {} + source_dir = self._get_source_dir() + for pkg in self.distribution.packages: + if '.' not in pkg: + for dirpath, dirnames, files in os.walk(pkg): + _find_modules(modules, dirpath, files) + module_list = list(modules.keys()) + module_list.sort() + autoindex_filename = os.path.join(source_dir, 'autoindex.rst') + with open(autoindex_filename, 'w') as autoindex: + autoindex.write(""".. 
toctree:: + :maxdepth: 1 + + """) + for module in module_list: + output_filename = os.path.join(source_dir, + "%s.rst" % module) + heading = "The :mod:`%s` Module" % module + underline = "=" * len(heading) + values = dict(module=module, heading=heading, + underline=underline) + + log.info("[pbr] Generating %s" + % output_filename) + with open(output_filename, 'w') as output_file: + output_file.write(_rst_template % values) + autoindex.write(" %s.rst\n" % module) + + def _sphinx_tree(self): + source_dir = self._get_source_dir() + apidoc.main(['apidoc', '.', '-H', 'Modules', '-o', source_dir]) + + def _sphinx_run(self): + if not self.verbose: + status_stream = io.StringIO() + else: + status_stream = sys.stdout + confoverrides = {} + if self.version: + confoverrides['version'] = self.version + if self.release: + confoverrides['release'] = self.release + if self.today: + confoverrides['today'] = self.today + sphinx_config = config.Config(self.config_dir, 'conf.py', {}, []) + if self.builder == 'man' and len(sphinx_config.man_pages) == 0: + return + app = application.Sphinx( + self.source_dir, self.config_dir, + self.builder_target_dir, self.doctree_dir, + self.builder, confoverrides, status_stream, + freshenv=self.fresh_env, warningiserror=True) + + try: + app.build(force_all=self.all_files) + except Exception as err: + from docutils import utils + if isinstance(err, utils.SystemMessage): + sys.stderr.write('reST markup error:\n') + sys.stderr.write(err.args[0].encode('ascii', + 'backslashreplace')) + sys.stderr.write('\n') + else: + raise + + if self.link_index: + src = app.config.master_doc + app.builder.out_suffix + dst = app.builder.get_outfilename('index') + os.symlink(src, dst) + + def run(self): + option_dict = self.distribution.get_option_dict('pbr') + tree_index = get_boolean_option(option_dict, + 'autodoc_tree_index_modules', + 'AUTODOC_TREE_INDEX_MODULES') + auto_index = get_boolean_option(option_dict, + 'autodoc_index_modules', + 'AUTODOC_INDEX_MODULES') + if not os.getenv('SPHINX_DEBUG'): + #NOTE(afazekas): These options can be used together, + # but they do a very similar thing in a different way + if tree_index: + self._sphinx_tree() + if auto_index: + self.generate_autoindex() + + for builder in self.builders: + self.builder = builder + self.finalize_options() + self.project = self.distribution.get_name() + self.version = self.distribution.get_version() + self.release = self.distribution.get_version() + if 'warnerrors' in option_dict: + self._sphinx_run() + else: + setup_command.BuildDoc.run(self) + + class LocalBuildLatex(LocalBuildDoc): + builders = ['latex'] + command_name = 'build_sphinx_latex' + + _have_sphinx = True + +except ImportError: + _have_sphinx = False + + +def have_sphinx(): + return _have_sphinx + + +def _get_revno(git_dir): + """Return the number of commits since the most recent tag. + + We use git-describe to find this out, but if there are no + tags then we fall back to counting commits since the beginning + of time. + """ + describe = _run_shell_command( + "git --git-dir=%s describe --always" % git_dir) + if "-" in describe: + return describe.rsplit("-", 2)[-2] + + # no tags found + revlist = _run_shell_command( + "git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir) + return len(revlist.splitlines()) + + +def _get_version_from_git(pre_version): + """Return a version which is equal to the tag that's on the current + revision if there is one, or tag plus number of additional revisions + if the current revision has no tag.
+ """ + + git_dir = _get_git_directory() + if git_dir: + if pre_version: + try: + return _run_shell_command( + "git --git-dir=" + git_dir + " describe --exact-match", + throw_on_error=True).replace('-', '.') + except Exception: + sha = _run_shell_command( + "git --git-dir=" + git_dir + " log -n1 --pretty=format:%h") + return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha) + else: + return _run_shell_command( + "git --git-dir=" + git_dir + " describe --always").replace( + '-', '.') + return None + + +def _get_version_from_pkg_info(package_name): + """Get the version from PKG-INFO file if we can.""" + try: + pkg_info_file = open('PKG-INFO', 'r') + except (IOError, OSError): + return None + try: + pkg_info = email.message_from_file(pkg_info_file) + except email.MessageError: + return None + # Check to make sure we're in our own dir + if pkg_info.get('Name', None) != package_name: + return None + return pkg_info.get('Version', None) + + +def get_version(package_name, pre_version=None): + """Get the version of the project. First, try getting it from PKG-INFO, if + it exists. If it does, that means we're in a distribution tarball or that + install has happened. Otherwise, if there is no PKG-INFO file, pull the + version from git. + + We do not support setup.py version sanity in git archive tarballs, nor do + we support packagers directly sucking our git repo into theirs. We expect + that a source tarball be made from our git repo - or that if someone wants + to make a source tarball from a fork of our repo with additional tags in it + that they understand and desire the results of doing that. + """ + version = os.environ.get( + "PBR_VERSION", + os.environ.get("OSLO_PACKAGE_VERSION", None)) + if version: + return version + version = _get_version_from_pkg_info(package_name) + if version: + return version + version = _get_version_from_git(pre_version) + if version: + return version + raise Exception("Versioning for this project requires either an sdist" + " tarball, or access to an upstream git repository.") diff --git a/awx/lib/site-packages/pbr/testr_command.py b/awx/lib/site-packages/pbr/testr_command.py new file mode 100644 index 0000000000..23699dcd93 --- /dev/null +++ b/awx/lib/site-packages/pbr/testr_command.py @@ -0,0 +1,107 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (c) 2013 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# license you chose for the specific language governing permissions and +# limitations under that license. + +"""setuptools/distutils commands to run testr via setup.py + +Currently provides 'testr' which runs tests using testr. You can pass +--coverage which will also export PYTHON='coverage run --source <package>' +and automatically combine the coverage from each testr backend test runner +after the run completes. + +To use, just use setuptools/distribute and depend on testr, and it should be +picked up automatically (as the commands are exported in the testrepository +package metadata). +""" + +from distutils import cmd +import distutils.errors +import os +import sys + +from testrepository import commands + + +class Testr(cmd.Command): + + description = "Run unit tests using testr" + + user_options = [ + ('coverage', None, "Replace PYTHON with coverage and merge coverage " + "from each testr worker."), + ('testr-args=', 't', "Run 'testr' with these args"), + ('omit=', 'o', 'Files to omit from coverage calculations'), + ('slowest', None, "Show slowest test times after tests complete."), + ] + + boolean_options = ['coverage', 'slowest'] + + def _run_testr(self, *args): + return commands.run_argv([sys.argv[0]] + list(args), + sys.stdin, sys.stdout, sys.stderr) + + def initialize_options(self): + self.testr_args = None + self.coverage = None + self.omit = "" + self.slowest = None + + def finalize_options(self): + if self.testr_args is None: + self.testr_args = [] + else: + self.testr_args = self.testr_args.split() + if self.omit: + self.omit = "--omit=%s" % self.omit + + def run(self): + """Set up testr repo, then run testr""" + if not os.path.isdir(".testrepository"): + self._run_testr("init") + + if self.coverage: + self._coverage_before() + testr_ret = self._run_testr("run", "--parallel", *self.testr_args) + if testr_ret: + raise distutils.errors.DistutilsError( + "testr failed (%d)" % testr_ret) + if self.slowest: + print("Slowest Tests") + self._run_testr("slowest") + if self.coverage: + self._coverage_after() + + def _coverage_before(self): + package = self.distribution.get_name() + if package.startswith('python-'): + package = package[7:] + options = "--source %s --parallel-mode" % package + os.environ['PYTHON'] = ("coverage run %s" % options) + + def _coverage_after(self): + os.system("coverage combine") + os.system("coverage html -d ./cover %s" % self.omit) diff --git a/awx/lib/site-packages/pbr/tests/__init__.py b/awx/lib/site-packages/pbr/tests/__init__.py new file mode 100644 index 0000000000..8cffb4e4c1 --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/__init__.py @@ -0,0 +1,133 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack Foundation +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
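In practice the Testr command above is reached through the 'test' alias that CommandsConfig registers whenever a .testr.conf is present. A typical coverage run, with a hypothetical package name, expands roughly as follows per run() above:

# Illustrative only:
#   $ python setup.py test --coverage --omit='mypkg/tests/*'
# amounts to:
#   testr init        # only when .testrepository/ does not exist yet
#   PYTHON='coverage run --source mypkg --parallel-mode' testr run --parallel
#   coverage combine
#   coverage html -d ./cover --omit='mypkg/tests/*'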
+# Copyright (C) 2013 Association of Universities for Research in Astronomy +# (AURA) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of AURA and its representatives may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + +"""Common utilities used in testing""" + +import os +import shutil +import subprocess +import sys + +import fixtures +import testresources +import testtools + +from pbr import packaging + + +class DiveDir(fixtures.Fixture): + """Dive into given directory and return back on cleanup. + + :ivar path: The target directory. + """ + + def __init__(self, path): + self.path = path + + def setUp(self): + super(DiveDir, self).setUp() + self.addCleanup(os.chdir, os.getcwd()) + os.chdir(self.path) + + +class BaseTestCase(testtools.TestCase, testresources.ResourcedTestCase): + + def setUp(self): + super(BaseTestCase, self).setUp() + test_timeout = os.environ.get('OS_TEST_TIMEOUT', 30) + try: + test_timeout = int(test_timeout) + except ValueError: + # If the timeout value is invalid, fall back to no timeout. + print("OS_TEST_TIMEOUT set to invalid value," + " defaulting to no timeout") + test_timeout = 0 + if test_timeout > 0: + self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + + if os.environ.get('OS_STDOUT_CAPTURE') in packaging.TRUE_VALUES: + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if os.environ.get('OS_STDERR_CAPTURE') in packaging.TRUE_VALUES: + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + self.log_fixture = self.useFixture( + fixtures.FakeLogger('pbr')) + + self.useFixture(fixtures.NestedTempfile()) + self.useFixture(fixtures.FakeLogger()) + self.useFixture(fixtures.EnvironmentVariable('PBR_VERSION', '0.0')) + + self.temp_dir = self.useFixture(fixtures.TempDir()).path + self.package_dir = os.path.join(self.temp_dir, 'testpackage') + shutil.copytree(os.path.join(os.path.dirname(__file__), 'testpackage'), + self.package_dir) + self.addCleanup(os.chdir, os.getcwd()) + os.chdir(self.package_dir) + + def tearDown(self): + # Remove pbr.testpackage from sys.modules so that it can be freshly + # re-imported by the next test + for k in list(sys.modules): + if (k == 'pbr_testpackage' or + k.startswith('pbr_testpackage.')): + del sys.modules[k] + super(BaseTestCase, self).tearDown() + + def run_setup(self, *args): + return self._run_cmd(sys.executable, ('setup.py',) + args) + + def _run_cmd(self, cmd, args=[]): + """Run a command in the root of the test working copy.
+ + Runs a command, with the given argument list, in the root of the test + working copy--returns the stdout and stderr streams and the exit code + from the subprocess. + """ + + os.chdir(self.package_dir) + p = subprocess.Popen([cmd] + list(args), stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + streams = tuple(s.decode('latin1').strip() for s in p.communicate()) + for line in streams: + print(line) + return (streams) + (p.returncode,) diff --git a/awx/lib/site-packages/pbr/tests/test_commands.py b/awx/lib/site-packages/pbr/tests/test_commands.py new file mode 100644 index 0000000000..f5831fac8a --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/test_commands.py @@ -0,0 +1,58 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (C) 2013 Association of Universities for Research in Astronomy +# (AURA) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of AURA and its representatives may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + +from testtools import content + +from pbr import tests + + +class TestCommands(tests.BaseTestCase): + def test_custom_build_py_command(self): + """Test custom build_py command. + + Test that a custom subclass of the build_py command runs when listed in + the commands [global] option, rather than the normal build command. + """ + + stdout, stderr, return_code = self.run_setup('build_py') + self.addDetail('stdout', content.text_content(stdout)) + self.addDetail('stderr', content.text_content(stderr)) + self.assertIn('Running custom build_py command.', stdout) + self.assertEqual(return_code, 0) diff --git a/awx/lib/site-packages/pbr/tests/test_core.py b/awx/lib/site-packages/pbr/tests/test_core.py new file mode 100644 index 0000000000..dd21e6f04b --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/test_core.py @@ -0,0 +1,147 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (C) 2013 Association of Universities for Research in Astronomy +# (AURA) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of AURA and its representatives may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + +import glob +import os +import tarfile + +import fixtures + +from pbr import tests + + +class TestCore(tests.BaseTestCase): + + cmd_names = ('pbr_test_cmd', 'pbr_test_cmd_with_class') + + def check_script_install(self, install_stdout): + for cmd_name in self.cmd_names: + install_txt = 'Installing %s script to %s' % (cmd_name, + self.temp_dir) + self.assertIn(install_txt, install_stdout) + + cmd_filename = os.path.join(self.temp_dir, cmd_name) + + script_txt = open(cmd_filename, 'r').read() + self.assertNotIn('pkg_resources', script_txt) + + stdout, _, return_code = self._run_cmd(cmd_filename) + self.assertIn("PBR", stdout) + + def test_setup_py_keywords(self): + """setup.py --keywords. + + Test that the `./setup.py --keywords` command returns the correct + value without balking. 
+ """ + + self.run_setup('egg_info') + stdout, _, _ = self.run_setup('--keywords') + assert stdout == 'packaging,distutils,setuptools' + + def test_sdist_extra_files(self): + """Test that the extra files are correctly added.""" + + stdout, _, return_code = self.run_setup('sdist', '--formats=gztar') + + # There can be only one + try: + tf_path = glob.glob(os.path.join('dist', '*.tar.gz'))[0] + except IndexError: + assert False, 'source dist not found' + + tf = tarfile.open(tf_path) + names = ['/'.join(p.split('/')[1:]) for p in tf.getnames()] + + self.assertIn('extra-file.txt', names) + + def test_console_script_install(self): + """Test that we install a non-pkg-resources console script.""" + + if os.name == 'nt': + self.skipTest('Windows support is passthrough') + + stdout, _, return_code = self.run_setup( + 'install_scripts', '--install-dir=%s' % self.temp_dir) + + self.useFixture( + fixtures.EnvironmentVariable('PYTHONPATH', '.')) + + self.check_script_install(stdout) + + def test_console_script_develop(self): + """Test that we develop a non-pkg-resources console script.""" + + if os.name == 'nt': + self.skipTest('Windows support is passthrough') + + self.useFixture( + fixtures.EnvironmentVariable( + 'PYTHONPATH', ".:%s" % self.temp_dir)) + + stdout, _, return_code = self.run_setup( + 'develop', '--install-dir=%s' % self.temp_dir) + + self.check_script_install(stdout) + + +class TestGitSDist(tests.BaseTestCase): + + def setUp(self): + super(TestGitSDist, self).setUp() + + stdout, _, return_code = self._run_cmd('git', ('init',)) + if return_code: + self.skipTest("git not installed") + + stdout, _, return_code = self._run_cmd('git', ('add', '.')) + stdout, _, return_code = self._run_cmd( + 'git', ('commit', '-m', 'Turn this into a git repo')) + + stdout, _, return_code = self.run_setup('sdist', '--formats=gztar') + + def test_sdist_git_extra_files(self): + """Test that extra files found in git are correctly added.""" + # There can be only one + tf_path = glob.glob(os.path.join('dist', '*.tar.gz'))[0] + tf = tarfile.open(tf_path) + names = ['/'.join(p.split('/')[1:]) for p in tf.getnames()] + + self.assertIn('git-extra-file.txt', names) diff --git a/awx/lib/site-packages/pbr/tests/test_files.py b/awx/lib/site-packages/pbr/tests/test_files.py new file mode 100644 index 0000000000..38053397e6 --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/test_files.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import print_function + +import os + +import fixtures + +from pbr.hooks import files +from pbr import tests + + +class FilesConfigTest(tests.BaseTestCase): + + def setUp(self): + super(FilesConfigTest, self).setUp() + + pkg_fixture = fixtures.PythonPackage( + "fake_package", [ + ("fake_module.py", b""), + ("other_fake_module.py", b""), + ]) + self.useFixture(pkg_fixture) + pkg_etc = os.path.join(pkg_fixture.base, 'etc') + pkg_sub = os.path.join(pkg_etc, 'sub') + subpackage = os.path.join( + pkg_fixture.base, 'fake_package', 'subpackage') + os.makedirs(pkg_sub) + os.makedirs(subpackage) + with open(os.path.join(pkg_etc, "foo"), 'w') as foo_file: + foo_file.write("Foo Data") + with open(os.path.join(pkg_sub, "bar"), 'w') as foo_file: + foo_file.write("Bar Data") + with open(os.path.join(subpackage, "__init__.py"), 'w') as foo_file: + foo_file.write("# empty") + + self.useFixture(tests.DiveDir(pkg_fixture.base)) + + def test_implicit_auto_package(self): + config = dict( + files=dict( + ) + ) + files.FilesConfig(config, 'fake_package').run() + self.assertIn('subpackage', config['files']['packages']) + + def test_auto_package(self): + config = dict( + files=dict( + packages='fake_package', + ) + ) + files.FilesConfig(config, 'fake_package').run() + self.assertIn('subpackage', config['files']['packages']) + + def test_data_files_globbing(self): + config = dict( + files=dict( + data_files="\n etc/pbr = etc/*" + ) + ) + files.FilesConfig(config, 'fake_package').run() + self.assertIn( + '\netc/pbr/ = \n etc/foo\netc/pbr/sub = \n etc/sub/bar', + config['files']['data_files']) diff --git a/awx/lib/site-packages/pbr/tests/test_hooks.py b/awx/lib/site-packages/pbr/tests/test_hooks.py new file mode 100644 index 0000000000..88106a6f27 --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/test_hooks.py @@ -0,0 +1,91 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (C) 2013 Association of Universities for Research in Astronomy +# (AURA) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of AURA and its representatives may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + +import os +import textwrap + +from pbr import tests +from pbr.tests import util + + +class TestHooks(tests.BaseTestCase): + def setUp(self): + super(TestHooks, self).setUp() + with util.open_config( + os.path.join(self.package_dir, 'setup.cfg')) as cfg: + cfg.set('global', 'setup-hooks', + 'pbr_testpackage._setup_hooks.test_hook_1\n' + 'pbr_testpackage._setup_hooks.test_hook_2') + cfg.set('build_ext', 'pre-hook.test_pre_hook', + 'pbr_testpackage._setup_hooks.test_pre_hook') + cfg.set('build_ext', 'post-hook.test_post_hook', + 'pbr_testpackage._setup_hooks.test_post_hook') + + def test_global_setup_hooks(self): + """Test setup_hooks. + + Test that setup_hooks listed in the [global] section of setup.cfg are + executed in order. + """ + + stdout, _, return_code = self.run_setup('egg_info') + assert 'test_hook_1\ntest_hook_2' in stdout + assert return_code == 0 + + def test_command_hooks(self): + """Test command hooks. + + Simple test that the appropriate command hooks run at the + beginning/end of the appropriate command. + """ + + stdout, _, return_code = self.run_setup('egg_info') + assert 'build_ext pre-hook' not in stdout + assert 'build_ext post-hook' not in stdout + assert return_code == 0 + + stdout, _, return_code = self.run_setup('build_ext') + assert textwrap.dedent(""" + running build_ext + running pre_hook pbr_testpackage._setup_hooks.test_pre_hook for command build_ext + build_ext pre-hook + """) in stdout # flake8: noqa + assert stdout.endswith('build_ext post-hook') + assert return_code == 0 diff --git a/awx/lib/site-packages/pbr/tests/test_packaging.py b/awx/lib/site-packages/pbr/tests/test_packaging.py new file mode 100644 index 0000000000..562b10e330 --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/test_packaging.py @@ -0,0 +1,106 @@ +# Copyright (c) 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (C) 2013 Association of Universities for Research in Astronomy +# (AURA) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of AURA and its representatives may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + +import os + +from pbr import tests + + +class TestPackagingInGitRepoWithCommit(tests.BaseTestCase): + + def setUp(self): + super(TestPackagingInGitRepoWithCommit, self).setUp() + self._run_cmd('git', ['init', '.']) + self._run_cmd('git', ['add', '.']) + self._run_cmd('git', ['commit', '-m', 'test commit']) + self.run_setup('sdist') + return + + def test_authors(self): + # One commit, something should be in the authors list + with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f: + body = f.read() + self.assertNotEqual(body, '') + + def test_changelog(self): + with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f: + body = f.read() + # One commit, something should be in the ChangeLog list + self.assertNotEqual(body, '') + + +class TestPackagingInGitRepoWithoutCommit(tests.BaseTestCase): + + def setUp(self): + super(TestPackagingInGitRepoWithoutCommit, self).setUp() + self._run_cmd('git', ['init', '.']) + self._run_cmd('git', ['add', '.']) + self.run_setup('sdist') + return + + def test_authors(self): + # No commits, no authors in list + with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f: + body = f.read() + self.assertEqual(body, '\n') + + def test_changelog(self): + # No commits, nothing should be in the ChangeLog list + with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f: + body = f.read() + self.assertEqual(body, '') + + +class TestPackagingInPlainDirectory(tests.BaseTestCase): + + def setUp(self): + super(TestPackagingInPlainDirectory, self).setUp() + self.run_setup('sdist') + return + + def test_authors(self): + # Not a git repo, no AUTHORS file created + filename = os.path.join(self.package_dir, 'AUTHORS') + self.assertFalse(os.path.exists(filename)) + + def test_changelog(self): + # Not a git repo, no ChangeLog created + filename = os.path.join(self.package_dir, 'ChangeLog') + self.assertFalse(os.path.exists(filename)) diff --git a/awx/lib/site-packages/pbr/tests/test_setup.py b/awx/lib/site-packages/pbr/tests/test_setup.py new file mode 100644 index 0000000000..11f9a049dd --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/test_setup.py @@ -0,0 +1,360 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
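The three packaging test classes above pin down a simple contract: pbr derives AUTHORS and ChangeLog from git history, so a repo with one commit yields non-empty files, a repo with no commits yields empty ones, and a plain directory gets neither file. A rough standalone reproduction of the first case, assuming `pkg_dir` holds a pbr-enabled project and git is on PATH (the helper name is hypothetical)::

    import os
    import subprocess
    import sys

    def sdist_artifacts(pkg_dir):
        # Identity flags keep 'git commit' from failing in a bare environment.
        git = ['git', '-c', 'user.name=t', '-c', 'user.email=t@example.com']
        subprocess.check_call(git + ['init', '.'], cwd=pkg_dir)
        subprocess.check_call(git + ['add', '.'], cwd=pkg_dir)
        subprocess.check_call(git + ['commit', '-m', 'test commit'], cwd=pkg_dir)
        subprocess.check_call([sys.executable, 'setup.py', 'sdist'], cwd=pkg_dir)
        # With one commit, both files should now exist and be non-empty.
        return {f: os.path.exists(os.path.join(pkg_dir, f))
                for f in ('AUTHORS', 'ChangeLog')}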
+ +from __future__ import print_function + +import os +import sys +import tempfile + +try: + import cStringIO as io + BytesIO = io.StringIO +except ImportError: + import io + BytesIO = io.BytesIO + +import fixtures +import testscenarios + +from pbr import packaging +from pbr import tests + + +class EmailTestCase(tests.BaseTestCase): + + def test_str_dict_replace(self): + string = 'Johnnie T. Hozer' + mapping = {'T.': 'The'} + self.assertEqual('Johnnie The Hozer', + packaging.canonicalize_emails(string, mapping)) + + +class MailmapTestCase(tests.BaseTestCase): + + def setUp(self): + super(MailmapTestCase, self).setUp() + self.root_dir = self.useFixture(fixtures.TempDir()).path + self.mailmap = os.path.join(self.root_dir, '.mailmap') + + def test_mailmap_with_fullname(self): + with open(self.mailmap, 'w') as mm_fh: + mm_fh.write("Foo Bar Foo Bar \n") + self.assertEqual({'': ''}, + packaging.read_git_mailmap(self.root_dir)) + + def test_mailmap_with_firstname(self): + with open(self.mailmap, 'w') as mm_fh: + mm_fh.write("Foo Foo \n") + self.assertEqual({'': ''}, + packaging.read_git_mailmap(self.root_dir)) + + def test_mailmap_with_noname(self): + with open(self.mailmap, 'w') as mm_fh: + mm_fh.write(" \n") + self.assertEqual({'': ''}, + packaging.read_git_mailmap(self.root_dir)) + + +class SkipFileWrites(tests.BaseTestCase): + + scenarios = [ + ('changelog_option_true', + dict(option_key='skip_changelog', option_value='True', + env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None, + pkg_func=packaging.write_git_changelog, filename='ChangeLog')), + ('changelog_option_false', + dict(option_key='skip_changelog', option_value='False', + env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None, + pkg_func=packaging.write_git_changelog, filename='ChangeLog')), + ('changelog_env_true', + dict(option_key='skip_changelog', option_value='False', + env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True', + pkg_func=packaging.write_git_changelog, filename='ChangeLog')), + ('changelog_both_true', + dict(option_key='skip_changelog', option_value='True', + env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True', + pkg_func=packaging.write_git_changelog, filename='ChangeLog')), + ('authors_option_true', + dict(option_key='skip_authors', option_value='True', + env_key='SKIP_GENERATE_AUTHORS', env_value=None, + pkg_func=packaging.generate_authors, filename='AUTHORS')), + ('authors_option_false', + dict(option_key='skip_authors', option_value='False', + env_key='SKIP_GENERATE_AUTHORS', env_value=None, + pkg_func=packaging.generate_authors, filename='AUTHORS')), + ('authors_env_true', + dict(option_key='skip_authors', option_value='False', + env_key='SKIP_GENERATE_AUTHORS', env_value='True', + pkg_func=packaging.generate_authors, filename='AUTHORS')), + ('authors_both_true', + dict(option_key='skip_authors', option_value='True', + env_key='SKIP_GENERATE_AUTHORS', env_value='True', + pkg_func=packaging.generate_authors, filename='AUTHORS')), + ] + + def setUp(self): + super(SkipFileWrites, self).setUp() + self.temp_path = self.useFixture(fixtures.TempDir()).path + self.root_dir = os.path.abspath(os.path.curdir) + self.git_dir = os.path.join(self.root_dir, ".git") + if not os.path.exists(self.git_dir): + self.skipTest("%s is missing; skipping git-related checks" + % self.git_dir) + return + self.filename = os.path.join(self.temp_path, self.filename) + self.option_dict = dict() + if self.option_key is not None: + self.option_dict[self.option_key] = ('setup.cfg', + self.option_value) + self.useFixture( + 
fixtures.EnvironmentVariable(self.env_key, self.env_value)) + + def test_skip(self): + self.pkg_func(git_dir=self.git_dir, + dest_dir=self.temp_path, + option_dict=self.option_dict) + self.assertEqual( + not os.path.exists(self.filename), + (self.option_value.lower() in packaging.TRUE_VALUES + or self.env_value is not None)) + + +class GitLogsTest(tests.BaseTestCase): + + def setUp(self): + super(GitLogsTest, self).setUp() + self.temp_path = self.useFixture(fixtures.TempDir()).path + self.root_dir = os.path.abspath(os.path.curdir) + self.git_dir = os.path.join(self.root_dir, ".git") + self.useFixture( + fixtures.EnvironmentVariable('SKIP_GENERATE_AUTHORS')) + self.useFixture( + fixtures.EnvironmentVariable('SKIP_WRITE_GIT_CHANGELOG')) + + def test_write_git_changelog(self): + exist_files = [os.path.join(self.root_dir, f) + for f in (".git", ".mailmap")] + self.useFixture(fixtures.MonkeyPatch( + "os.path.exists", + lambda path: os.path.abspath(path) in exist_files)) + self.useFixture(fixtures.FakePopen(lambda _: { + "stdout": BytesIO("Author: Foo Bar " + "\n".encode('utf-8')) + })) + + def _fake_read_git_mailmap(*args): + return {"email@bar.com": "email@foo.com"} + + self.useFixture(fixtures.MonkeyPatch("pbr.packaging.read_git_mailmap", + _fake_read_git_mailmap)) + + packaging.write_git_changelog(git_dir=self.git_dir, + dest_dir=self.temp_path) + + with open(os.path.join(self.temp_path, "ChangeLog"), "r") as ch_fh: + self.assertTrue("email@foo.com" in ch_fh.read()) + + def _fake_log_output(self, cmd, mapping): + for (k, v) in mapping.items(): + if cmd.startswith(k): + return v.encode('utf-8') + return b"" + + def test_generate_authors(self): + author_old = "Foo Foo " + author_new = "Bar Bar " + co_author = "Foo Bar " + co_author_by = "Co-authored-by: " + co_author + + git_log_cmd = ("git --git-dir=%s log --format" % self.git_dir) + git_co_log_cmd = ("git log --git-dir=%s" % self.git_dir) + git_top_level = "git rev-parse --show-toplevel" + cmd_map = { + git_log_cmd: author_new, + git_co_log_cmd: co_author_by, + git_top_level: self.root_dir, + } + + exist_files = [self.git_dir, + os.path.join(self.temp_path, "AUTHORS.in")] + self.useFixture(fixtures.MonkeyPatch( + "os.path.exists", + lambda path: os.path.abspath(path) in exist_files)) + + self.useFixture(fixtures.FakePopen(lambda proc_args: { + "stdout": BytesIO( + self._fake_log_output(proc_args["args"][2], cmd_map)) + })) + + with open(os.path.join(self.temp_path, "AUTHORS.in"), "w") as auth_fh: + auth_fh.write("%s\n" % author_old) + + packaging.generate_authors(git_dir=self.git_dir, + dest_dir=self.temp_path) + + with open(os.path.join(self.temp_path, "AUTHORS"), "r") as auth_fh: + authors = auth_fh.read() + self.assertTrue(author_old in authors) + self.assertTrue(author_new in authors) + self.assertTrue(co_author in authors) + + +class BuildSphinxTest(tests.BaseTestCase): + + scenarios = [ + ('true_autodoc_caps', + dict(has_opt=True, autodoc='True', has_autodoc=True)), + ('true_autodoc_lower', + dict(has_opt=True, autodoc='true', has_autodoc=True)), + ('false_autodoc', + dict(has_opt=True, autodoc='False', has_autodoc=False)), + ('no_autodoc', + dict(has_opt=False, autodoc='False', has_autodoc=False)), + ] + + def setUp(self): + super(BuildSphinxTest, self).setUp() + + self.useFixture(fixtures.MonkeyPatch( + "sphinx.setup_command.BuildDoc.run", lambda self: None)) + from distutils import dist + self.distr = dist.Distribution() + self.distr.packages = ("fake_package",) + self.distr.command_options["build_sphinx"] = { + "source_dir": ["a", 
"."]} + pkg_fixture = fixtures.PythonPackage( + "fake_package", [("fake_module.py", b"")]) + self.useFixture(pkg_fixture) + self.useFixture(tests.DiveDir(pkg_fixture.base)) + + def test_build_doc(self): + if self.has_opt: + self.distr.command_options["pbr"] = { + "autodoc_index_modules": ('setup.cfg', self.autodoc)} + build_doc = packaging.LocalBuildDoc(self.distr) + build_doc.run() + + self.assertTrue( + os.path.exists("api/autoindex.rst") == self.has_autodoc) + self.assertTrue( + os.path.exists( + "api/fake_package.fake_module.rst") == self.has_autodoc) + + +class ParseRequirementsTest(tests.BaseTestCase): + + def setUp(self): + super(ParseRequirementsTest, self).setUp() + (fd, self.tmp_file) = tempfile.mkstemp(prefix='openstack', + suffix='.setup') + + def test_parse_requirements_normal(self): + with open(self.tmp_file, 'w') as fh: + fh.write("foo\nbar") + self.assertEqual(['foo', 'bar'], + packaging.parse_requirements([self.tmp_file])) + + def test_parse_requirements_with_git_egg_url(self): + with open(self.tmp_file, 'w') as fh: + fh.write("-e git://foo.com/zipball#egg=bar") + self.assertEqual(['bar'], + packaging.parse_requirements([self.tmp_file])) + + def test_parse_requirements_with_versioned_git_egg_url(self): + with open(self.tmp_file, 'w') as fh: + fh.write("-e git://foo.com/zipball#egg=bar-1.2.4") + self.assertEqual(['bar>=1.2.4'], + packaging.parse_requirements([self.tmp_file])) + + def test_parse_requirements_with_http_egg_url(self): + with open(self.tmp_file, 'w') as fh: + fh.write("https://foo.com/zipball#egg=bar") + self.assertEqual(['bar'], + packaging.parse_requirements([self.tmp_file])) + + def test_parse_requirements_with_versioned_http_egg_url(self): + with open(self.tmp_file, 'w') as fh: + fh.write("https://foo.com/zipball#egg=bar-4.2.1") + self.assertEqual(['bar>=4.2.1'], + packaging.parse_requirements([self.tmp_file])) + + def test_parse_requirements_removes_index_lines(self): + with open(self.tmp_file, 'w') as fh: + fh.write("-f foobar") + self.assertEqual([], packaging.parse_requirements([self.tmp_file])) + + def test_parse_requirements_removes_argparse(self): + with open(self.tmp_file, 'w') as fh: + fh.write("argparse") + if sys.version_info >= (2, 7): + self.assertEqual([], packaging.parse_requirements([self.tmp_file])) + + def test_parse_requirements_override_with_env(self): + with open(self.tmp_file, 'w') as fh: + fh.write("foo\nbar") + self.useFixture( + fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES', + self.tmp_file)) + self.assertEqual(['foo', 'bar'], + packaging.parse_requirements()) + + def test_parse_requirements_override_with_env_multiple_files(self): + with open(self.tmp_file, 'w') as fh: + fh.write("foo\nbar") + self.useFixture( + fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES', + "no-such-file," + self.tmp_file)) + self.assertEqual(['foo', 'bar'], + packaging.parse_requirements()) + + def test_get_requirement_from_file_empty(self): + actual = packaging.get_reqs_from_files([]) + self.assertEqual([], actual) + + def test_parse_requirements_with_comments(self): + with open(self.tmp_file, 'w') as fh: + fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz") + self.assertEqual(['foobar', 'foobaz'], + packaging.parse_requirements([self.tmp_file])) + + +class ParseDependencyLinksTest(tests.BaseTestCase): + + def setUp(self): + super(ParseDependencyLinksTest, self).setUp() + (fd, self.tmp_file) = tempfile.mkstemp(prefix="openstack", + suffix=".setup") + + def test_parse_dependency_normal(self): + with open(self.tmp_file, "w") as fh: 
+ fh.write("http://test.com\n") + self.assertEqual( + ["http://test.com"], + packaging.parse_dependency_links([self.tmp_file])) + + def test_parse_dependency_with_git_egg_url(self): + with open(self.tmp_file, "w") as fh: + fh.write("-e git://foo.com/zipball#egg=bar") + self.assertEqual( + ["git://foo.com/zipball#egg=bar"], + packaging.parse_dependency_links([self.tmp_file])) + + +def load_tests(loader, in_tests, pattern): + return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern) diff --git a/awx/lib/site-packages/pbr/tests/test_version.py b/awx/lib/site-packages/pbr/tests/test_version.py new file mode 100644 index 0000000000..7ef908bc3a --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/test_version.py @@ -0,0 +1,31 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# Copyright 2012-2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from pbr import tests +from pbr import version + + +class DeferredVersionTestCase(tests.BaseTestCase): + + def test_cached_version(self): + class MyVersionInfo(version.VersionInfo): + def _get_version_from_pkg_resources(self): + return "5.5.5.5" + + deferred_string = MyVersionInfo("openstack").\ + cached_version_string() + self.assertEqual("5.5.5.5", deferred_string) diff --git a/awx/lib/site-packages/pbr/tests/testpackage/CHANGES.txt b/awx/lib/site-packages/pbr/tests/testpackage/CHANGES.txt new file mode 100644 index 0000000000..709b9d4c80 --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/testpackage/CHANGES.txt @@ -0,0 +1,86 @@ +Changelog +=========== + +0.3 (unreleased) +------------------ + +- The ``glob_data_files`` hook became a pre-command hook for the install_data + command instead of being a setup-hook. This is to support the additional + functionality of requiring data_files with relative destination paths to be + install relative to the package's install path (i.e. site-packages). + +- Dropped support for and deprecated the easier_install custom command. + Although it should still work, it probably won't be used anymore for + stsci_python packages. + +- Added support for the ``build_optional_ext`` command, which replaces/extends + the default ``build_ext`` command. See the README for more details. + +- Added the ``tag_svn_revision`` setup_hook as a replacement for the + setuptools-specific tag_svn_revision option to the egg_info command. This + new hook is easier to use than the old tag_svn_revision option: It's + automatically enabled by the presence of ``.dev`` in the version string, and + disabled otherwise. + +- The ``svn_info_pre_hook`` and ``svn_info_post_hook`` have been replaced with + ``version_pre_command_hook`` and ``version_post_command_hook`` respectively. + However, a new ``version_setup_hook``, which has the same purpose, has been + added. It is generally easier to use and will give more consistent results + in that it will run every time setup.py is run, regardless of which command + is used. 
``stsci.distutils`` itself uses this hook--see the `setup.cfg` file
+  and `stsci/distutils/__init__.py` for example usage.
+
+- Instead of creating an `svninfo.py` module, the new ``version_`` hooks create
+  a file called `version.py`. In addition to the SVN info that was included
+  in `svninfo.py`, it includes a ``__version__`` variable to be used by the
+  package's `__init__.py`. This allows there to be a hard-coded
+  ``__version__`` variable included in the source code, rather than using
+  pkg_resources to get the version.
+
+- In `version.py`, the variables previously named ``__svn_version__`` and
+  ``__full_svn_info__`` are now named ``__svn_revision__`` and
+  ``__svn_full_info__``.
+
+- Fixed a bug when using stsci.distutils in the installation of other packages
+  in the ``stsci.*`` namespace package. If stsci.distutils was not already
+  installed, and was downloaded automatically by distribute through the
+  setup_requires option, then ``stsci.distutils`` would fail to import. This
+  is because of the way the namespace package (nspkg) mechanism currently
+  works: all packages belonging to the nspkg *must* be on the import path at
+  initial import time.
+
+  So when installing stsci.tools, for example, if ``stsci.tools`` is imported
+  from within the source code at install time, but before ``stsci.distutils``
+  is downloaded and added to the path, the ``stsci`` package is already
+  imported and can't be extended to include the path of ``stsci.distutils``
+  after the fact. The easiest way of dealing with this, it seems, is to
+  delete ``stsci`` from ``sys.modules``, which forces it to be reimported, now
+  with its ``__path__`` extended to include ``stsci.distutils``'s path.
+
+
+0.2.2 (2011-11-09)
+------------------
+
+- Fixed check for the issue205 bug on actual setuptools installs; before it
+  only worked on distribute. setuptools has the issue205 bug prior to version
+  0.6c10.
+
+- Improved the fix for the issue205 bug, especially on setuptools.
+  setuptools, prior to 0.6c10, did not back up sys.modules either before
+  sandboxing, which causes serious problems. In fact, it's so bad that it's
+  not enough to add a sys.modules backup to the current sandbox: It's in fact
+  necessary to monkeypatch setuptools.sandbox.run_setup so that any subsequent
+  calls to it also back up sys.modules.
+
+
+0.2.1 (2011-09-02)
+------------------
+
+- Fixed the dependencies so that setuptools is not a requirement, but
+  'distribute' specifically. Previously installation could fail if users had
+  plain setuptools installed and not distribute.
+
+0.2 (2011-08-23)
+------------------
+
+- Initial public release
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/LICENSE.txt b/awx/lib/site-packages/pbr/tests/testpackage/LICENSE.txt
new file mode 100644
index 0000000000..7e8019a89e
--- /dev/null
+++ b/awx/lib/site-packages/pbr/tests/testpackage/LICENSE.txt
@@ -0,0 +1,29 @@
+Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA)
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    1. Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+
+    2. Redistributions in binary form must reproduce the above
+       copyright notice, this list of conditions and the following
+       disclaimer in the documentation and/or other materials provided
+       with the distribution.
+
+    3.
The name of AURA and its representatives may not be used to
+       endorse or promote products derived from this software without
+       specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/MANIFEST.in b/awx/lib/site-packages/pbr/tests/testpackage/MANIFEST.in
new file mode 100644
index 0000000000..cdc95eafc3
--- /dev/null
+++ b/awx/lib/site-packages/pbr/tests/testpackage/MANIFEST.in
@@ -0,0 +1 @@
+include data_files/*
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/README.txt b/awx/lib/site-packages/pbr/tests/testpackage/README.txt
new file mode 100644
index 0000000000..b6d84a7b1a
--- /dev/null
+++ b/awx/lib/site-packages/pbr/tests/testpackage/README.txt
@@ -0,0 +1,148 @@
+Introduction
+============
+This package contains utilities used to package some of STScI's Python
+projects; specifically those projects that comprise stsci_python_ and
+Astrolib_.
+
+It currently consists mostly of some setup_hook scripts meant for use with
+`distutils2/packaging`_ and/or pbr_, and a customized easy_install command
+meant for use with distribute_.
+
+This package is not meant for general consumption, though it might be worth
+looking at for examples of how to do certain things with your own packages, but
+YMMV.
+
+Features
+========
+
+Hook Scripts
+------------
+Currently the main features of this package are a couple of setup_hook scripts.
+In distutils2, a setup_hook is a script that runs at the beginning of any
+pysetup command, and can modify the package configuration read from setup.cfg.
+There are also pre- and post-command hooks that only run before/after a
+specific setup command (e.g. build_ext, install) is run.
+
+stsci.distutils.hooks.use_packages_root
+'''''''''''''''''''''''''''''''''''''''
+If using the ``packages_root`` option under the ``[files]`` section of
+setup.cfg, this hook will add that path to ``sys.path`` so that modules in your
+package can be imported and used in setup. This can be used even if
+``packages_root`` is not specified--in this case it adds ``''`` to
+``sys.path``.
+
+stsci.distutils.hooks.version_setup_hook
+''''''''''''''''''''''''''''''''''''''''
+Creates a Python module called version.py which currently contains four
+variables:
+
+* ``__version__`` (the release version)
+* ``__svn_revision__`` (the SVN revision info as returned by the ``svnversion``
+  command)
+* ``__svn_full_info__`` (as returned by the ``svn info`` command)
+* ``__setup_datetime__`` (the date and time that setup.py was last run).
+
+These variables can be imported in the package's `__init__.py` for debugging
+purposes. The version.py module will *only* be created in a package that
+imports from the version module in its `__init__.py`.
It should be noted that
+this is generally preferable to writing these variables directly into
+`__init__.py`, since this provides more control and is less likely to
+unexpectedly break things in `__init__.py`.
+
+stsci.distutils.hooks.version_pre_command_hook
+''''''''''''''''''''''''''''''''''''''''''''''
+Identical to version_setup_hook, but designed to be used as a pre-command
+hook.
+
+stsci.distutils.hooks.version_post_command_hook
+'''''''''''''''''''''''''''''''''''''''''''''''
+The complement to version_pre_command_hook. This will delete any version.py
+files created during a build in order to prevent them from cluttering an SVN
+working copy (note, however, that version.py is *not* deleted from the build/
+directory, so a copy of it is still preserved). It will also not be deleted
+if the current directory is not an SVN working copy. For example, if the
+source code was extracted from a source tarball, it will be preserved.
+
+stsci.distutils.hooks.tag_svn_revision
+''''''''''''''''''''''''''''''''''''''
+A setup_hook to add the SVN revision of the current working copy path to the
+package version string, but only if the version ends in .dev.
+
+For example, ``mypackage-1.0.dev`` becomes ``mypackage-1.0.dev1234``. This is
+in accordance with the version string format standardized by PEP 386.
+
+This should be used as a replacement for the ``tag_svn_revision`` option to
+the egg_info command. This hook is more compatible with packaging/distutils2,
+which does not include any VCS support. This hook is also more flexible in
+that it turns the revision number on/off depending on the presence of ``.dev``
+in the version string, so that it's not automatically added to the version in
+final releases.
+
+This hook does require the ``svnversion`` command to be available in order to
+work. It does not examine the working copy metadata directly.
+
+stsci.distutils.hooks.numpy_extension_hook
+''''''''''''''''''''''''''''''''''''''''''
+This is a pre-command hook for the build_ext command. To use it, add a
+``[build_ext]`` section to your setup.cfg, and add to it::
+
+    pre-hook.numpy-extension-hook = stsci.distutils.hooks.numpy_extension_hook
+
+This hook must be used to build extension modules that use Numpy. The primary
+side-effect of this hook is to add the correct numpy include directories to
+`include_dirs`. To use it, add 'numpy' to the 'include-dirs' option of each
+extension module that requires numpy to build. The value 'numpy' will be
+replaced with the actual path to the numpy includes.
+
+stsci.distutils.hooks.is_display_option
+'''''''''''''''''''''''''''''''''''''''
+This is not actually a hook, but is a useful utility function that can be used
+in writing other hooks. Basically, it returns ``True`` if setup.py was run
+with a "display option" such as --version or --help. This can be used to
+prevent your hook from running in such cases.
+
+stsci.distutils.hooks.glob_data_files
+'''''''''''''''''''''''''''''''''''''
+A pre-command hook for the install_data command. Allows filename wildcards as
+understood by ``glob.glob()`` to be used in the data_files option. This hook
+must be used in order to have this functionality since it does not normally
+exist in distutils.
+
+This hook also ensures that data files are installed relative to the package
+path. data_files shouldn't normally be installed this way, but the
+functionality is required for a few special cases.
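Every hook described above follows the same calling convention that pbr inherited from d2to1: a setup_hook is just a callable that receives the parsed setup.cfg configuration as a dict of dicts and mutates it in place (cfg_to_args() in pbr/util.py, later in this patch, simply calls ``hook_fn(config)``). A minimal illustrative hook, with a hypothetical name::

    def append_dev_marker(config):
        # 'config' maps setup.cfg section names to option dicts, e.g.
        # config['metadata']['version']; hooks mutate it in place.
        metadata = config.setdefault('metadata', {})
        version = metadata.get('version', '0.0')
        if not version.endswith('.dev'):
            metadata['version'] = version + '.dev'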
+
+
+Commands
+--------
+build_optional_ext
+''''''''''''''''''
+This serves as an optional replacement for the default build_ext command,
+which compiles C extension modules. Its purpose is to allow extension modules
+to be *optional*, so that if their build fails the rest of the package is
+still allowed to be built and installed. This can be used when an extension
+module is not definitely required to use the package.
+
+To use this custom command, add::
+
+    commands = stsci.distutils.command.build_optional_ext.build_optional_ext
+
+under the ``[global]`` section of your package's setup.cfg. Then, to mark
+an individual extension module as optional, under the setup.cfg section for
+that extension add::
+
+    optional = True
+
+Optionally, you may also add a custom failure message by adding::
+
+    fail_message = The foobar extension module failed to compile.
+        This could be because you lack such and such headers.
+        This package will still work, but such and such features
+        will be disabled.
+
+
+.. _stsci_python: http://www.stsci.edu/resources/software_hardware/pyraf/stsci_python
+.. _Astrolib: http://www.scipy.org/AstroLib/
+.. _distutils2/packaging: http://distutils2.notmyidea.org/
+.. _d2to1: http://pypi.python.org/pypi/d2to1
+.. _distribute: http://pypi.python.org/pypi/distribute
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/data_files/a.txt b/awx/lib/site-packages/pbr/tests/testpackage/data_files/a.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/data_files/b.txt b/awx/lib/site-packages/pbr/tests/testpackage/data_files/b.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/data_files/c.rst b/awx/lib/site-packages/pbr/tests/testpackage/data_files/c.rst
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/extra-file.txt b/awx/lib/site-packages/pbr/tests/testpackage/extra-file.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/git-extra-file.txt b/awx/lib/site-packages/pbr/tests/testpackage/git-extra-file.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/__init__.py b/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/_setup_hooks.py b/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/_setup_hooks.py
new file mode 100644
index 0000000000..f8b3087604
--- /dev/null
+++ b/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/_setup_hooks.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +# Copyright (C) 2013 Association of Universities for Research in Astronomy +# (AURA) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of AURA and its representatives may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + +from distutils.command import build_py + + +def test_hook_1(config): + print('test_hook_1') + + +def test_hook_2(config): + print('test_hook_2') + + +class test_command(build_py.build_py): + command_name = 'build_py' + + def run(self): + print('Running custom build_py command.') + return build_py.build_py.run(self) + + +def test_pre_hook(cmdobj): + print('build_ext pre-hook') + + +def test_post_hook(cmdobj): + print('build_ext post-hook') diff --git a/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/cmd.py b/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/cmd.py new file mode 100644 index 0000000000..4cc4522f10 --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/cmd.py @@ -0,0 +1,26 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
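The test_command class just above is the shape that the 'commands' option in the test package's setup.cfg expects: subclass the distutils command, set command_name so the machinery knows which built-in command is being replaced, and chain to the parent's run(). The same pattern applied to a different command, as an illustrative sketch (the class name is hypothetical)::

    from distutils.command import build_scripts

    class noisy_build_scripts(build_scripts.build_scripts):
        # command_name tells the setup machinery which command to replace.
        command_name = 'build_scripts'

        def run(self):
            print('Running custom build_scripts command.')
            return build_scripts.build_scripts.run(self)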
+from __future__ import print_function + + +def main(): + print("PBR Test Command") + + +class Foo(object): + + @classmethod + def bar(self): + print("PBR Test Command - with class!") diff --git a/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/package_data/1.txt b/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/package_data/1.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/package_data/2.txt b/awx/lib/site-packages/pbr/tests/testpackage/pbr_testpackage/package_data/2.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/pbr/tests/testpackage/setup.cfg b/awx/lib/site-packages/pbr/tests/testpackage/setup.cfg new file mode 100644 index 0000000000..a410e3c7d9 --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/testpackage/setup.cfg @@ -0,0 +1,51 @@ +[metadata] +name = pbr_testpackage +version = 0.1.dev +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://pypi.python.org/pypi/pbr +summary = Test package for testing pbr +description-file = + README.txt + CHANGES.txt +requires-python = >=2.5 + +requires-dist = + setuptools + +classifier = + Development Status :: 3 - Alpha + Intended Audience :: Developers + License :: OSI Approved :: BSD License + Programming Language :: Python + Topic :: Scientific/Engineering + Topic :: Software Development :: Build Tools + Topic :: Software Development :: Libraries :: Python Modules + Topic :: System :: Archiving :: Packaging + +keywords = packaging, distutils, setuptools + +[files] +packages = pbr_testpackage +package-data = testpackage = package_data/*.txt +data-files = testpackage/data_files = data_files/*.txt +extra-files = extra-file.txt + +[entry_points] +console_scripts = + pbr_test_cmd = pbr_testpackage.cmd:main + pbr_test_cmd_with_class = pbr_testpackage.cmd:Foo.bar + +[extension=pbr_testpackage.testext] +sources = src/testext.c +optional = True + +[global] +#setup-hooks = +# pbr_testpackage._setup_hooks.test_hook_1 +# pbr_testpackage._setup_hooks.test_hook_2 +commands = pbr_testpackage._setup_hooks.test_command + +[build_ext] +#pre-hook.test_pre_hook = pbr_testpackage._setup_hooks.test_pre_hook +#post-hook.test_post_hook = pbr_testpackage._setup_hooks.test_post_hook diff --git a/awx/lib/site-packages/pbr/tests/testpackage/setup.py b/awx/lib/site-packages/pbr/tests/testpackage/setup.py new file mode 100644 index 0000000000..8866691061 --- /dev/null +++ b/awx/lib/site-packages/pbr/tests/testpackage/setup.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
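The [entry_points] section of setup.cfg above wires the two callables from cmd.py to console scripts via the 'module:attr' spec ('pbr_testpackage.cmd:main' and 'pbr_testpackage.cmd:Foo.bar'). A generated script ultimately imports the module and walks the attribute path; a simplified sketch of that resolution (not pbr's actual generated-script template)::

    import importlib

    def resolve_entry_point(spec):
        # Split 'package.module:attr.subattr' and walk the attributes.
        module_name, _, attrs = spec.partition(':')
        target = importlib.import_module(module_name)
        for attr in attrs.split('.'):
            target = getattr(target, attr)
        return target

    # resolve_entry_point('pbr_testpackage.cmd:Foo.bar')() prints
    # "PBR Test Command - with class!"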
+
+import setuptools
+
+setuptools.setup(
+    setup_requires=['pbr'],
+    pbr=True,
+)
diff --git a/awx/lib/site-packages/pbr/tests/testpackage/src/testext.c b/awx/lib/site-packages/pbr/tests/testpackage/src/testext.c
new file mode 100644
index 0000000000..872d43c04f
--- /dev/null
+++ b/awx/lib/site-packages/pbr/tests/testpackage/src/testext.c
@@ -0,0 +1,28 @@
+#include <Python.h>
+
+
+static PyMethodDef TestextMethods[] = {
+    {NULL, NULL, 0, NULL}
+};
+
+
+#if PY_MAJOR_VERSION >=3
+static struct PyModuleDef testextmodule = {
+    PyModuleDef_HEAD_INIT,
+    "testext",
+    -1,
+    TestextMethods
+};
+
+PyObject*
+PyInit_testext(void)
+{
+    return PyModule_Create(&testextmodule);
+}
+#else
+PyMODINIT_FUNC
+inittestext(void)
+{
+    Py_InitModule("testext", TestextMethods);
+}
+#endif
diff --git a/awx/lib/site-packages/pbr/tests/util.py b/awx/lib/site-packages/pbr/tests/util.py
new file mode 100644
index 0000000000..de5a7401aa
--- /dev/null
+++ b/awx/lib/site-packages/pbr/tests/util.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright (C) 2013 Association of Universities for Research in Astronomy
+# (AURA)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of AURA and its representatives may not be used to
+#    endorse or promote products derived from this software without
+#    specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
+# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+
+import contextlib
+import os
+import shutil
+import stat
+
+try:
+    import ConfigParser as configparser
+except ImportError:
+    import configparser
+
+
+@contextlib.contextmanager
+def open_config(filename):
+    cfg = configparser.ConfigParser()
+    cfg.read(filename)
+    yield cfg
+    with open(filename, 'w') as fp:
+        cfg.write(fp)
+
+
+def rmtree(path):
+    """shutil.rmtree() with error handler.
+
+    Handle 'access denied' from trying to delete read-only files.
+ """ + + def onerror(func, path, exc_info): + if not os.access(path, os.W_OK): + os.chmod(path, stat.S_IWUSR) + func(path) + else: + raise + + return shutil.rmtree(path, onerror=onerror) diff --git a/awx/lib/site-packages/pbr/util.py b/awx/lib/site-packages/pbr/util.py new file mode 100644 index 0000000000..1682b66926 --- /dev/null +++ b/awx/lib/site-packages/pbr/util.py @@ -0,0 +1,621 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (C) 2013 Association of Universities for Research in Astronomy +# (AURA) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of AURA and its representatives may not be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + +"""The code in this module is mostly copy/pasted out of the distutils2 source +code, as recommended by Tarek Ziade. As such, it may be subject to some change +as distutils2 development continues, and will have to be kept up to date. + +I didn't want to use it directly from distutils2 itself, since I do not want it +to be an installation dependency for our packages yet--it is still too unstable +(the latest version on PyPI doesn't even install). +""" + +# These first two imports are not used, but are needed to get around an +# irritating Python bug that can crop up when using ./setup.py test. 
+# See: http://www.eby-sarna.com/pipermail/peak/2010-May/003355.html +try: + import multiprocessing # flake8: noqa +except ImportError: + pass +import logging # flake8: noqa + +import os +import re +import sys +import traceback + +from collections import defaultdict + +import distutils.ccompiler + +from distutils import log +from distutils.errors import (DistutilsOptionError, DistutilsModuleError, + DistutilsFileError) +from setuptools.command.egg_info import manifest_maker +from setuptools.dist import Distribution +from setuptools.extension import Extension + +try: + import ConfigParser as configparser +except ImportError: + import configparser + +from pbr import extra_files +import pbr.hooks + +# A simplified RE for this; just checks that the line ends with version +# predicates in () +_VERSION_SPEC_RE = re.compile(r'\s*(.*?)\s*\((.*)\)\s*$') + + +# Mappings from setup() keyword arguments to setup.cfg options; +# The values are (section, option) tuples, or simply (section,) tuples if +# the option has the same name as the setup() argument +D1_D2_SETUP_ARGS = { + "name": ("metadata",), + "version": ("metadata",), + "author": ("metadata",), + "author_email": ("metadata",), + "maintainer": ("metadata",), + "maintainer_email": ("metadata",), + "url": ("metadata", "home_page"), + "description": ("metadata", "summary"), + "keywords": ("metadata",), + "long_description": ("metadata", "description"), + "download-url": ("metadata",), + "classifiers": ("metadata", "classifier"), + "platforms": ("metadata", "platform"), # ** + "license": ("metadata",), + # Use setuptools install_requires, not + # broken distutils requires + "install_requires": ("metadata", "requires_dist"), + "setup_requires": ("metadata", "setup_requires_dist"), + "provides": ("metadata", "provides_dist"), # ** + "obsoletes": ("metadata", "obsoletes_dist"), # ** + "package_dir": ("files", 'packages_root'), + "packages": ("files",), + "package_data": ("files",), + "namespace_packages": ("files",), + "data_files": ("files",), + "scripts": ("files",), + "py_modules": ("files", "modules"), # ** + "cmdclass": ("global", "commands"), + # Not supported in distutils2, but provided for + # backwards compatibility with setuptools + "use_2to3": ("backwards_compat", "use_2to3"), + "zip_safe": ("backwards_compat", "zip_safe"), + "tests_require": ("backwards_compat", "tests_require"), + "dependency_links": ("backwards_compat",), + "include_package_data": ("backwards_compat",), +} + +# setup() arguments that can have multiple values in setup.cfg +MULTI_FIELDS = ("classifiers", + "platforms", + "install_requires", + "provides", + "obsoletes", + "namespace_packages", + "packages", + "package_data", + "data_files", + "scripts", + "py_modules", + "dependency_links", + "setup_requires", + "tests_require", + "cmdclass") + +# setup() arguments that contain boolean values +BOOL_FIELDS = ("use_2to3", "zip_safe", "include_package_data") + + +CSV_FIELDS = ("keywords",) + + +log.set_verbosity(log.INFO) + + +def resolve_name(name): + """Resolve a name like ``module.object`` to an object and return it. + + Raise ImportError if the module or name is not found. 
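+
+    For illustration::
+
+        resolve_name('os.path.join')    # -> the os.path.join function
+        resolve_name('logging.Logger')  # -> the logging.Logger class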
+    """
+
+    parts = name.split('.')
+    cursor = len(parts) - 1
+    module_name = parts[:cursor]
+    attr_name = parts[-1]
+
+    while cursor > 0:
+        try:
+            ret = __import__('.'.join(module_name), fromlist=[attr_name])
+            break
+        except ImportError:
+            if cursor == 0:
+                raise
+            cursor -= 1
+            module_name = parts[:cursor]
+            attr_name = parts[cursor]
+            ret = ''
+
+    for part in parts[cursor:]:
+        try:
+            ret = getattr(ret, part)
+        except AttributeError:
+            raise ImportError(name)
+
+    return ret
+
+
+def cfg_to_args(path='setup.cfg'):
+    """ Distutils2 to distutils1 compatibility util.
+
+    This method uses an existing setup.cfg to generate a dictionary of
+    keywords that can be used by distutils.core.setup(**kwargs).
+
+    :param path:
+        The setup.cfg path.
+    :raises DistutilsFileError:
+        When the setup.cfg file is not found.
+
+    """
+
+    # The method source code really starts here.
+    parser = configparser.RawConfigParser()
+    if not os.path.exists(path):
+        raise DistutilsFileError("file '%s' does not exist" %
+                                 os.path.abspath(path))
+    parser.read(path)
+    config = {}
+    for section in parser.sections():
+        config[section] = dict(parser.items(section))
+
+    # Run setup_hooks, if configured
+    setup_hooks = has_get_option(config, 'global', 'setup_hooks')
+    package_dir = has_get_option(config, 'files', 'packages_root')
+
+    # Add the source package directory to sys.path in case it contains
+    # additional hooks, and to make sure it's on the path before any existing
+    # installations of the package
+    if package_dir:
+        package_dir = os.path.abspath(package_dir)
+        sys.path.insert(0, package_dir)
+
+    try:
+        if setup_hooks:
+            setup_hooks = [
+                hook for hook in split_multiline(setup_hooks)
+                if hook != 'pbr.hooks.setup_hook']
+            for hook in setup_hooks:
+                hook_fn = resolve_name(hook)
+                try:
+                    hook_fn(config)
+                except SystemExit:
+                    log.error('setup hook %s terminated the installation'
+                              % hook)
+                except:
+                    e = sys.exc_info()[1]
+                    log.error('setup hook %s raised exception: %s\n' %
+                              (hook, e))
+                    log.error(traceback.format_exc())
+                    sys.exit(1)
+
+        # Run the pbr hook
+        pbr.hooks.setup_hook(config)
+
+        kwargs = setup_cfg_to_setup_kwargs(config)
+
+        # Set default config overrides
+        kwargs['include_package_data'] = True
+        kwargs['zip_safe'] = False
+
+        register_custom_compilers(config)
+
+        ext_modules = get_extension_modules(config)
+        if ext_modules:
+            kwargs['ext_modules'] = ext_modules
+
+        entry_points = get_entry_points(config)
+        if entry_points:
+            kwargs['entry_points'] = entry_points
+
+        wrap_commands(kwargs)
+
+        # Handle the [files]/extra_files option
+        files_extra_files = has_get_option(config, 'files', 'extra_files')
+        if files_extra_files:
+            extra_files.set_extra_files(split_multiline(files_extra_files))
+
+    finally:
+        # Perform cleanup if any paths were added to sys.path
+        if package_dir:
+            sys.path.pop(0)
+
+    return kwargs
+
+
+def setup_cfg_to_setup_kwargs(config):
+    """Processes the setup.cfg options and converts them to arguments accepted
+    by setuptools' setup() function.
+    """
+
+    kwargs = {}
+
+    for arg in D1_D2_SETUP_ARGS:
+        if len(D1_D2_SETUP_ARGS[arg]) == 2:
+            # The distutils field name is different than distutils2's.
+            section, option = D1_D2_SETUP_ARGS[arg]
+
+        elif len(D1_D2_SETUP_ARGS[arg]) == 1:
+            # The distutils field name is the same as distutils2's.
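+            # (e.g. "name" maps to ("metadata",): the setup() argument and
+            # the setup.cfg option share a name)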
+ section = D1_D2_SETUP_ARGS[arg][0] + option = arg + + in_cfg_value = has_get_option(config, section, option) + if not in_cfg_value: + # There is no such option in the setup.cfg + if arg == "long_description": + in_cfg_value = has_get_option(config, section, + "description_file") + if in_cfg_value: + in_cfg_value = split_multiline(in_cfg_value) + value = '' + for filename in in_cfg_value: + description_file = open(filename) + try: + value += description_file.read().strip() + '\n\n' + finally: + description_file.close() + in_cfg_value = value + else: + continue + + if arg in CSV_FIELDS: + in_cfg_value = split_csv(in_cfg_value) + if arg in MULTI_FIELDS: + in_cfg_value = split_multiline(in_cfg_value) + elif arg in BOOL_FIELDS: + # Provide some flexibility here... + if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'): + in_cfg_value = True + else: + in_cfg_value = False + + if in_cfg_value: + if arg in ('install_requires', 'tests_require'): + # Replaces PEP345-style version specs with the sort expected by + # setuptools + in_cfg_value = [_VERSION_SPEC_RE.sub(r'\1\2', pred) + for pred in in_cfg_value] + elif arg == 'package_dir': + in_cfg_value = {'': in_cfg_value} + elif arg in ('package_data', 'data_files'): + data_files = {} + firstline = True + prev = None + for line in in_cfg_value: + if '=' in line: + key, value = line.split('=', 1) + key, value = (key.strip(), value.strip()) + if key in data_files: + # Multiple duplicates of the same package name; + # this is for backwards compatibility of the old + # format prior to d2to1 0.2.6. + prev = data_files[key] + prev.extend(value.split()) + else: + prev = data_files[key.strip()] = value.split() + elif firstline: + raise DistutilsOptionError( + 'malformed package_data first line %r (misses ' + '"=")' % line) + else: + prev.extend(line.strip().split()) + firstline = False + if arg == 'data_files': + # the data_files value is a pointlessly different structure + # from the package_data value + data_files = data_files.items() + in_cfg_value = data_files + elif arg == 'cmdclass': + cmdclass = {} + dist = Distribution() + for cls in in_cfg_value: + cls = resolve_name(cls) + cmd = cls(dist) + cmdclass[cmd.get_command_name()] = cls + in_cfg_value = cmdclass + + kwargs[arg] = in_cfg_value + + return kwargs + + +def register_custom_compilers(config): + """Handle custom compilers; this has no real equivalent in distutils, where + additional compilers could only be added programmatically, so we have to + hack it in somehow. + """ + + compilers = has_get_option(config, 'global', 'compilers') + if compilers: + compilers = split_multiline(compilers) + for compiler in compilers: + compiler = resolve_name(compiler) + + # In distutils2 compilers these class attributes exist; for + # distutils1 we just have to make something up + if hasattr(compiler, 'name'): + name = compiler.name + else: + name = compiler.__name__ + if hasattr(compiler, 'description'): + desc = compiler.description + else: + desc = 'custom compiler %s' % name + + module_name = compiler.__module__ + # Note; this *will* override built in compilers with the same name + # TODO: Maybe display a warning about this? + cc = distutils.ccompiler.compiler_class + cc[name] = (module_name, compiler.__name__, desc) + + # HACK!!!! Distutils assumes all compiler modules are in the + # distutils package + sys.modules['distutils.' 
+ module_name] = sys.modules[module_name] + + +def get_extension_modules(config): + """Handle extension modules""" + + EXTENSION_FIELDS = ("sources", + "include_dirs", + "define_macros", + "undef_macros", + "library_dirs", + "libraries", + "runtime_library_dirs", + "extra_objects", + "extra_compile_args", + "extra_link_args", + "export_symbols", + "swig_opts", + "depends") + + ext_modules = [] + for section in config: + if ':' in section: + labels = section.split(':', 1) + else: + # Backwards compatibility for old syntax; don't use this though + labels = section.split('=', 1) + labels = [l.strip() for l in labels] + if (len(labels) == 2) and (labels[0] == 'extension'): + ext_args = {} + for field in EXTENSION_FIELDS: + value = has_get_option(config, section, field) + # All extension module options besides name can have multiple + # values + if not value: + continue + value = split_multiline(value) + if field == 'define_macros': + macros = [] + for macro in value: + macro = macro.split('=', 1) + if len(macro) == 1: + macro = (macro[0].strip(), None) + else: + macro = (macro[0].strip(), macro[1].strip()) + macros.append(macro) + value = macros + ext_args[field] = value + if ext_args: + if 'name' not in ext_args: + ext_args['name'] = labels[1] + ext_modules.append(Extension(ext_args.pop('name'), + **ext_args)) + return ext_modules + + +def get_entry_points(config): + """Process the [entry_points] section of setup.cfg to handle setuptools + entry points. This is, of course, not a standard feature of + distutils2/packaging, but as there is not currently a standard alternative + in packaging, we provide support for them. + """ + + if not 'entry_points' in config: + return {} + + return dict((option, split_multiline(value)) + for option, value in config['entry_points'].items()) + + +def wrap_commands(kwargs): + dist = Distribution() + + # This should suffice to get the same config values and command classes + # that the actual Distribution will see (not counting cmdclass, which is + # handled below) + dist.parse_config_files() + + for cmd, _ in dist.get_command_list(): + hooks = {} + for opt, val in dist.get_option_dict(cmd).items(): + val = val[1] + if opt.startswith('pre_hook.') or opt.startswith('post_hook.'): + hook_type, alias = opt.split('.', 1) + hook_dict = hooks.setdefault(hook_type, {}) + hook_dict[alias] = val + if not hooks: + continue + + if 'cmdclass' in kwargs and cmd in kwargs['cmdclass']: + cmdclass = kwargs['cmdclass'][cmd] + else: + cmdclass = dist.get_command_class(cmd) + + new_cmdclass = wrap_command(cmd, cmdclass, hooks) + kwargs.setdefault('cmdclass', {})[cmd] = new_cmdclass + + +def wrap_command(cmd, cmdclass, hooks): + def run(self, cmdclass=cmdclass): + self.run_command_hooks('pre_hook') + cmdclass.run(self) + self.run_command_hooks('post_hook') + + return type(cmd, (cmdclass, object), + {'run': run, 'run_command_hooks': run_command_hooks, + 'pre_hook': hooks.get('pre_hook'), + 'post_hook': hooks.get('post_hook')}) + + +def run_command_hooks(cmd_obj, hook_kind): + """Run hooks registered for that command and phase. + + *cmd_obj* is a finalized command object; *hook_kind* is either + 'pre_hook' or 'post_hook'. 
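+
+    Hooks come from per-command sections of setup.cfg, as collected by
+    wrap_commands() above; for illustration (hypothetical dotted name)::
+
+        [build_ext]
+        pre_hook.my_hook = mypkg.hooks.pre_build_ext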
+ """ + + if hook_kind not in ('pre_hook', 'post_hook'): + raise ValueError('invalid hook kind: %r' % hook_kind) + + hooks = getattr(cmd_obj, hook_kind, None) + + if hooks is None: + return + + for hook in hooks.values(): + if isinstance(hook, str): + try: + hook_obj = resolve_name(hook) + except ImportError: + err = sys.exc_info()[1] # For py3k + raise DistutilsModuleError('cannot find hook %s: %s' % + (hook,err)) + else: + hook_obj = hook + + if not hasattr(hook_obj, '__call__'): + raise DistutilsOptionError('hook %r is not callable' % hook) + + log.info('running %s %s for command %s', + hook_kind, hook, cmd_obj.get_command_name()) + + try : + hook_obj(cmd_obj) + except: + e = sys.exc_info()[1] + log.error('hook %s raised exception: %s\n' % (hook, e)) + log.error(traceback.format_exc()) + sys.exit(1) + + +def has_get_option(config, section, option): + if section in config and option in config[section]: + return config[section][option] + elif section in config and option.replace('_', '-') in config[section]: + return config[section][option.replace('_', '-')] + else: + return False + + +def split_multiline(value): + """Special behaviour when we have a multi line options""" + + value = [element for element in + (line.strip() for line in value.split('\n')) + if element] + return value + + +def split_csv(value): + """Special behaviour when we have a comma separated options""" + + value = [element for element in + (chunk.strip() for chunk in value.split(',')) + if element] + return value + + +def monkeypatch_method(cls): + """A function decorator to monkey-patch a method of the same name on the + given class. + """ + + def wrapper(func): + orig = getattr(cls, func.__name__, None) + if orig and not hasattr(orig, '_orig'): # Already patched + setattr(func, '_orig', orig) + setattr(cls, func.__name__, func) + return func + + return wrapper + + +# The following classes are used to hack Distribution.command_options a bit +class DefaultGetDict(defaultdict): + """Like defaultdict, but the get() method also sets and returns the default + value. + """ + + def get(self, key, default=None): + if default is None: + default = self.default_factory() + return super(DefaultGetDict, self).setdefault(key, default) + + +class IgnoreDict(dict): + """A dictionary that ignores any insertions in which the key is a string + matching any string in `ignore`. The ignore list can also contain wildcard + patterns using '*'. + """ + + def __init__(self, ignore): + self.__ignore = re.compile(r'(%s)' % ('|'.join( + [pat.replace('*', '.*') + for pat in ignore]))) + + def __setitem__(self, key, val): + if self.__ignore.match(key): + return + super(IgnoreDict, self).__setitem__(key, val) diff --git a/awx/lib/site-packages/pbr/version.py b/awx/lib/site-packages/pbr/version.py new file mode 100644 index 0000000000..87a5ca960d --- /dev/null +++ b/awx/lib/site-packages/pbr/version.py @@ -0,0 +1,95 @@ + +# Copyright 2012 OpenStack Foundation +# Copyright 2012-2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utilities for consuming the version from pkg_resources. +""" + +import pkg_resources + + +class VersionInfo(object): + + def __init__(self, package): + """Object that understands versioning for a package + :param package: name of the python package, such as glance, or + python-glanceclient + """ + self.package = package + self.release = None + self.version = None + self._cached_version = None + + def __str__(self): + """Make the VersionInfo object behave like a string.""" + return self.version_string() + + def __repr__(self): + """Include the name.""" + return "VersionInfo(%s:%s)" % (self.package, self.version_string()) + + def _get_version_from_pkg_resources(self): + """Get the version of the package from the pkg_resources record + associated with the package. + """ + try: + requirement = pkg_resources.Requirement.parse(self.package) + provider = pkg_resources.get_provider(requirement) + return provider.version + except pkg_resources.DistributionNotFound: + # The most likely cause for this is running tests in a tree + # produced from a tarball where the package itself has not been + # installed into anything. Revert to setup-time logic. + from pbr import packaging + return packaging.get_version(self.package) + + def release_string(self): + """Return the full version of the package including suffixes indicating + VCS status. + """ + if self.release is None: + self.release = self._get_version_from_pkg_resources() + + return self.release + + def version_string(self): + """Return the short version minus any alpha/beta tags.""" + if self.version is None: + parts = [] + for part in self.release_string().split('.'): + if part[0].isdigit(): + parts.append(part) + else: + break + self.version = ".".join(parts) + + return self.version + + # Compatibility functions + canonical_version_string = version_string + version_string_with_vcs = release_string + + def cached_version_string(self, prefix=""): + """Generate an object which will expand in a string context to + the results of version_string(). We do this so that don't + call into pkg_resources every time we start up a program when + passing version information into the CONF constructor, but + rather only do the calculation when and if a version is requested + """ + if not self._cached_version: + self._cached_version = "%s%s" % (prefix, + self.version_string()) + return self._cached_version diff --git a/awx/lib/site-packages/pip/__init__.py b/awx/lib/site-packages/pip/__init__.py new file mode 100644 index 0000000000..8bc68aaca2 --- /dev/null +++ b/awx/lib/site-packages/pip/__init__.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python +import os +import optparse + +import sys +import re + +from pip.exceptions import InstallationError, CommandError, PipError +from pip.log import logger +from pip.util import get_installed_distributions, get_prog +from pip.vcs import git, mercurial, subversion, bazaar # noqa +from pip.baseparser import create_main_parser +from pip.commands import commands, get_similar_commands, get_summaries + + +# The version as used in the setup.py and the docs conf.py +__version__ = "1.4.1" + +def autocomplete(): + """Command and option completion for the main option parser (and options) + and its subcommands (and options). + + Enable by sourcing one of the completion shell scripts (bash or zsh). + """ + # Don't complete if user hasn't sourced bash_completion file. 
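+    # The completion scripts re-invoke pip with PIP_AUTO_COMPLETE set and
+    # hand over the shell's completion state in COMP_WORDS/COMP_CWORD,
+    # which the code below reads back.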
+ if 'PIP_AUTO_COMPLETE' not in os.environ: + return + cwords = os.environ['COMP_WORDS'].split()[1:] + cword = int(os.environ['COMP_CWORD']) + try: + current = cwords[cword - 1] + except IndexError: + current = '' + + subcommands = [cmd for cmd, summary in get_summaries()] + options = [] + # subcommand + try: + subcommand_name = [w for w in cwords if w in subcommands][0] + except IndexError: + subcommand_name = None + + parser = create_main_parser() + # subcommand options + if subcommand_name: + # special case: 'help' subcommand has no options + if subcommand_name == 'help': + sys.exit(1) + # special case: list locally installed dists for uninstall command + if subcommand_name == 'uninstall' and not current.startswith('-'): + installed = [] + lc = current.lower() + for dist in get_installed_distributions(local_only=True): + if dist.key.startswith(lc) and dist.key not in cwords[1:]: + installed.append(dist.key) + # if there are no dists installed, fall back to option completion + if installed: + for dist in installed: + print(dist) + sys.exit(1) + + subcommand = commands[subcommand_name](parser) + options += [(opt.get_opt_string(), opt.nargs) + for opt in subcommand.parser.option_list_all + if opt.help != optparse.SUPPRESS_HELP] + + # filter out previously specified options from available options + prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] + options = [(x, v) for (x, v) in options if x not in prev_opts] + # filter options by current input + options = [(k, v) for k, v in options if k.startswith(current)] + for option in options: + opt_label = option[0] + # append '=' to options which require args + if option[1]: + opt_label += '=' + print(opt_label) + else: + # show main parser options only when necessary + if current.startswith('-') or current.startswith('--'): + opts = [i.option_list for i in parser.option_groups] + opts.append(parser.option_list) + opts = (o for it in opts for o in it) + + subcommands += [i.get_opt_string() for i in opts + if i.help != optparse.SUPPRESS_HELP] + + print(' '.join([x for x in subcommands if x.startswith(current)])) + sys.exit(1) + + +def parseopts(args): + parser = create_main_parser() + parser.main = True # so the help formatter knows + + # create command listing + command_summaries = get_summaries() + + description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] + parser.description = '\n'.join(description) + + options, args = parser.parse_args(args) + + if options.version: + sys.stdout.write(parser.version) + sys.stdout.write(os.linesep) + sys.exit() + + # pip || pip help || pip --help -> print_help() + if not args or (args[0] == 'help' and len(args) == 1): + parser.print_help() + sys.exit() + + if not args: + msg = ('You must give a command ' + '(use "pip --help" to see a list of commands)') + raise CommandError(msg) + + command = args[0].lower() + + if command not in commands: + guess = get_similar_commands(command) + + msg = ['unknown command "%s"' % command] + if guess: + msg.append('maybe you meant "%s"' % guess) + + raise CommandError(' - '.join(msg)) + + return command, options, args, parser + + +def main(initial_args=None): + if initial_args is None: + initial_args = sys.argv[1:] + + autocomplete() + + try: + cmd_name, options, args, parser = parseopts(initial_args) + except PipError: + e = sys.exc_info()[1] + sys.stderr.write("ERROR: %s" % e) + sys.stderr.write(os.linesep) + sys.exit(1) + + command = commands[cmd_name](parser) # see baseparser.Command + return command.main(args[1:], options) + + +def bootstrap(): + 
""" + Bootstrapping function to be called from install-pip.py script. + """ + return main(['install', '--upgrade', 'pip'] + sys.argv[1:]) + +############################################################ +## Writing freeze files + + +class FrozenRequirement(object): + + def __init__(self, name, req, editable, comments=()): + self.name = name + self.req = req + self.editable = editable + self.comments = comments + + _rev_re = re.compile(r'-r(\d+)$') + _date_re = re.compile(r'-(20\d\d\d\d\d\d)$') + + @classmethod + def from_dist(cls, dist, dependency_links, find_tags=False): + location = os.path.normcase(os.path.abspath(dist.location)) + comments = [] + from pip.vcs import vcs, get_src_requirement + if vcs.get_backend_name(location): + editable = True + try: + req = get_src_requirement(dist, location, find_tags) + except InstallationError: + ex = sys.exc_info()[1] + logger.warn("Error when trying to get requirement for VCS system %s, falling back to uneditable format" % ex) + req = None + if req is None: + logger.warn('Could not determine repository location of %s' % location) + comments.append('## !! Could not determine repository location') + req = dist.as_requirement() + editable = False + else: + editable = False + req = dist.as_requirement() + specs = req.specs + assert len(specs) == 1 and specs[0][0] == '==' + version = specs[0][1] + ver_match = cls._rev_re.search(version) + date_match = cls._date_re.search(version) + if ver_match or date_match: + svn_backend = vcs.get_backend('svn') + if svn_backend: + svn_location = svn_backend( + ).get_location(dist, dependency_links) + if not svn_location: + logger.warn( + 'Warning: cannot find svn location for %s' % req) + comments.append('## FIXME: could not find svn URL in dependency_links for this package:') + else: + comments.append('# Installing as editable to satisfy requirement %s:' % req) + if ver_match: + rev = ver_match.group(1) + else: + rev = '{%s}' % date_match.group(1) + editable = True + req = '%s@%s#egg=%s' % (svn_location, rev, cls.egg_name(dist)) + return cls(dist.project_name, req, editable, comments) + + @staticmethod + def egg_name(dist): + name = dist.egg_name() + match = re.search(r'-py\d\.\d$', name) + if match: + name = name[:match.start()] + return name + + def __str__(self): + req = self.req + if self.editable: + req = '-e %s' % req + return '\n'.join(list(self.comments) + [str(req)]) + '\n' + + +if __name__ == '__main__': + exit = main() + if exit: + sys.exit(exit) diff --git a/awx/lib/site-packages/pip/__main__.py b/awx/lib/site-packages/pip/__main__.py new file mode 100644 index 0000000000..5ca3746342 --- /dev/null +++ b/awx/lib/site-packages/pip/__main__.py @@ -0,0 +1,7 @@ +import sys +from .runner import run + +if __name__ == '__main__': + exit = run() + if exit: + sys.exit(exit) diff --git a/awx/lib/site-packages/pip/backwardcompat/__init__.py b/awx/lib/site-packages/pip/backwardcompat/__init__.py new file mode 100644 index 0000000000..465759d802 --- /dev/null +++ b/awx/lib/site-packages/pip/backwardcompat/__init__.py @@ -0,0 +1,125 @@ +"""Stuff that differs in different Python versions""" + +import os +import imp +import sys +import site + +__all__ = ['WindowsError'] + +uses_pycache = hasattr(imp, 'cache_from_source') + +class NeverUsedException(Exception): + """this exception should never be raised""" + +try: + WindowsError = WindowsError +except NameError: + WindowsError = NeverUsedException + +try: + #new in Python 3.3 + PermissionError = PermissionError +except NameError: + PermissionError = NeverUsedException + 
+console_encoding = sys.__stdout__.encoding + +if sys.version_info >= (3,): + from io import StringIO, BytesIO + from functools import reduce + from urllib.error import URLError, HTTPError + from queue import Queue, Empty + from urllib.request import url2pathname + from urllib.request import urlretrieve + from email import message as emailmessage + import urllib.parse as urllib + import urllib.request as urllib2 + import configparser as ConfigParser + import xmlrpc.client as xmlrpclib + import urllib.parse as urlparse + import http.client as httplib + + def cmp(a, b): + return (a > b) - (a < b) + + def b(s): + return s.encode('utf-8') + + def u(s): + return s.decode('utf-8') + + def console_to_str(s): + try: + return s.decode(console_encoding) + except UnicodeDecodeError: + return s.decode('utf_8') + + def fwrite(f, s): + f.buffer.write(b(s)) + + def get_http_message_param(http_message, param, default_value): + return http_message.get_param(param, default_value) + + bytes = bytes + string_types = (str,) + raw_input = input +else: + from cStringIO import StringIO + from urllib2 import URLError, HTTPError + from Queue import Queue, Empty + from urllib import url2pathname, urlretrieve + from email import Message as emailmessage + import urllib + import urllib2 + import urlparse + import ConfigParser + import xmlrpclib + import httplib + + def b(s): + return s + + def u(s): + return s + + def console_to_str(s): + return s + + def fwrite(f, s): + f.write(s) + + def get_http_message_param(http_message, param, default_value): + result = http_message.getparam(param) + return result or default_value + + bytes = str + string_types = (basestring,) + reduce = reduce + cmp = cmp + raw_input = raw_input + BytesIO = StringIO + + +from distutils.sysconfig import get_python_lib, get_python_version + +#site.USER_SITE was created in py2.6 +user_site = getattr(site, 'USER_SITE', None) + + +def product(*args, **kwds): + # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy + # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111 + pools = list(map(tuple, args)) * kwds.get('repeat', 1) + result = [[]] + for pool in pools: + result = [x + [y] for x in result for y in pool] + for prod in result: + yield tuple(prod) + + +## only >=py32 has ssl.match_hostname and ssl.CertificateError +try: + from ssl import match_hostname, CertificateError +except ImportError: + from ssl_match_hostname import match_hostname, CertificateError diff --git a/awx/lib/site-packages/pip/backwardcompat/ssl_match_hostname.py b/awx/lib/site-packages/pip/backwardcompat/ssl_match_hostname.py new file mode 100644 index 0000000000..a6fadf424b --- /dev/null +++ b/awx/lib/site-packages/pip/backwardcompat/ssl_match_hostname.py @@ -0,0 +1,68 @@ +"""The match_hostname() function from Python 3.2, essential when using SSL.""" + +import re + +__version__ = '3.2a3' + +class CertificateError(ValueError): + pass + +def _dnsname_to_pat(dn, max_wildcards=1): + pats = [] + for frag in dn.split(r'.'): + if frag.count('*') > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survery of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + if frag == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + else: + # Otherwise, '*' matches any dotless fragment. 
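+            # (e.g. the fragment 'f*' becomes the pattern 'f[^.]*')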
+ frag = re.escape(frag) + pats.append(frag.replace(r'\*', '[^.]*')) + return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + +def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules + are mostly followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_to_pat(value).match(hostname): + return + dnsnames.append(value) + if not san: + # The subject is only checked when subjectAltName is empty + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. + if key == 'commonName': + if _dnsname_to_pat(value).match(hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") diff --git a/awx/lib/site-packages/pip/basecommand.py b/awx/lib/site-packages/pip/basecommand.py new file mode 100644 index 0000000000..1a22c47f23 --- /dev/null +++ b/awx/lib/site-packages/pip/basecommand.py @@ -0,0 +1,207 @@ +"""Base Command class, and related routines""" + +import os +import socket +import sys +import tempfile +import traceback +import time +import optparse + +from pip.log import logger +from pip.download import urlopen +from pip.exceptions import (BadCommand, InstallationError, UninstallationError, + CommandError) +from pip.backwardcompat import StringIO +from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter +from pip.status_codes import SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND +from pip.util import get_prog + + +__all__ = ['Command'] + + +# for backwards compatibiliy +get_proxy = urlopen.get_proxy + + +class Command(object): + name = None + usage = None + hidden = False + + def __init__(self, main_parser): + parser_kw = { + 'usage': self.usage, + 'prog': '%s %s' % (get_prog(), self.name), + 'formatter': UpdatingDefaultsHelpFormatter(), + 'add_help_option': False, + 'name': self.name, + 'description': self.__doc__, + } + self.main_parser = main_parser + self.parser = ConfigOptionParser(**parser_kw) + + # Commands should add options to this option group + optgroup_name = '%s Options' % self.name.capitalize() + self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name) + + # Re-add all options and option groups. + for group in main_parser.option_groups: + self._copy_option_group(self.parser, group) + + # Copies all general options from the main parser. 
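+        # (e.g. --verbose, --quiet and --log, defined on the main parser,
+        # become valid for every subcommand this way)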
+ self._copy_options(self.parser, main_parser.option_list) + + def _copy_options(self, parser, options): + """Populate an option parser or group with options.""" + for option in options: + if not option.dest: + continue + parser.add_option(option) + + def _copy_option_group(self, parser, group): + """Copy option group (including options) to another parser.""" + new_group = optparse.OptionGroup(parser, group.title) + self._copy_options(new_group, group.option_list) + + parser.add_option_group(new_group) + + def merge_options(self, initial_options, options): + # Make sure we have all global options carried over + attrs = ['log', 'proxy', 'require_venv', + 'log_explicit_levels', 'log_file', + 'timeout', 'default_vcs', + 'skip_requirements_regex', + 'no_input', 'exists_action', + 'cert'] + for attr in attrs: + setattr(options, attr, getattr(initial_options, attr) or getattr(options, attr)) + options.quiet += initial_options.quiet + options.verbose += initial_options.verbose + + def setup_logging(self): + pass + + def main(self, args, initial_options): + options, args = self.parser.parse_args(args) + self.merge_options(initial_options, options) + + level = 1 # Notify + level += options.verbose + level -= options.quiet + level = logger.level_for_integer(4 - level) + complete_log = [] + logger.consumers.extend( + [(level, sys.stdout), + (logger.DEBUG, complete_log.append)]) + if options.log_explicit_levels: + logger.explicit_levels = True + + self.setup_logging() + + #TODO: try to get these passing down from the command? + # without resorting to os.environ to hold these. + + if options.no_input: + os.environ['PIP_NO_INPUT'] = '1' + + if options.exists_action: + os.environ['PIP_EXISTS_ACTION'] = ''.join(options.exists_action) + + if options.cert: + os.environ['PIP_CERT'] = options.cert + + if options.require_venv: + # If a venv is required check if it can really be found + if not os.environ.get('VIRTUAL_ENV'): + logger.fatal('Could not find an activated virtualenv (required).') + sys.exit(VIRTUALENV_NOT_FOUND) + + if options.log: + log_fp = open_logfile(options.log, 'a') + logger.consumers.append((logger.DEBUG, log_fp)) + else: + log_fp = None + + socket.setdefaulttimeout(options.timeout or None) + + urlopen.setup(proxystr=options.proxy, prompting=not options.no_input) + + exit = SUCCESS + store_log = False + try: + status = self.run(options, args) + # FIXME: all commands should return an exit status + # and when it is done, isinstance is not needed anymore + if isinstance(status, int): + exit = status + except (InstallationError, UninstallationError): + e = sys.exc_info()[1] + logger.fatal(str(e)) + logger.info('Exception information:\n%s' % format_exc()) + store_log = True + exit = ERROR + except BadCommand: + e = sys.exc_info()[1] + logger.fatal(str(e)) + logger.info('Exception information:\n%s' % format_exc()) + store_log = True + exit = ERROR + except CommandError: + e = sys.exc_info()[1] + logger.fatal('ERROR: %s' % e) + logger.info('Exception information:\n%s' % format_exc()) + exit = ERROR + except KeyboardInterrupt: + logger.fatal('Operation cancelled by user') + logger.info('Exception information:\n%s' % format_exc()) + store_log = True + exit = ERROR + except: + logger.fatal('Exception:\n%s' % format_exc()) + store_log = True + exit = UNKNOWN_ERROR + if log_fp is not None: + log_fp.close() + if store_log: + log_fn = options.log_file + text = '\n'.join(complete_log) + try: + log_fp = open_logfile(log_fn, 'w') + except IOError: + temp = tempfile.NamedTemporaryFile(delete=False) + log_fn = 
temp.name + log_fp = open_logfile(log_fn, 'w') + logger.fatal('Storing complete log in %s' % log_fn) + log_fp.write(text) + log_fp.close() + return exit + + +def format_exc(exc_info=None): + if exc_info is None: + exc_info = sys.exc_info() + out = StringIO() + traceback.print_exception(*exc_info, **dict(file=out)) + return out.getvalue() + + +def open_logfile(filename, mode='a'): + """Open the named log file in append mode. + + If the file already exists, a separator will also be printed to + the file to separate past activity from current activity. + """ + filename = os.path.expanduser(filename) + filename = os.path.abspath(filename) + dirname = os.path.dirname(filename) + if not os.path.exists(dirname): + os.makedirs(dirname) + exists = os.path.exists(filename) + + log_fp = open(filename, mode) + if exists: + log_fp.write('%s\n' % ('-' * 60)) + log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c'))) + return log_fp diff --git a/awx/lib/site-packages/pip/baseparser.py b/awx/lib/site-packages/pip/baseparser.py new file mode 100644 index 0000000000..61a73ce002 --- /dev/null +++ b/awx/lib/site-packages/pip/baseparser.py @@ -0,0 +1,368 @@ +"""Base option parser setup""" + +import sys +import optparse +import pkg_resources +import os +import textwrap +from distutils.util import strtobool +from pip.backwardcompat import ConfigParser, string_types +from pip.locations import default_config_file, default_log_file +from pip.util import get_terminal_size, get_prog + + +class PrettyHelpFormatter(optparse.IndentedHelpFormatter): + """A prettier/less verbose help formatter for optparse.""" + + def __init__(self, *args, **kwargs): + # help position must be aligned with __init__.parseopts.description + kwargs['max_help_position'] = 30 + kwargs['indent_increment'] = 1 + kwargs['width'] = get_terminal_size()[0] - 2 + optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs) + + def format_option_strings(self, option): + return self._format_option_strings(option, ' <%s>', ', ') + + def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '): + """ + Return a comma-separated list of option strings and metavars. + + :param option: tuple of (short opt, long opt), e.g: ('-f', '--format') + :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar + :param optsep: separator + """ + opts = [] + + if option._short_opts: + opts.append(option._short_opts[0]) + if option._long_opts: + opts.append(option._long_opts[0]) + if len(opts) > 1: + opts.insert(1, optsep) + + if option.takes_value(): + metavar = option.metavar or option.dest.lower() + opts.append(mvarfmt % metavar.lower()) + + return ''.join(opts) + + def format_heading(self, heading): + if heading == 'Options': + return '' + return heading + ':\n' + + def format_usage(self, usage): + """ + Ensure there is only one newline between usage and the first heading + if there is no description. 
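+
+        E.g. a usage string of "%prog [options]" is rendered on its own
+        line as "Usage: pip [options]" (optparse expands %prog before
+        calling this method).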
+ """ + msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ") + return msg + + def format_description(self, description): + # leave full control over description to us + if description: + if hasattr(self.parser, 'main'): + label = 'Commands' + else: + label = 'Description' + #some doc strings have inital newlines, some don't + description = description.lstrip('\n') + #some doc strings have final newlines and spaces, some don't + description = description.rstrip() + #dedent, then reindent + description = self.indent_lines(textwrap.dedent(description), " ") + description = '%s:\n%s\n' % (label, description) + return description + else: + return '' + + def format_epilog(self, epilog): + # leave full control over epilog to us + if epilog: + return epilog + else: + return '' + + def indent_lines(self, text, indent): + new_lines = [indent + line for line in text.split('\n')] + return "\n".join(new_lines) + + +class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): + """Custom help formatter for use in ConfigOptionParser that updates + the defaults before expanding them, allowing them to show up correctly + in the help listing""" + + def expand_default(self, option): + if self.parser is not None: + self.parser.update_defaults(self.parser.defaults) + return optparse.IndentedHelpFormatter.expand_default(self, option) + + +class CustomOptionParser(optparse.OptionParser): + def insert_option_group(self, idx, *args, **kwargs): + """Insert an OptionGroup at a given position.""" + group = self.add_option_group(*args, **kwargs) + + self.option_groups.pop() + self.option_groups.insert(idx, group) + + return group + + @property + def option_list_all(self): + """Get a list of all options, including those in option groups.""" + res = self.option_list[:] + for i in self.option_groups: + res.extend(i.option_list) + + return res + + +class ConfigOptionParser(CustomOptionParser): + """Custom option parser which updates its defaults by by checking the + configuration files and environmental variables""" + + def __init__(self, *args, **kwargs): + self.config = ConfigParser.RawConfigParser() + self.name = kwargs.pop('name') + self.files = self.get_config_files() + self.config.read(self.files) + assert self.name + optparse.OptionParser.__init__(self, *args, **kwargs) + + def get_config_files(self): + config_file = os.environ.get('PIP_CONFIG_FILE', False) + if config_file and os.path.exists(config_file): + return [config_file] + return [default_config_file] + + def update_defaults(self, defaults): + """Updates the given defaults with values from the config files and + the environ. Does a little special handling for certain types of + options (lists).""" + # Then go and look for the other sources of configuration: + config = {} + # 1. config files + for section in ('global', self.name): + config.update(self.normalize_keys(self.get_config_section(section))) + # 2. 
environmental variables + config.update(self.normalize_keys(self.get_environ_vars())) + # Then set the options with those values + for key, val in config.items(): + option = self.get_option(key) + if option is not None: + # ignore empty values + if not val: + continue + # handle multiline configs + if option.action == 'append': + val = val.split() + else: + option.nargs = 1 + if option.action in ('store_true', 'store_false', 'count'): + val = strtobool(val) + try: + val = option.convert_value(key, val) + except optparse.OptionValueError: + e = sys.exc_info()[1] + print("An error occurred during configuration: %s" % e) + sys.exit(3) + defaults[option.dest] = val + return defaults + + def normalize_keys(self, items): + """Return a config dictionary with normalized keys regardless of + whether the keys were specified in environment variables or in config + files""" + normalized = {} + for key, val in items: + key = key.replace('_', '-') + if not key.startswith('--'): + key = '--%s' % key # only prefer long opts + normalized[key] = val + return normalized + + def get_config_section(self, name): + """Get a section of a configuration""" + if self.config.has_section(name): + return self.config.items(name) + return [] + + def get_environ_vars(self, prefix='PIP_'): + """Returns a generator with all environmental vars with prefix PIP_""" + for key, val in os.environ.items(): + if key.startswith(prefix): + yield (key.replace(prefix, '').lower(), val) + + def get_default_values(self): + """Overridding to make updating the defaults after instantiation of + the option parser possible, update_defaults() does the dirty work.""" + if not self.process_default_values: + # Old, pre-Optik 1.5 behaviour. + return optparse.Values(self.defaults) + + defaults = self.update_defaults(self.defaults.copy()) # ours + for option in self._get_all_options(): + default = defaults.get(option.dest) + if isinstance(default, string_types): + opt_str = option.get_opt_string() + defaults[option.dest] = option.check_value(opt_str, default) + return optparse.Values(defaults) + + def error(self, msg): + self.print_usage(sys.stderr) + self.exit(2, "%s\n" % msg) + + +try: + pip_dist = pkg_resources.get_distribution('pip') + version = '%s from %s (python %s)' % ( + pip_dist, pip_dist.location, sys.version[:3]) +except pkg_resources.DistributionNotFound: + # when running pip.py without installing + version = None + + +def create_main_parser(): + parser_kw = { + 'usage': '\n%prog [options]', + 'add_help_option': False, + 'formatter': UpdatingDefaultsHelpFormatter(), + 'name': 'global', + 'prog': get_prog(), + } + + parser = ConfigOptionParser(**parser_kw) + genopt = optparse.OptionGroup(parser, 'General Options') + parser.disable_interspersed_args() + + # having a default version action just causes trouble + parser.version = version + + for opt in standard_options: + genopt.add_option(opt) + parser.add_option_group(genopt) + + return parser + + +standard_options = [ + optparse.make_option( + '-h', '--help', + dest='help', + action='help', + help='Show help.'), + + optparse.make_option( + # Run only if inside a virtualenv, bail if not. + '--require-virtualenv', '--require-venv', + dest='require_venv', + action='store_true', + default=False, + help=optparse.SUPPRESS_HELP), + + optparse.make_option( + '-v', '--verbose', + dest='verbose', + action='count', + default=0, + help='Give more output. 
Option is additive, and can be used up to 3 times.'), + + optparse.make_option( + '-V', '--version', + dest='version', + action='store_true', + help='Show version and exit.'), + + optparse.make_option( + '-q', '--quiet', + dest='quiet', + action='count', + default=0, + help='Give less output.'), + + optparse.make_option( + '--log', + dest='log', + metavar='file', + help='Log file where a complete (maximum verbosity) record will be kept.'), + + optparse.make_option( + # Writes the log levels explicitely to the log' + '--log-explicit-levels', + dest='log_explicit_levels', + action='store_true', + default=False, + help=optparse.SUPPRESS_HELP), + + optparse.make_option( + # The default log file + '--local-log', '--log-file', + dest='log_file', + metavar='file', + default=default_log_file, + help=optparse.SUPPRESS_HELP), + + optparse.make_option( + # Don't ask for input + '--no-input', + dest='no_input', + action='store_true', + default=False, + help=optparse.SUPPRESS_HELP), + + optparse.make_option( + '--proxy', + dest='proxy', + type='str', + default='', + help="Specify a proxy in the form [user:passwd@]proxy.server:port."), + + optparse.make_option( + '--timeout', '--default-timeout', + metavar='sec', + dest='timeout', + type='float', + default=15, + help='Set the socket timeout (default %default seconds).'), + + optparse.make_option( + # The default version control system for editables, e.g. 'svn' + '--default-vcs', + dest='default_vcs', + type='str', + default='', + help=optparse.SUPPRESS_HELP), + + optparse.make_option( + # A regex to be used to skip requirements + '--skip-requirements-regex', + dest='skip_requirements_regex', + type='str', + default='', + help=optparse.SUPPRESS_HELP), + + optparse.make_option( + # Option when path already exist + '--exists-action', + dest='exists_action', + type='choice', + choices=['s', 'i', 'w', 'b'], + default=[], + action='append', + metavar='action', + help="Default action when a path already exists: " + "(s)witch, (i)gnore, (w)ipe, (b)ackup."), + + optparse.make_option( + '--cert', + dest='cert', + type='str', + default='', + metavar='path', + help = "Path to alternate CA bundle."), + + ] diff --git a/awx/lib/site-packages/pip/cacert.pem b/awx/lib/site-packages/pip/cacert.pem new file mode 100644 index 0000000000..99b310bce9 --- /dev/null +++ b/awx/lib/site-packages/pip/cacert.pem @@ -0,0 +1,3895 @@ +## +## ca-bundle.crt -- Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Sat Dec 29 20:03:40 2012 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1 +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. 
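+##
+## pip ships this bundle so HTTPS downloads can be verified out of the
+## box; the --cert option defined in baseparser.py above points pip at
+## an alternate bundle instead.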
+## + +# @(#) $RCSfile: certdata.txt,v $ $Revision: 1.87 $ $Date: 2012/12/29 16:32:45 $ + +GTE CyberTrust Global Root +========================== +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUg +Q29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEG +A1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJvb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEz +MjM1OTAwWjB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQL +Ex5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0 +IEdsb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrHiM3dFw4u +sJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTSr41tiGeA5u2ylc9yMcql +HHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X404Wqk2kmhXBIgD8SFcd5tB8FLztimQID +AQABMA0GCSqGSIb3DQEBBAUAA4GBAG3rGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMW +M4ETCJ57NE7fQMh017l93PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OF +NMQkpw0PlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +Thawte Server CA +================ +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UE +AxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5j +b20wHhcNOTYwODAxMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNV +BAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29u +c3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcG +A1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0 +ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl +/Kj0R1HahbUgdJSGHg91yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg7 +1CcEJRCXL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGjEzAR +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG7oWDTSEwjsrZqG9J +GubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6eQNuozDJ0uW8NxuOzRAvZim+aKZuZ +GCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZqdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UE +AxMYVGhhd3RlIFByZW1pdW0gU2VydmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZl +ckB0aGF3dGUuY29tMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsGA1UEChMU +VGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2VydmljZXMgRGl2 +aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNlcnZlciBDQTEoMCYGCSqGSIb3DQEJARYZ +cHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2 +aovXwlue2oFBYo847kkEVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIh +Udib0GfQug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMRuHM/ +qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQFAAOBgQAm +SCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUIhfzJATj/Tb7yFkJD57taRvvBxhEf +8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JMpAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7t +UCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- 
+MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +Digital Signature Trust Co. Global CA 1 +======================================= +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIENnAVljANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEkMCIGA1UE +ChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMREwDwYDVQQLEwhEU1RDQSBFMTAeFw05ODEy +MTAxODEwMjNaFw0xODEyMTAxODQwMjNaMEYxCzAJBgNVBAYTAlVTMSQwIgYDVQQKExtEaWdpdGFs +IFNpZ25hdHVyZSBUcnVzdCBDby4xETAPBgNVBAsTCERTVENBIEUxMIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQCgbIGpzzQeJN3+hijM3oMv+V7UQtLodGBmE5gGHKlREmlvMVW5SXIACH7TpWJE +NySZj9mDSI+ZbZUTu0M7LklOiDfBu1h//uG9+LthzfNHwJmm8fOR6Hh8AMthyUQncWlVSn5JTe2i +o74CTADKAqjuAQIxZA9SLRN0dja1erQtcQIBA6OCASQwggEgMBEGCWCGSAGG+EIBAQQEAwIABzBo +BgNVHR8EYTBfMF2gW6BZpFcwVTELMAkGA1UEBhMCVVMxJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0 +dXJlIFRydXN0IENvLjERMA8GA1UECxMIRFNUQ0EgRTExDTALBgNVBAMTBENSTDEwKwYDVR0QBCQw +IoAPMTk5ODEyMTAxODEwMjNagQ8yMDE4MTIxMDE4MTAyM1owCwYDVR0PBAQDAgEGMB8GA1UdIwQY +MBaAFGp5fpFpRhgTCgJ3pVlbYJglDqL4MB0GA1UdDgQWBBRqeX6RaUYYEwoCd6VZW2CYJQ6i+DAM +BgNVHRMEBTADAQH/MBkGCSqGSIb2fQdBAAQMMAobBFY0LjADAgSQMA0GCSqGSIb3DQEBBQUAA4GB +ACIS2Hod3IEGtgllsofIH160L+nEHvI8wbsEkBFKg05+k7lNQseSJqBcNJo4cvj9axY+IO6CizEq +kzaFI4iKPANo08kJD038bKTaKHKTDomAsH3+gG9lbRgzl4vCa4nuYD3Im+9/KzJic5PLPON74nZ4 +RbyhkwS7hp86W0N6w4pl +-----END CERTIFICATE----- + +Digital Signature Trust Co. 
Global CA 3 +======================================= +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIENm7TzjANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEkMCIGA1UE +ChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMREwDwYDVQQLEwhEU1RDQSBFMjAeFw05ODEy +MDkxOTE3MjZaFw0xODEyMDkxOTQ3MjZaMEYxCzAJBgNVBAYTAlVTMSQwIgYDVQQKExtEaWdpdGFs +IFNpZ25hdHVyZSBUcnVzdCBDby4xETAPBgNVBAsTCERTVENBIEUyMIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQC/k48Xku8zExjrEH9OFr//Bo8qhbxe+SSmJIi2A7fBw18DW9Fvrn5C6mYjuGOD +VvsoLeE4i7TuqAHhzhy2iCoiRoX7n6dwqUcUP87eZfCocfdPJmyMvMa1795JJ/9IKn3oTQPMx7JS +xhcxEzu1TdvIxPbDDyQq2gyd55FbgM2UnQIBA6OCASQwggEgMBEGCWCGSAGG+EIBAQQEAwIABzBo +BgNVHR8EYTBfMF2gW6BZpFcwVTELMAkGA1UEBhMCVVMxJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0 +dXJlIFRydXN0IENvLjERMA8GA1UECxMIRFNUQ0EgRTIxDTALBgNVBAMTBENSTDEwKwYDVR0QBCQw +IoAPMTk5ODEyMDkxOTE3MjZagQ8yMDE4MTIwOTE5MTcyNlowCwYDVR0PBAQDAgEGMB8GA1UdIwQY +MBaAFB6CTShlgDzJQW6sNS5ay97u+DlbMB0GA1UdDgQWBBQegk0oZYA8yUFurDUuWsve7vg5WzAM +BgNVHRMEBTADAQH/MBkGCSqGSIb2fQdBAAQMMAobBFY0LjADAgSQMA0GCSqGSIb3DQEBBQUAA4GB +AEeNg61i8tuwnkUiBbmi1gMOOHLnnvx75pO2mqWilMg0HZHRxdf0CiUPPXiBng+xZ8SQTGPdXqfi +up/1902lMXucKS1M/mQ+7LZT/uqb7YLbdHVLB3luHtgZg3Pe9T7Qtd7nS2h9Qy4qIOF+oHhEngj1 +mPnHfxsb1gYgAlihw6ID +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBAgUAA4GBALtMEivPLCYA +TxQT3ab7/AoRhIzzKBxnki98tsX63/Dolbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59Ah +WM1pF+NEHJwZRDmJXNycAA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2Omuf +Tqj/ZA1k +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEEzH6qqYPnHTkxD4PTqJkZIwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCq0Lq+Fi24g9TK0g+8djHKlNgd +k4xWArzZbxpvUjZudVYKVdPfQ4chEWWKfo+9Id5rMj8bhDSVBZ1BNeuS65bdqlk/AVNtmU/t5eIq +WpDBucSmFc/IReumXY6cPvBkJHalzasab7bYe1FhbqZ/h8jit+U03EGI6glAvnOSPWvndQIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAKlPww3HZ74sy9mozS11534Vnjty637rXC0Jh9ZrbWB85a7FkCMM +XErQr7Fd88e2CtvgFZMN3QO8x3aKtd1Pw5sTdbgBwObJW2uluIncrKTdcu1OofdPvAbT6shkdHvC +lUGcZXNY8ZCaPGqxmMnEh7zPRW1F4m4iP/68DzFc6PLZ +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G2 +============================================================ 
+-----BEGIN CERTIFICATE----- +MIIDAzCCAmwCEQC5L2DMiJ+hekYJuFtwbIqvMA0GCSqGSIb3DQEBBQUAMIHBMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGljIFByaW1h +cnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNp +Z24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazAeFw05ODA1MTgwMDAwMDBaFw0yODA4MDEyMzU5NTlaMIHBMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGljIFByaW1h +cnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNp +Z24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAp4gBIXQs5xoD8JjhlzwPIQjx +nNuX6Zr8wgQGE75fUsjMHiwSViy4AWkszJkfrbCWrnkE8hM5wXuYuggs6MKEEyyqaekJ9MepAqRC +wiNPStjwDqL7MWzJ5m+ZJwf15vRMeJ5t60aG+rmGyVTyssSv1EYcWskVMP8NbPUtDm3Of3cCAwEA +ATANBgkqhkiG9w0BAQUFAAOBgQByLvl/0fFx+8Se9sVeUYpAmLho+Jscg9jinb3/7aHmZuovCfTK +1+qlK5X2JGCGTUQug6XELaDTrnhpb3LabK4I8GOSN+a7xDAXrXfMSTWqz9iP0b63GJZHc2pUIjRk +LbYWm1lbtFFZOrMLFPQS32eg9K0yZF6xRnInjBJ7xUS0rg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCO +FoUgRm1HP9SFIIThbbP4pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71 +lSk8UOg013gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSkU01UbSuvDV1Ai2TT +1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7iF6YM40AIOw7n60RzKprxaZLvcRTD +Oaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpYoJ2daZH9 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH 
+hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +ValiCert Class 1 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIy +MjM0OFoXDTE5MDYyNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9YLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIi +GQj4/xEjm84H9b9pGib+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCm +DuJWBQ8YTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0LBwG +lN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLWI8sogTLDAHkY7FkX +icnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPwnXS3qT6gpf+2SQMT2iLM7XGCK5nP +Orf1LXLI +-----END CERTIFICATE----- + +ValiCert Class 2 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MTk1NFoXDTE5MDYyNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDOOnHK5avIWZJV16vYdA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVC +CSRrCl6zfN1SLUzm1NZ9WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7Rf 
+ZHM047QSv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9vUJSZ +SWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTuIYEZoDJJKPTEjlbV +UjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwCW/POuZ6lcg5Ktz885hZo+L7tdEy8 +W9ViH0Pd +-----END CERTIFICATE----- + +RSA Root Certificate 1 +====================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MjIzM1oXDTE5MDYyNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDjmFGWHOjVsQaBalfDcnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td +3zZxFJmP3MKS8edgkpfs2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89H +BFx1cQqYJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliEZwgs +3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJn0WuPIqpsHEzXcjF +V9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/APhmcGcwTTYJBtYze4D1gCCAPRX5r +on+jjBXu +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDEgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAN2E1Lm0+afY8wR4nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/E +bRrsC+MO8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjVojYJ +rKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjbPG7PoBMAGrgnoeS+ +Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP26KbqxzcSXKMpHgLZ2x87tNcPVkeB +FQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vrn5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +q2aN17O6x5q25lXQBfGfMY1aqtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/N +y9Sn2WCVhDr4wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3 +ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrspSCAaWihT37h +a88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4E1Z5T21Q6huwtVexN2ZYI/Pc +D98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g== +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1c3QgTmV0d29y +azE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ug +b25seTFFMEMGA1UEAxM8VmVyaVNpZ24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0 
+aW9uIEF1dGhvcml0eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9y +aXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArwoNwtUs22e5LeWUJ92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6 +tW8UvxDOJxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUYwZF7 +C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9okoqQHgiBVrKtaaNS +0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjNqWm6o+sdDZykIKbBoMXRRkwXbdKs +Zj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/ESrg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0 +JhU8wI1NQ0kdvekhktdmnLfexbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf +0xwLRtxyID+u7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU +sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RIsH/7NiXaldDx +JBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTPcjnhsUPgKM+351psE2tJs//j +GHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv 
+cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Secure Server CA +============================ +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5uZXQvQ1BTIGluY29ycC4gYnkg +cmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRl +ZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTAeFw05OTA1MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIG +A1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBi +eSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1p +dGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQ +aO2f55M28Qpku0f1BBc/I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5 +gXpa0zf3wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OCAdcw +ggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHboIHYpIHVMIHSMQsw +CQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5l +dC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0MFqBDzIwMTkw +NTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8BdiE1U9s/8KAGv7UISX8+1i0Bow +HQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAaMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA +BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyN +Ewr75Ji174z4xRAN95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9 +n9cd2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0xOTEyMjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A 
+MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo3QwcjARBglghkgBhvhC +AQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGAvtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdER +gL7YibkIozH5oSQJFrlwMB0GCSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0B +AQUFAAOCAQEAWUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo +oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQh7A6tcOdBTcS +o8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18f3v/rxzP5tsHrV7bhZ3QKw0z +2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfNB/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjX +OP/swNlQ8C5LWK5Gb9Auw2DaclVyvUxFnmG6v4SBkgPR0ml8xQ== +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNp +bmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMx +HDAaBgNVBAoTE0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEds +b2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRV +PEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzN +qfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxn +hcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hs +MA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okEN +I7SS+RkAZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIY +NMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENB 
+LTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQwMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UE +ChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNz +IENBLTEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ +1MRoRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBuWqDZQu4a +IZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKwEnv+j6YDAgMBAAGjZjBk +MBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kW +Nl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRKeDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQF +AAOBgQB1W6ibAxHm6VZMzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5 +lSE/9dR+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN/Bf+ +KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 2 +============================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEXMBUGA1UE +ChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJlIGVCdXNpbmVzcyBDQS0y +MB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoT +DkVxdWlmYXggU2VjdXJlMSYwJAYDVQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn +2Z0GvxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/BPO3QSQ5 +BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0CAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUx +JjAkBgNVBAsTHUVxdWlmYXggU2VjdXJlIGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTkwNjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9e +uSBIplBqy/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAAyGgq3oThr1 +jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia +78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUm +V+GRMOrN +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + 
+AddTrust External Root +====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy +2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL +VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx 
+CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb 
+WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN 
+R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg 
+Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +UTN-USER First-Network Applications +=================================== +-----BEGIN CERTIFICATE----- +MIIEZDCCA0ygAwIBAgIQRL4Mi1AAJLQR0zYwS8AzdzANBgkqhkiG9w0BAQUFADCBozELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xKzAp +BgNVBAMTIlVUTi1VU0VSRmlyc3QtTmV0d29yayBBcHBsaWNhdGlvbnMwHhcNOTkwNzA5MTg0ODM5 +WhcNMTkwNzA5MTg1NzQ5WjCBozELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5T +YWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xKzApBgNVBAMTIlVUTi1VU0VSRmlyc3QtTmV0d29yayBB +cHBsaWNhdGlvbnMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCz+5Gh5DZVhawGNFug +mliy+LUPBXeDrjKxdpJo7CNKyXY/45y2N3kDuatpjQclthln5LAbGHNhSuh+zdMvZOOmfAz6F4Cj +DUeJT1FxL+78P/m4FoCHiZMlIJpDgmkkdihZNaEdwH+DBmQWICzTSaSFtMBhf1EI+GgVkYDLpdXu +Ozr0hAReYFmnjDRy7rh4xdE7EkpvfmUnuaRVxblvQ6TFHSyZwFKkeEwVs0CYCGtDxgGwenv1axwi +P8vv/6jQOkt2FZ7S0cYu49tXGzKiuG/ohqY/cKvlcJKrRB5AUPuco2LkbG6gyN7igEL66S/ozjIE +j3yNtxyjNTwV3Z7DrpelAgMBAAGjgZEwgY4wCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8w +HQYDVR0OBBYEFPqGydvguul49Uuo1hXf8NPhahQ8ME8GA1UdHwRIMEYwRKBCoECGPmh0dHA6Ly9j +cmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LU5ldHdvcmtBcHBsaWNhdGlvbnMuY3JsMA0G +CSqGSIb3DQEBBQUAA4IBAQCk8yXM0dSRgyLQzDKrm5ZONJFUICU0YV8qAhXhi6r/fWRRzwr/vH3Y +IWp4yy9Rb/hCHTO967V7lMPDqaAt39EpHx3+jz+7qEUqf9FuVSTiuwL7MT++6LzsQCv4AdRWOOTK +RIK1YSAhZ2X28AvnNPilwpyjXEAfhZOVBt5P1CeptqX8Fs1zMT+4ZSfP1FMa8Kxun08FDAOBp4Qp +xFq9ZFdyrTvPNximmMatBrTcCKME1SmklpoSZ0qMYEWd8SOasACcaLWYUNPvji6SZbFIPiG+FTAq +DbUMo2s/rn9X9R+WfN9v3YIwLGUbQErNaLly7HF27FSOH4UMAWr6pjisH8SE +-----END CERTIFICATE----- + +America Online Root Certification Authority 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lkhsmj76CG +v2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym1BW32J/X3HGrfpq/m44z +DyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsWOqMFf6Dch9Wc/HKpoH145LcxVR5lu9Rh +sCFg7RAycsWSJR74kEoYeEfffjA3PlAb2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP +8c9GsEsPPt2IYriMqQkoO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAUAK3Z +o/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQB8itEf +GDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkFZu90821fnZmv9ov761KyBZiibyrF +VL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAbLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft +3OJvx8Fi8eNy1gTIdGcL+oiroQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43g +Kd8hdIaC2y+CMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +America Online Root Certification Authority 2 
+============================================= +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC206B89en +fHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFciKtZHgVdEglZTvYYUAQv8 +f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2JxhP7JsowtS013wMPgwr38oE18aO6lhO +qKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JN +RvCAOVIyD+OEsnpD8l7eXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0 +gBe4lL8BPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67Xnfn +6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEqZ8A9W6Wa6897Gqid +FEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZo2C7HK2JNDJiuEMhBnIMoVxtRsX6 +Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnj +B453cMor9H124HhnAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3Op +aaEg5+31IqEjFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmnxPBUlgtk87FY +T15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2LHo1YGwRgJfMqZJS5ivmae2p ++DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzcccobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXg +JXUjhx5c3LqdsKyzadsXg8n33gy8CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//Zoy +zH1kUQ7rVyZ2OuMeIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgO +ZtMADjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2FAjgQ5ANh +1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUXOm/9riW99XJZZLF0Kjhf +GEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPbAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDff +Z4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQlZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuP +cX/9XhmgD0uRuMRUvAawRY8mkaKO/qk= +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- 
+MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC 
+oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 
+MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC 
+DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 1 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBJDANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG 
+U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MxIENBMB4XDTAxMDQwNjEwNDkxM1oXDTIxMDQw +NjEwNDkxM1owOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALWJHytPZwp5/8Ue+H88 +7dF+2rDNbS82rDTG29lkFwhjMDMiikzujrsPDUJVyZ0upe/3p4zDq7mXy47vPxVnqIJyY1MPQYx9 +EJUkoVqlBvqSV536pQHydekfvFYmUk54GWVYVQNYwBSujHxVX3BbdyMGNpfzJLWaRpXk3w0LBUXl +0fIdgrvGE+D+qnr9aTCU89JFhfzyMlsy3uhsXR/LpCJ0sICOXZT3BgBLqdReLjVQCfOAl/QMF645 +2F/NM8EcyonCIvdFEu1eEpOdY6uCLrnrQkFEy0oaAIINnvmLVz5MxxftLItyM19yejhW1ebZrgUa +HXVFsculJRwSVzb9IjcCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQIR+IMi/ZT +iFIwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCLGrLJXWG04bkruVPRsoWdd44W7hE9 +28Jj2VuXZfsSZ9gqXLar5V7DtxYvyOirHYr9qxp81V9jz9yw3Xe5qObSIjiHBxTZ/75Wtf0HDjxV +yhbMp6Z3N/vbXB9OWQaHowND9Rart4S9Tu+fMTfwRvFAttEMpWT4Y14h21VOTzF2nBBhjrZTOqMR +vq9tfB69ri3iDGnHhVNoomG6xT60eVR4ngrHAr5i0RGCS2UvkVrCqIexVmiUefkl98HVrhq4uz2P +qYo4Ffdz0Fpg0YCw8NzVUM1O7pJIae2yIx4wzMiUyLb1O4Z/P6Yun/Y+LLWSlj7fLJOK/4GMDw9Z +IRlXvVWa +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y 
+nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +TDC Internet Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIEKzCCAxOgAwIBAgIEOsylTDANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJESzEVMBMGA1UE +ChMMVERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTAeFw0wMTA0MDUx +NjMzMTdaFw0yMTA0MDUxNzAzMTdaMEMxCzAJBgNVBAYTAkRLMRUwEwYDVQQKEwxUREMgSW50ZXJu +ZXQxHTAbBgNVBAsTFFREQyBJbnRlcm5ldCBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxLhAvJHVYx/XmaCLDEAedLdInUaMArLgJF/wGROnN4NrXceO+YQwzho7+vvOi20j +xsNuZp+Jpd/gQlBn+h9sHvTQBda/ytZO5GhgbEaqHF1j4QeGDmUApy6mcca8uYGoOn0a0vnRrEvL +znWv3Hv6gXPU/Lq9QYjUdLP5Xjg6PEOo0pVOd20TDJ2PeAG3WiAfAzc14izbSysseLlJ28TQx5yc +5IogCSEWVmb/Bexb4/DPqyQkXsN/cHoSxNK1EKC2IeGNeGlVRGn1ypYcNIUXJXfi9i8nmHj9eQY6 +otZaQ8H/7AQ77hPv01ha/5Lr7K7a8jcDR0G2l8ktCkEiu7vmpwIDAQABo4IBJTCCASEwEQYJYIZI +AYb4QgEBBAQDAgAHMGUGA1UdHwReMFwwWqBYoFakVDBSMQswCQYDVQQGEwJESzEVMBMGA1UEChMM +VERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTENMAsGA1UEAxMEQ1JM +MTArBgNVHRAEJDAigA8yMDAxMDQwNTE2MzMxN1qBDzIwMjEwNDA1MTcwMzE3WjALBgNVHQ8EBAMC +AQYwHwYDVR0jBBgwFoAUbGQBx/2FbazI2p5QCIUItTxWqFAwHQYDVR0OBBYEFGxkAcf9hW2syNqe +UAiFCLU8VqhQMAwGA1UdEwQFMAMBAf8wHQYJKoZIhvZ9B0EABBAwDhsIVjUuMDo0LjADAgSQMA0G +CSqGSIb3DQEBBQUAA4IBAQBOQ8zR3R0QGwZ/t6T609lN+yOfI1Rb5osvBCiLtSdtiaHsmGnc540m +gwV5dOy0uaOXwTUA/RXaOYE6lTGQ3pfphqiZdwzlWqCE/xIWrG64jcN7ksKsLtB9KOy282A4aW8+ +2ARVPp7MVdK6/rtHBNcK2RYKNCn1WBPVT8+PVkuzHu7TmHnaCB4Mb7j4Fifvwm899qNLPg7kbWzb +O0ESm70NRyN/PErQr8Cv9u8btRXE64PECV90i9kR+8JWsTz4cMo0jUNAE4z9mQNUecYu6oah9jrU +Cbz0vGbMPVjQV0kK7iXiQe4T+Zs4NNEA9X7nlB38aQNiuJkFBT1reBK9sG9l +-----END CERTIFICATE----- + +TDC OCES Root CA +================ +-----BEGIN CERTIFICATE----- +MIIFGTCCBAGgAwIBAgIEPki9xDANBgkqhkiG9w0BAQUFADAxMQswCQYDVQQGEwJESzEMMAoGA1UE +ChMDVERDMRQwEgYDVQQDEwtUREMgT0NFUyBDQTAeFw0wMzAyMTEwODM5MzBaFw0zNzAyMTEwOTA5 +MzBaMDExCzAJBgNVBAYTAkRLMQwwCgYDVQQKEwNUREMxFDASBgNVBAMTC1REQyBPQ0VTIENBMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArGL2YSCyz8DGhdfjeebM7fI5kqSXLmSjhFuH +nEz9pPPEXyG9VhDr2y5h7JNp46PMvZnDBfwGuMo2HP6QjklMxFaaL1a8z3sM8W9Hpg1DTeLpHTk0 +zY0s2RKY+ePhwUp8hjjEqcRhiNJerxomTdXkoCJHhNlktxmW/OwZ5LKXJk5KTMuPJItUGBxIYXvV +iGjaXbXqzRowwYCDdlCqT9HU3Tjw7xb04QxQBr/q+3pJoSgrHPb8FTKjdGqPqcNiKXEx5TukYBde +dObaE+3pHx8b0bJoc8YQNHVGEBDjkAB2QMuLt0MJIf+rTpPGWOmlgtt3xDqZsXKVSQTwtyv6e1mO +3QIDAQABo4ICNzCCAjMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwgewGA1UdIASB +5DCB4TCB3gYIKoFQgSkBAQEwgdEwLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuY2VydGlmaWthdC5k +ay9yZXBvc2l0b3J5MIGdBggrBgEFBQcCAjCBkDAKFgNUREMwAwIBARqBgUNlcnRpZmlrYXRlciBm +cmEgZGVubmUgQ0EgdWRzdGVkZXMgdW5kZXIgT0lEIDEuMi4yMDguMTY5LjEuMS4xLiBDZXJ0aWZp +Y2F0ZXMgZnJvbSB0aGlzIENBIGFyZSBpc3N1ZWQgdW5kZXIgT0lEIDEuMi4yMDguMTY5LjEuMS4x +LjARBglghkgBhvhCAQEEBAMCAAcwgYEGA1UdHwR6MHgwSKBGoESkQjBAMQswCQYDVQQGEwJESzEM +MAoGA1UEChMDVERDMRQwEgYDVQQDEwtUREMgT0NFUyBDQTENMAsGA1UEAxMEQ1JMMTAsoCqgKIYm +aHR0cDovL2NybC5vY2VzLmNlcnRpZmlrYXQuZGsvb2Nlcy5jcmwwKwYDVR0QBCQwIoAPMjAwMzAy +MTEwODM5MzBagQ8yMDM3MDIxMTA5MDkzMFowHwYDVR0jBBgwFoAUYLWF7FZkfhIZJ2cdUBVLc647 ++RIwHQYDVR0OBBYEFGC1hexWZH4SGSdnHVAVS3OuO/kSMB0GCSqGSIb2fQdBAAQQMA4bCFY2LjA6 +NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEACromJkbTc6gJ82sLMJn9iuFXehHTuJTXCRBuo7E4 +A9G28kNBKWKnctj7fAXmMXAnVBhOinxO5dHKjHiIzxvTkIvmI/gLDjNDfZziChmPyQE+dF10yYsc +A+UYyAFMP8uXBV2YcaaYb7Z8vTd/vuGTJW1v8AqtFxjhA7wHKcitJuj4YfD9IQl+mo6paH1IYnK9 +AOoBmbgGglGBTvH1tJFUuSN6AJqfXY3gPGS5GhKSKseCRHI53OI8xthV9RVOyAUO28bQYqbsFbS1 
+AoLbrIyigfCbmTH1ICCoiGEKB5+U/NDXG8wuF/MEJ3Zn61SD/aSQfgY9BKNDLdr8C2LqL19iUw== +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Email Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIEojCCA4qgAwIBAgIQRL4Mi1AAJLQR0zYlJWfJiTANBgkqhkiG9w0BAQUFADCBrjELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xNjA0 +BgNVBAMTLVVUTi1VU0VSRmlyc3QtQ2xpZW50IEF1dGhlbnRpY2F0aW9uIGFuZCBFbWFpbDAeFw05 +OTA3MDkxNzI4NTBaFw0xOTA3MDkxNzM2NThaMIGuMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQx +FzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsx +ITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRydXN0LmNvbTE2MDQGA1UEAxMtVVROLVVTRVJGaXJz +dC1DbGllbnQgQXV0aGVudGljYXRpb24gYW5kIEVtYWlsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsjmFpPJ9q0E7YkY3rs3BYHW8OWX5ShpHornMSMxqmNVNNRm5pELlzkniii8efNIx +B8dOtINknS4p1aJkxIW9hVE1eaROaJB7HHqkkqgX8pgV8pPMyaQylbsMTzC9mKALi+VuG6JG+ni8 +om+rWV6lL8/K2m2qL+usobNqqrcuZzWLeeEeaYji5kbNoKXqvgvOdjp6Dpvq/NonWz1zHyLmSGHG +TPNpsaguG7bUMSAsvIKKjqQOpdeJQ/wWWq8dcdcRWdq6hw2v+vPhwvCkxWeM1tZUOt4KpLoDd7Nl +yP0e03RiqhjKaJMeoYV+9Udly/hNVyh00jT/MLbu9mIwFIws6wIDAQABo4G5MIG2MAsGA1UdDwQE +AwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSJgmd9xJ0mcABLtFBIfN49rgRufTBYBgNV +HR8EUTBPME2gS6BJhkdodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLVVTRVJGaXJzdC1DbGll +bnRBdXRoZW50aWNhdGlvbmFuZEVtYWlsLmNybDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwDQYJKoZIhvcNAQEFBQADggEBALFtYV2mGn98q0rkMPxTbyUkxsrt4jFcKw7u7mFVbwQ+zzne +xRtJlOTrIEy05p5QLnLZjfWqo7NK2lYcYJeA3IKirUq9iiv/Cwm0xtcgBEXkzYABurorbs6q15L+ +5K/r9CYdFip/bDCVNy8zEqx/3cfREYxRmLLQo5HQrfafnoOTHh1CuEava2bwm3/q4wMC5QJRwarV +NZ1yQAOJujEdxRBoUp7fooXFXAimeOZTT7Hot9MUnpOmw2TjrH5xzbyf6QMbzPvprDHBr3wVdAKZ +w7JHpsIyYdfHb0gkUSeh1YdV8nuPmD0Wnu51tvjQjvLzxq4oW6fw8zYX/MMF08oDSlQ= +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA 
+============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +UTN USERFirst Object Root CA +============================ +-----BEGIN CERTIFICATE----- +MIIEZjCCA06gAwIBAgIQRL4Mi1AAJLQR0zYt4LNfGzANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHTAb +BgNVBAMTFFVUTi1VU0VSRmlyc3QtT2JqZWN0MB4XDTk5MDcwOTE4MzEyMFoXDTE5MDcwOTE4NDAz +NlowgZUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJVVDEXMBUGA1UEBxMOU2FsdCBMYWtlIENpdHkx +HjAcBgNVBAoTFVRoZSBVU0VSVFJVU1QgTmV0d29yazEhMB8GA1UECxMYaHR0cDovL3d3dy51c2Vy +dHJ1c3QuY29tMR0wGwYDVQQDExRVVE4tVVNFUkZpcnN0LU9iamVjdDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAM6qgT+jo2F4qjEAVZURnicPHxzfOpuCaDDASmEd8S8O+r5596Uj71VR +loTN2+O5bj4x2AogZ8f02b+U60cEPgLOKqJdhwQJ9jCdGIqXsqoc/EHSoTbL+z2RuufZcDX65OeQ +w5ujm9M89RKZd7G3CeBo5hy485RjiGpq/gt2yb70IuRnuasaXnfBhQfdDWy/7gbHd2pBnqcP1/vu +lBe3/IW+pKvEHDHd17bR5PDv3xaPslKT16HUiaEHLr/hARJCHhrh2JU022R5KP+6LhHC5ehbkkj7 +RwvCbNqtMoNB86XlQXD9ZZBt+vpRxPm9lisZBCzTbafc8H9vg2XiaquHhnUCAwEAAaOBrzCBrDAL +BgNVHQ8EBAMCAcYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU2u1kdBScFDyr3ZmpvVsoTYs8 +ydgwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtT2JqZWN0LmNybDApBgNVHSUEIjAgBggrBgEFBQcDAwYIKwYBBQUHAwgGCisGAQQBgjcKAwQw +DQYJKoZIhvcNAQEFBQADggEBAAgfUrE3RHjb/c652pWWmKpVZIC1WkDdIaXFwfNfLEzIR1pp6ujw +NTX00CXzyKakh0q9G7FzCL3Uw8q2NbtZhncxzaeAFK4T7/yxSPlrJSUtUbYsbUXBmMiKVl0+7kNO +PmsnjtA6S4ULX9Ptaqd1y9Fahy85dRNacrACgZ++8A+EVCBibGnU4U3GDZlDAQ0Slox4nb9QorFE +qmrPF3rPbw/U+CRVX/A0FklmPlBGyWNxODFiuGK581OtbLUrohKqGU8J2l7nk8aOFAj+8DCAGKCG +hU3IfdeLA/5u1fedFqySLKAj5ZyRUh+U3xeUc8OzwcFxBSAAeL0TUh2oPs0AH8g= +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe 
+QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Qualified (Class QA) Root +================================= +-----BEGIN CERTIFICATE----- 
+MIIG0TCCBbmgAwIBAgIBezANBgkqhkiG9w0BAQUFADCByTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMUIwQAYDVQQDEzlOZXRMb2NrIE1pbm9zaXRldHQgS296amVn +eXpvaSAoQ2xhc3MgUUEpIFRhbnVzaXR2YW55a2lhZG8xHjAcBgkqhkiG9w0BCQEWD2luZm9AbmV0 +bG9jay5odTAeFw0wMzAzMzAwMTQ3MTFaFw0yMjEyMTUwMTQ3MTFaMIHJMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRvbnNhZ2kgS2Z0 +LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxQjBABgNVBAMTOU5ldExvY2sgTWlub3NpdGV0 +dCBLb3pqZWd5em9pIChDbGFzcyBRQSkgVGFudXNpdHZhbnlraWFkbzEeMBwGCSqGSIb3DQEJARYP +aW5mb0BuZXRsb2NrLmh1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx1Ilstg91IRV +CacbvWy5FPSKAtt2/GoqeKvld/Bu4IwjZ9ulZJm53QE+b+8tmjwi8F3JV6BVQX/yQ15YglMxZc4e +8ia6AFQer7C8HORSjKAyr7c3sVNnaHRnUPYtLmTeriZ539+Zhqurf4XsoPuAzPS4DB6TRWO53Lhb +m+1bOdRfYrCnjnxmOCyqsQhjF2d9zL2z8cM/z1A57dEZgxXbhxInlrfa6uWdvLrqOU+L73Sa58XQ +0uqGURzk/mQIKAR5BevKxXEOC++r6uwSEaEYBTJp0QwsGj0lmT+1fMptsK6ZmfoIYOcZwvK9UdPM +0wKswREMgM6r3JSda6M5UzrWhQIDAMV9o4ICwDCCArwwEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAQYwggJ1BglghkgBhvhCAQ0EggJmFoICYkZJR1lFTEVNISBFemVuIHRhbnVzaXR2 +YW55IGEgTmV0TG9jayBLZnQuIE1pbm9zaXRldHQgU3pvbGdhbHRhdGFzaSBTemFiYWx5emF0YWJh +biBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBBIG1pbm9zaXRldHQgZWxla3Ryb25p +a3VzIGFsYWlyYXMgam9naGF0YXMgZXJ2ZW55ZXN1bGVzZW5laywgdmFsYW1pbnQgZWxmb2dhZGFz +YW5hayBmZWx0ZXRlbGUgYSBNaW5vc2l0ZXR0IFN6b2xnYWx0YXRhc2kgU3phYmFseXphdGJhbiwg +YXogQWx0YWxhbm9zIFN6ZXJ6b2Rlc2kgRmVsdGV0ZWxla2JlbiBlbG9pcnQgZWxsZW5vcnplc2kg +ZWxqYXJhcyBtZWd0ZXRlbGUuIEEgZG9rdW1lbnR1bW9rIG1lZ3RhbGFsaGF0b2sgYSBodHRwczov +L3d3dy5uZXRsb2NrLmh1L2RvY3MvIGNpbWVuIHZhZ3kga2VyaGV0b2sgYXogaW5mb0BuZXRsb2Nr +Lm5ldCBlLW1haWwgY2ltZW4uIFdBUk5JTkchIFRoZSBpc3N1YW5jZSBhbmQgdGhlIHVzZSBvZiB0 +aGlzIGNlcnRpZmljYXRlIGFyZSBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIFF1YWxpZmllZCBDUFMg +YXZhaWxhYmxlIGF0IGh0dHBzOi8vd3d3Lm5ldGxvY2suaHUvZG9jcy8gb3IgYnkgZS1tYWlsIGF0 +IGluZm9AbmV0bG9jay5uZXQwHQYDVR0OBBYEFAlqYhaSsFq7VQ7LdTI6MuWyIckoMA0GCSqGSIb3 +DQEBBQUAA4IBAQCRalCc23iBmz+LQuM7/KbD7kPgz/PigDVJRXYC4uMvBcXxKufAQTPGtpvQMznN +wNuhrWw3AkxYQTvyl5LGSKjN5Yo5iWH5Upfpvfb5lHTocQ68d4bDBsxafEp+NFAwLvt/MpqNPfMg +W/hqyobzMUwsWYACff44yTB1HLdV47yfuqhthCgFdbOLDcCRVCHnpgu0mfVRQdzNo0ci2ccBgcTc +R08m6h/t280NmPSjnLRzMkqWmf68f8glWPhY83ZmiVSkpj7EUFy6iRiCdUgh0k8T6GB+B3bbELVR +5qq5aKrN9p2QdRLqOBrKROi3macqaJVmlaut74nLYKkGEsaUR+ko +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG 
+A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 +ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +NetLock Business (Class B) Root +=============================== +-----BEGIN CERTIFICATE----- +MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg +VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD +VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv +bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg +VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S +o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr +1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ +RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh +dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0 +ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv +c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg +YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh +c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz +Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA +bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl +IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2 +YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj +cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM +43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR +stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI +-----END CERTIFICATE----- + +NetLock Express (Class C) Root +============================== +-----BEGIN CERTIFICATE----- +MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV 
+BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD +KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ +BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j +ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB +jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z +W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63 +euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw +DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN +RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn +YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB +IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i +aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0 +ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs +ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo +dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y +emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k +IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ +UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg +YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 +xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW +gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp 
+ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ 
+Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj 
+Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Firmaprofesional Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEVzCCAz+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBnTELMAkGA1UEBhMCRVMxIjAgBgNVBAcT +GUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMTOUF1dG9yaWRhZCBkZSBDZXJ0aWZp +Y2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODEmMCQGCSqGSIb3DQEJARYXY2FA +ZmlybWFwcm9mZXNpb25hbC5jb20wHhcNMDExMDI0MjIwMDAwWhcNMTMxMDI0MjIwMDAwWjCBnTEL +MAkGA1UEBhMCRVMxIjAgBgNVBAcTGUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMT +OUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2 +ODEmMCQGCSqGSIb3DQEJARYXY2FAZmlybWFwcm9mZXNpb25hbC5jb20wggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDnIwNvbyOlXnjOlSztlB5uCp4Bx+ow0Syd3Tfom5h5VtP8c9/Qit5V +j1H5WuretXDE7aTt/6MNbg9kUDGvASdYrv5sp0ovFy3Tc9UTHI9ZpTQsHVQERc1ouKDAA6XPhUJH +lShbz++AbOCQl4oBPB3zhxAwJkh91/zpnZFx/0GaqUC1N5wpIE8fUuOgfRNtVLcK3ulqTgesrBlf +3H5idPayBQC6haD9HThuy1q7hryUZzM1gywfI834yJFxzJeL764P3CkDG8A563DtwW4O2GcLiam8 +NeTvtjS0pbbELaW+0MOUJEjb35bTALVmGotmBQ/dPz/LP6pemkr4tErvlTcbAgMBAAGjgZ8wgZww +KgYDVR0RBCMwIYYfaHR0cDovL3d3dy5maXJtYXByb2Zlc2lvbmFsLmNvbTASBgNVHRMBAf8ECDAG +AQH/AgEBMCsGA1UdEAQkMCKADzIwMDExMDI0MjIwMDAwWoEPMjAxMzEwMjQyMjAwMDBaMA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUMwugZtHq2s7eYpMEKFK1FH84aLcwDQYJKoZIhvcNAQEFBQAD +ggEBAEdz/o0nVPD11HecJ3lXV7cVVuzH2Fi3AQL0M+2TUIiefEaxvT8Ub/GzR0iLjJcG1+p+o1wq +u00vR+L4OQbJnC4xGgN49Lw4xiKLMzHwFgQEffl25EvXwOaD7FnMP97/T2u3Z36mhoEyIwOdyPdf +wUpgpZKpsaSgYMN4h7Mi8yrrW6ntBas3D7Hi05V2Y1Z0jFhyGzflZKG+TQyTmAyX9odtsz/ny4Cm +7YjHX1BiAuiZdBbQ5rQ58SfLyEDW44YQqSMSkuBpQWOnryULwMWSyx6Yo1q6xTMPoJcB3X/ge9YG +VM+h4k0460tQtcsm9MracEpqoeJ5quGnM/b9Sh/22WA= +-----END CERTIFICATE----- + +Wells Fargo Root CA +=================== +-----BEGIN CERTIFICATE----- +MIID5TCCAs2gAwIBAgIEOeSXnjANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC1dlbGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTEvMC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDAxMDExMTY0MTI4WhcNMjEwMTE0MTY0MTI4WjCBgjELMAkGA1UEBhMCVVMxFDASBgNVBAoTC1dl +bGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEv +MC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVqDM7Jvk0/82bfuUER84A4n135zHCLielTWi5MbqNQ1mX +x3Oqfz1cQJ4F5aHiidlMuD+b+Qy0yGIZLEWukR5zcUHESxP9cMIlrCL1dQu3U+SlK93OvRw6esP3 +E48mVJwWa2uv+9iWsWCaSOAlIiR5NM4OJgALTqv9i86C1y8IcGjBqAr5dE8Hq6T54oN+J3N0Prj5 +OEL8pahbSCOz6+MlsoCultQKnMJ4msZoGK43YjdeUXWoWGPAUe5AeH6orxqg4bB4nVCMe+ez/I4j +sNtlAHCEAQgAFG5Uhpq6zPk3EPbg3oQtnaSFN9OH4xXQwReQfhkhahKpdv0SAulPIV4XAgMBAAGj +YTBfMA8GA1UdEwEB/wQFMAMBAf8wTAYDVR0gBEUwQzBBBgtghkgBhvt7hwcBCzAyMDAGCCsGAQUF +BwIBFiRodHRwOi8vd3d3LndlbGxzZmFyZ28uY29tL2NlcnRwb2xpY3kwDQYJKoZIhvcNAQEFBQAD +ggEBANIn3ZwKdyu7IvICtUpKkfnRLb7kuxpo7w6kAOnu5+/u9vnldKTC2FJYxHT7zmu1Oyl5GFrv +m+0fazbuSCUlFLZWohDo7qd/0D+j0MNdJu4HzMPBJCGHHt8qElNvQRbn7a6U+oxy+hNH8Dx+rn0R 
+OhPs7fpvcmR7nX1/Jv16+yWt6j4pf0zjAFcysLPp7VMX2YuyFA4w6OXVE8Zkr8QA1dhYJPz1j+zx +x32l2w8n0cbyQIjmH/ZhqPRCyLk306m+LFZ4wnKbWV01QIroTmMatukgalHizqSQ33ZwmVxwQ023 +tqcZZE6St8WRPH9IFmV7Fv3L/PvZ1dZPIWU7Sn9Ho/s= +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc 
+EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy 
+dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t 
+Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- +MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Platinum CA - G2 +========================== +-----BEGIN 
CERTIFICATE----- +MIIFwTCCA6mgAwIBAgIITrIAZwwDXU8wDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEjMCEGA1UEAxMaU3dpc3NTaWduIFBsYXRpbnVtIENBIC0gRzIw +HhcNMDYxMDI1MDgzNjAwWhcNMzYxMDI1MDgzNjAwWjBJMQswCQYDVQQGEwJDSDEVMBMGA1UEChMM +U3dpc3NTaWduIEFHMSMwIQYDVQQDExpTd2lzc1NpZ24gUGxhdGludW0gQ0EgLSBHMjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrfogLi2vj8Bxax3mCq3pZcZB/HL37PZ/pEQtZ2Y5Wu +669yIIpFR4ZieIbWIDkm9K6j/SPnpZy1IiEZtzeTIsBQnIJ71NUERFzLtMKfkr4k2HtnIuJpX+UF +eNSH2XFwMyVTtIc7KZAoNppVRDBopIOXfw0enHb/FZ1glwCNioUD7IC+6ixuEFGSzH7VozPY1kne +WCqv9hbrS3uQMpe5up1Y8fhXSQQeol0GcN1x2/ndi5objM89o03Oy3z2u5yg+gnOI2Ky6Q0f4nIo +j5+saCB9bzuohTEJfwvH6GXp43gOCWcwizSC+13gzJ2BbWLuCB4ELE6b7P6pT1/9aXjvCR+htL/6 +8++QHkwFix7qepF6w9fl+zC8bBsQWJj3Gl/QKTIDE0ZNYWqFTFJ0LwYfexHihJfGmfNtf9dng34T +aNhxKFrYzt3oEBSa/m0jh26OWnA81Y0JAKeqvLAxN23IhBQeW71FYyBrS3SMvds6DsHPWhaPpZjy +domyExI7C3d3rLvlPClKknLKYRorXkzig3R3+jVIeoVNjZpTxN94ypeRSCtFKwH3HBqi7Ri6Cr2D ++m+8jVeTO9TUps4e8aCxzqv9KyiaTxvXw3LbpMS/XUz13XuWae5ogObnmLo2t/5u7Su9IPhlGdpV +CX4l3P5hYnL5fhgC72O00Puv5TtjjGePAgMBAAGjgawwgakwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFFCvzAeHFUdvOMW0ZdHelarp35zMMB8GA1UdIwQYMBaAFFCv +zAeHFUdvOMW0ZdHelarp35zMMEYGA1UdIAQ/MD0wOwYJYIV0AVkBAQEBMC4wLAYIKwYBBQUHAgEW +IGh0dHA6Ly9yZXBvc2l0b3J5LnN3aXNzc2lnbi5jb20vMA0GCSqGSIb3DQEBBQUAA4ICAQAIhab1 +Fgz8RBrBY+D5VUYI/HAcQiiWjrfFwUF1TglxeeVtlspLpYhg0DB0uMoI3LQwnkAHFmtllXcBrqS3 +NQuB2nEVqXQXOHtYyvkv+8Bldo1bAbl93oI9ZLi+FHSjClTTLJUYFzX1UWs/j6KWYTl4a0vlpqD4 +U99REJNi54Av4tHgvI42Rncz7Lj7jposiU0xEQ8mngS7twSNC/K5/FqdOxa3L8iYq/6KUFkuozv8 +KV2LwUvJ4ooTHbG/u0IdUt1O2BReEMYxB+9xJ/cbOQncguqLs5WGXv312l0xpuAxtpTmREl0xRbl +9x8DYSjFyMsSoEJL+WuICI20MhjzdZ/EfwBPBZWcoxcCw7NTm6ogOSkrZvqdr16zktK1puEa+S1B +aYEUtLS17Yk9zvupnTVCRLEcFHOBzyoBNZox1S2PbYTfgE1X4z/FhHXaicYwu+uPyyIIoK6q8QNs +OktNCaUOcsZWayFCTiMlFGiudgp8DAdwZPmaL/YFOSbGDI8Zf0NebvRbFS/bYV3mZy8/CJT5YLSY +Mdp08YSTcU1f+2BY0fvEwW2JorsgH51xkcsymxM9Pn2SUjWskpSi0xjCfMfqr3YFFt1nJ8J+HAci +IfNAChs0B0QTwoRqjt8ZWr9/6x3iGjjRXK9HkmuAtTClyY3YqzGBH9/CZjfTk6mFhnll0g== +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr 
+44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge 
+RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv 
+Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE 
+BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G 
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg 
+Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +S-TRUST Authentication and Encryption Root CA 2005 PN +===================================================== +-----BEGIN CERTIFICATE----- +MIIEezCCA2OgAwIBAgIQNxkY5lNUfBq1uMtZWts1tzANBgkqhkiG9w0BAQUFADCBrjELMAkGA1UE +BhMCREUxIDAeBgNVBAgTF0JhZGVuLVd1ZXJ0dGVtYmVyZyAoQlcpMRIwEAYDVQQHEwlTdHV0dGdh +cnQxKTAnBgNVBAoTIERldXRzY2hlciBTcGFya2Fzc2VuIFZlcmxhZyBHbWJIMT4wPAYDVQQDEzVT +LVRSVVNUIEF1dGhlbnRpY2F0aW9uIGFuZCBFbmNyeXB0aW9uIFJvb3QgQ0EgMjAwNTpQTjAeFw0w 
+NTA2MjIwMDAwMDBaFw0zMDA2MjEyMzU5NTlaMIGuMQswCQYDVQQGEwJERTEgMB4GA1UECBMXQmFk +ZW4tV3VlcnR0ZW1iZXJnIChCVykxEjAQBgNVBAcTCVN0dXR0Z2FydDEpMCcGA1UEChMgRGV1dHNj +aGVyIFNwYXJrYXNzZW4gVmVybGFnIEdtYkgxPjA8BgNVBAMTNVMtVFJVU1QgQXV0aGVudGljYXRp +b24gYW5kIEVuY3J5cHRpb24gUm9vdCBDQSAyMDA1OlBOMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA2bVKwdMz6tNGs9HiTNL1toPQb9UY6ZOvJ44TzbUlNlA0EmQpoVXhOmCTnijJ4/Ob +4QSwI7+Vio5bG0F/WsPoTUzVJBY+h0jUJ67m91MduwwA7z5hca2/OnpYH5Q9XIHV1W/fuJvS9eXL +g3KSwlOyggLrra1fFi2SU3bxibYs9cEv4KdKb6AwajLrmnQDaHgTncovmwsdvs91DSaXm8f1Xgqf +eN+zvOyauu9VjxuapgdjKRdZYgkqeQd3peDRF2npW932kKvimAoA0SVtnteFhy+S8dF2g08LOlk3 +KC8zpxdQ1iALCvQm+Z845y2kuJuJja2tyWp9iRe79n+Ag3rm7QIDAQABo4GSMIGPMBIGA1UdEwEB +/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgEGMCkGA1UdEQQiMCCkHjAcMRowGAYDVQQDExFTVFJv +bmxpbmUxLTIwNDgtNTAdBgNVHQ4EFgQUD8oeXHngovMpttKFswtKtWXsa1IwHwYDVR0jBBgwFoAU +D8oeXHngovMpttKFswtKtWXsa1IwDQYJKoZIhvcNAQEFBQADggEBAK8B8O0ZPCjoTVy7pWMciDMD +pwCHpB8gq9Yc4wYfl35UvbfRssnV2oDsF9eK9XvCAPbpEW+EoFolMeKJ+aQAPzFoLtU96G7m1R08 +P7K9n3frndOMusDXtk3sU5wPBG7qNWdX4wple5A64U8+wwCSersFiXOMy6ZNwPv2AtawB6MDwidA +nwzkhYItr5pCHdDHjfhA7p0GVxzZotiAFP7hYy0yh9WUUpY6RsZxlj33mA6ykaqP2vROJAA5Veit +F7nTNCtKqUDMFypVZUF0Qn71wK/Ik63yGFs9iQzbRzkk+OBM8h+wPQrKBU6JIRrjKpms/H+h8Q8b +Hz2eBIPdltkdOpQ= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER 
+MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. 
+====================================== +-----BEGIN CERTIFICATE----- +MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT +AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg +LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w +HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ +U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh +IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN +yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU +2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 +4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP +2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm +8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf +HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa +Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK +5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b +czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g +ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF +BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug +cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf +AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX +EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v +/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 +MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 +3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk +eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f +/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h +RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU +Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU 
+cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 3 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw +MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W +yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo +6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ +uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk +2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE +O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 +yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 +IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal +092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc +5A== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy 
+vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign CA +========== +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIQFBOWgxRVjOp7Y+X8NId3RDANBgkqhkiG9w0BAQUFADA0MRMwEQYDVQQD +EwpDb21TaWduIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTMy +MThaFw0yOTAzMTkxNTAyMThaMDQxEzARBgNVBAMTCkNvbVNpZ24gQ0ExEDAOBgNVBAoTB0NvbVNp +Z24xCzAJBgNVBAYTAklMMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8ORUaSvTx49q +ROR+WCf4C9DklBKK8Rs4OC8fMZwG1Cyn3gsqrhqg455qv588x26i+YtkbDqthVVRVKU4VbirgwTy +P2Q298CNQ0NqZtH3FyrV7zb6MBBC11PN+fozc0yz6YQgitZBJzXkOPqUm7h65HkfM/sb2CEJKHxN +GGleZIp6GZPKfuzzcuc3B1hZKKxC+cX/zT/npfo4sdAMx9lSGlPWgcxCejVb7Us6eva1jsz/D3zk +YDaHL63woSV9/9JLEYhwVKZBqGdTUkJe5DSe5L6j7KpiXd3DTKaCQeQzC6zJMw9kglcq/QytNuEM +rkvF7zuZ2SOzW120V+x0cAwqTwIDAQABo4GgMIGdMAwGA1UdEwQFMAMBAf8wPQYDVR0fBDYwNDAy +oDCgLoYsaHR0cDovL2ZlZGlyLmNvbXNpZ24uY28uaWwvY3JsL0NvbVNpZ25DQS5jcmwwDgYDVR0P +AQH/BAQDAgGGMB8GA1UdIwQYMBaAFEsBmz5WGmU2dst7l6qSBe4y5ygxMB0GA1UdDgQWBBRLAZs+ +VhplNnbLe5eqkgXuMucoMTANBgkqhkiG9w0BAQUFAAOCAQEA0Nmlfv4pYEWdfoPPbrxHbvUanlR2 +QnG0PFg/LUAlQvaBnPGJEMgOqnhPOAlXsDzACPw1jvFIUY0McXS6hMTXcpuEfDhOZAYnKuGntewI +mbQKDdSFc8gS4TXt8QUxHXOZDOuWyt3T5oWq8Ir7dcHyCTxlZWTzTNity4hp8+SDtwy9F1qWF8pb +/627HOkthIDYIb6FUtnUdLlphbpN7Sgy6/lhSuTENh4Z3G+EER+V9YMoGKgzkkMn3V0TBEVPh9VG +zT2ouvDzuFYkRes3x+F2T3I5GN9+dHLHcy056mDmrRGiVod7w2ia/viMcKjfZTL0pECMocJEAw6U +AGegcQCCSA== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs 
+49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi 
+Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M 
+cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia +AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 +htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB 
+/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD +0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT 
+SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 
+8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ +KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV 
+UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC +VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU +cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Főtanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw 
+/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil +e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo 
+hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC ++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV 
+HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq 
+nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCED9pHoGc8JpK83P/uUii5N0wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAxIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAx +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDlGb9to1ZhLZlIcfZn3rmN67eehoAKkQ76OCWvRoiC5XOooJskXQ0fzGVuDLDQ +VoQYh5oGmxChc9+0WDlrbsH2FdWoqD+qEgaNMax/sDTXjzRniAnNFBHiTkVWaR94AoDa3EeRKbs2 +yWNcxeDXLYd7obcysHswuiovMaruo2fa2wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFgVKTk8d6Pa +XCUDfGD67gmZPCcQcMgMCeazh88K4hiWNWLMv5sneYlfycQJ9M61Hd8qveXbhpxoJeUwfLaJFf5n +0a3hUKw8fGJLj7qE1xIVGx/KXQ/BUpQqEZnae88MNhPVNdwQGVnqlMEAv3WP2fr9dgTbYruQagPZ +RjXZ+Hxb +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBABByUqkFFBky +CEHwxWsKzH4PIRnN5GfcX6kb5sroc50i2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWX +bj9T/UWZYB2oK0z5XqcJ2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/ +D/xwzoiQ +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX 
+I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi +=================================================== +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz +ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 +MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 +cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u +aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY +8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y +jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI +JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk +9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG +SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d +F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq +D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 +Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +TC TrustCenter Universal CA III +=============================== +-----BEGIN CERTIFICATE----- +MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAe +Fw0wOTA5MDkwODE1MjdaFw0yOTEyMzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNU +QyBUcnVzdENlbnRlciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0Ex 
+KDAmBgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF5+cvAqBNLaT6hdqbJYUt +QCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYvDIRlzg9uwliT6CwLOunBjvvya8o84pxO +juT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8vzArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+Eut +CHnNaYlAJ/Uqwa1D7KRTyGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1 +M4BDj5yjdipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBhMB8G +A1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI4jANBgkqhkiG9w0BAQUFAAOCAQEA +g8ev6n9NCjw5sWi+e22JLumzCecYV42FmhfzdkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+ +KGwWaODIl0YgoGhnYIg5IFHYaAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhK +BgePxLcHsU0GDeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV +CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPHLQNjO9Po5KIq +woIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg== +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz 
+MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ 
+BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH +3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr 
+P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s 
+b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 
+12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 
+MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G 
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n +dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ 
+VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT 
+ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 +Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 
+oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z 
+6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ +Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc 
+nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD 
+VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM 
+lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- diff --git a/awx/lib/site-packages/pip/cmdoptions.py b/awx/lib/site-packages/pip/cmdoptions.py new file mode 100644 index 0000000000..4bf910cfdc --- /dev/null +++ b/awx/lib/site-packages/pip/cmdoptions.py @@ -0,0 +1,190 @@ +"""shared options and groups""" +from optparse import make_option, OptionGroup +from pip.locations import build_prefix + + +def make_option_group(group, parser): + """ + Return an OptionGroup object + group -- assumed to be dict with 'name' and 'options' keys + parser -- an optparse Parser + """ + option_group = OptionGroup(parser, group['name']) + for option in group['options']: + option_group.add_option(option) + return option_group + +########### +# options # +########### + +index_url = make_option( + '-i', '--index-url', '--pypi-url', + dest='index_url', + metavar='URL', + default='https://pypi.python.org/simple/', + help='Base URL of Python Package Index (default %default).') + +extra_index_url = make_option( + '--extra-index-url', + dest='extra_index_urls', + metavar='URL', + action='append', + default=[], + help='Extra URLs of package indexes to use in addition to --index-url.') + +no_index = make_option( + '--no-index', + dest='no_index', + action='store_true', + default=False, + help='Ignore package index (only looking at --find-links URLs instead).') + +find_links = make_option( + '-f', '--find-links', + dest='find_links', + action='append', + default=[], + metavar='url', + help="If a url or path to an html file, then parse for links to archives. If a local path or file:// url that's a directory, then look for archives in the directory listing.") + +use_mirrors = make_option( + '-M', '--use-mirrors', + dest='use_mirrors', + action='store_true', + default=False, + help='Use the PyPI mirrors as a fallback in case the main index is down.') + +mirrors = make_option( + '--mirrors', + dest='mirrors', + metavar='URL', + action='append', + default=[], + help='Specific mirror URLs to query when --use-mirrors is used.') + +allow_external = make_option( + "--allow-external", + dest="allow_external", + action="append", + default=[], + metavar="PACKAGE", + help="Allow the installation of externally hosted files", +) + +allow_all_external = make_option( + "--allow-all-external", + dest="allow_all_external", + action="store_true", + default=True, # TODO: Change to False after 1.4 has been released + help="Allow the installation of all externally hosted files", +) + +# TODO: NOOP after 1.4 has been released +no_allow_external = make_option( + "--no-allow-external", + dest="allow_all_external", + action="store_false", + help="Disallow the installation of all externally hosted files", +) + +allow_unsafe = make_option( + "--allow-insecure", + dest="allow_insecure", + action="append", + default=[], + metavar="PACKAGE", + help="Allow the installation of insecure and unverifiable files", +) + +no_allow_unsafe = make_option( + "--no-allow-insecure", + dest="allow_all_insecure", + action="store_false", + default=True, + help="Disallow the installation of insecure and unverifiable files" +) + +requirements = make_option( + '-r', '--requirement', + dest='requirements', + action='append', + default=[], + metavar='file', + help='Install from the given requirements file. 
' + 'This option can be used multiple times.') + +use_wheel = make_option( + '--use-wheel', + dest='use_wheel', + action='store_true', + help='Find and prefer wheel archives when searching indexes and find-links locations. Default to accepting source archives.') + +download_cache = make_option( + '--download-cache', + dest='download_cache', + metavar='dir', + default=None, + help='Cache downloaded packages in <dir>.') + +no_deps = make_option( + '--no-deps', '--no-dependencies', + dest='ignore_dependencies', + action='store_true', + default=False, + help="Don't install package dependencies.") + +build_dir = make_option( + '-b', '--build', '--build-dir', '--build-directory', + dest='build_dir', + metavar='dir', + default=build_prefix, + help='Directory to unpack packages into and build in. ' + 'The default in a virtualenv is "<venv path>/build". ' + 'The default for global installs is "<OS temp dir>/pip_build_<username>".') + +install_options = make_option( + '--install-option', + dest='install_options', + action='append', + metavar='options', + help="Extra arguments to be supplied to the setup.py install " + "command (use like --install-option=\"--install-scripts=/usr/local/bin\"). " + "Use multiple --install-option options to pass multiple options to setup.py install. " + "If you are using an option with a directory path, be sure to use absolute path.") + +global_options = make_option( + '--global-option', + dest='global_options', + action='append', + metavar='options', + help="Extra global options to be supplied to the setup.py " + "call before the install command.") + +no_clean = make_option( + '--no-clean', + action='store_true', + default=False, + help="Don't clean up build directories.") + + +########## +# groups # +########## + +index_group = { + 'name': 'Package Index Options', + 'options': [ + index_url, + extra_index_url, + no_index, + find_links, + use_mirrors, + mirrors, + allow_external, + allow_all_external, + no_allow_external, + allow_unsafe, + no_allow_unsafe, + ] + } diff --git a/awx/lib/site-packages/pip/commands/__init__.py b/awx/lib/site-packages/pip/commands/__init__.py new file mode 100644 index 0000000000..e0702d2700 --- /dev/null +++ b/awx/lib/site-packages/pip/commands/__init__.py @@ -0,0 +1,88 @@ +""" +Package containing all pip commands +""" + + +from pip.commands.bundle import BundleCommand +from pip.commands.completion import CompletionCommand +from pip.commands.freeze import FreezeCommand +from pip.commands.help import HelpCommand +from pip.commands.list import ListCommand +from pip.commands.search import SearchCommand +from pip.commands.show import ShowCommand +from pip.commands.install import InstallCommand +from pip.commands.uninstall import UninstallCommand +from pip.commands.unzip import UnzipCommand +from pip.commands.zip import ZipCommand +from pip.commands.wheel import WheelCommand + + +commands = { + BundleCommand.name: BundleCommand, + CompletionCommand.name: CompletionCommand, + FreezeCommand.name: FreezeCommand, + HelpCommand.name: HelpCommand, + SearchCommand.name: SearchCommand, + ShowCommand.name: ShowCommand, + InstallCommand.name: InstallCommand, + UninstallCommand.name: UninstallCommand, + UnzipCommand.name: UnzipCommand, + ZipCommand.name: ZipCommand, + ListCommand.name: ListCommand, + WheelCommand.name: WheelCommand, +} + + +commands_order = [ + InstallCommand, + UninstallCommand, + FreezeCommand, + ListCommand, + ShowCommand, + SearchCommand, + WheelCommand, + ZipCommand, + UnzipCommand, + BundleCommand, + HelpCommand, +] + + +def get_summaries(ignore_hidden=True, 
ordered=True): + """Yields sorted (command name, command summary) tuples.""" + + if ordered: + cmditems = _sort_commands(commands, commands_order) + else: + cmditems = commands.items() + + for name, command_class in cmditems: + if ignore_hidden and command_class.hidden: + continue + + yield (name, command_class.summary) + + +def get_similar_commands(name): + """Command name auto-correct.""" + from difflib import get_close_matches + + close_commands = get_close_matches(name, commands.keys()) + + if close_commands: + guess = close_commands[0] + else: + guess = False + + return guess + + +def _sort_commands(cmddict, order): + def keyfn(key): + try: + return order.index(key[1]) + except ValueError: + # unordered items should come last + return 0xff + + return sorted(cmddict.items(), key=keyfn) diff --git a/awx/lib/site-packages/pip/commands/bundle.py b/awx/lib/site-packages/pip/commands/bundle.py new file mode 100644 index 0000000000..0b7f15093a --- /dev/null +++ b/awx/lib/site-packages/pip/commands/bundle.py @@ -0,0 +1,53 @@ +import textwrap +from pip.locations import build_prefix, src_prefix +from pip.util import display_path, backup_dir +from pip.log import logger +from pip.exceptions import InstallationError +from pip.commands.install import InstallCommand + + +class BundleCommand(InstallCommand): + """Create pybundles (archives containing multiple packages).""" + name = 'bundle' + usage = """ + %prog [options] <bundle name>.pybundle <package>...""" + summary = 'Create pybundles.' + bundle = True + + def __init__(self, *args, **kw): + super(BundleCommand, self).__init__(*args, **kw) + # bundle uses different default source and build dirs + build_opt = self.parser.get_option("--build") + build_opt.default = backup_dir(build_prefix, '-bundle') + src_opt = self.parser.get_option("--src") + src_opt.default = backup_dir(src_prefix, '-bundle') + self.parser.set_defaults(**{ + src_opt.dest: src_opt.default, + build_opt.dest: build_opt.default, + }) + + def run(self, options, args): + + deprecation = textwrap.dedent(""" + + ############################################### + ## ## + ## Due to lack of interest and maintenance, ## + ## 'pip bundle' and support for installing ## + ## from *.pybundle files is now deprecated, ## + ## and will be removed in pip v1.5. 
## + ## ## + ############################################### + + """) + logger.notify(deprecation) + + if not args: + raise InstallationError('You must give a bundle filename') + # We have to get everything when creating a bundle: + options.ignore_installed = True + logger.notify('Putting temporary build files in %s and source/develop files in %s' + % (display_path(options.build_dir), display_path(options.src_dir))) + self.bundle_filename = args.pop(0) + requirement_set = super(BundleCommand, self).run(options, args) + return requirement_set diff --git a/awx/lib/site-packages/pip/commands/completion.py b/awx/lib/site-packages/pip/commands/completion.py new file mode 100644 index 0000000000..5fa2376206 --- /dev/null +++ b/awx/lib/site-packages/pip/commands/completion.py @@ -0,0 +1,59 @@ +import sys +from pip.basecommand import Command + +BASE_COMPLETION = """ +# pip %(shell)s completion start%(script)s# pip %(shell)s completion end +""" + +COMPLETION_SCRIPTS = { + 'bash': """ +_pip_completion() +{ + COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ + COMP_CWORD=$COMP_CWORD \\ + PIP_AUTO_COMPLETE=1 $1 ) ) +} +complete -o default -F _pip_completion pip +""", 'zsh': """ +function _pip_completion { + local words cword + read -Ac words + read -cn cword + reply=( $( COMP_WORDS="$words[*]" \\ + COMP_CWORD=$(( cword-1 )) \\ + PIP_AUTO_COMPLETE=1 $words[1] ) ) +} +compctl -K _pip_completion pip +"""} + + +class CompletionCommand(Command): + """A helper command to be used for command completion.""" + name = 'completion' + summary = 'A helper command to be used for command completion' + hidden = True + + def __init__(self, *args, **kw): + super(CompletionCommand, self).__init__(*args, **kw) + self.parser.add_option( + '--bash', '-b', + action='store_const', + const='bash', + dest='shell', + help='Emit completion code for bash') + self.parser.add_option( + '--zsh', '-z', + action='store_const', + const='zsh', + dest='shell', + help='Emit completion code for zsh') + + def run(self, options, args): + """Prints the completion code of the given shell""" + shells = COMPLETION_SCRIPTS.keys() + shell_options = ['--' + shell for shell in sorted(shells)] + if options.shell in shells: + script = COMPLETION_SCRIPTS.get(options.shell, '') + print(BASE_COMPLETION % {'script': script, 'shell': options.shell}) + else: + sys.stderr.write('ERROR: You must pass %s\n' % ' or '.join(shell_options)) diff --git a/awx/lib/site-packages/pip/commands/freeze.py b/awx/lib/site-packages/pip/commands/freeze.py new file mode 100644 index 0000000000..9c2ab7239f --- /dev/null +++ b/awx/lib/site-packages/pip/commands/freeze.py @@ -0,0 +1,113 @@ +import re +import sys +import pkg_resources +import pip +from pip.req import InstallRequirement +from pip.log import logger +from pip.basecommand import Command +from pip.util import get_installed_distributions + + +class FreezeCommand(Command): + """Output installed packages in requirements format.""" + name = 'freeze' + usage = """ + %prog [options]""" + summary = 'Output installed packages in requirements format.' 
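The FreezeCommand being defined here derives its output from pkg_resources.working_set, the same registry of importable distributions the rest of pip leans on. A minimal standalone sketch of that idea (illustrative only, not part of the vendored source; pip's real FrozenRequirement additionally handles editables and VCS checkouts):

import pkg_resources

def freeze_lines():
    # Yield one 'name==version' requirement line per installed
    # distribution, mirroring the shape of 'pip freeze' output.
    for dist in sorted(pkg_resources.working_set,
                       key=lambda d: d.project_name.lower()):
        yield '%s==%s' % (dist.project_name, dist.version)

if __name__ == '__main__':
    for line in freeze_lines():
        print(line)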
+ + def __init__(self, *args, **kw): + super(FreezeCommand, self).__init__(*args, **kw) + + self.cmd_opts.add_option( + '-r', '--requirement', + dest='requirement', + action='store', + default=None, + metavar='file', + help="Use the order in the given requirements file and it's comments when generating output.") + self.cmd_opts.add_option( + '-f', '--find-links', + dest='find_links', + action='append', + default=[], + metavar='URL', + help='URL for finding packages, which will be added to the output.') + self.cmd_opts.add_option( + '-l', '--local', + dest='local', + action='store_true', + default=False, + help='If in a virtualenv that has global access, do not output globally-installed packages.') + + self.parser.insert_option_group(0, self.cmd_opts) + + def setup_logging(self): + logger.move_stdout_to_stderr() + + def run(self, options, args): + requirement = options.requirement + find_links = options.find_links or [] + local_only = options.local + ## FIXME: Obviously this should be settable: + find_tags = False + skip_match = None + + skip_regex = options.skip_requirements_regex + if skip_regex: + skip_match = re.compile(skip_regex) + + dependency_links = [] + + f = sys.stdout + + for dist in pkg_resources.working_set: + if dist.has_metadata('dependency_links.txt'): + dependency_links.extend(dist.get_metadata_lines('dependency_links.txt')) + for link in find_links: + if '#egg=' in link: + dependency_links.append(link) + for link in find_links: + f.write('-f %s\n' % link) + installations = {} + for dist in get_installed_distributions(local_only=local_only): + req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags) + installations[req.name] = req + if requirement: + req_f = open(requirement) + for line in req_f: + if not line.strip() or line.strip().startswith('#'): + f.write(line) + continue + if skip_match and skip_match.search(line): + f.write(line) + continue + elif line.startswith('-e') or line.startswith('--editable'): + if line.startswith('-e'): + line = line[2:].strip() + else: + line = line[len('--editable'):].strip().lstrip('=') + line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs) + elif (line.startswith('-r') or line.startswith('--requirement') + or line.startswith('-Z') or line.startswith('--always-unzip') + or line.startswith('-f') or line.startswith('-i') + or line.startswith('--extra-index-url') + or line.startswith('--find-links') + or line.startswith('--index-url')): + f.write(line) + continue + else: + line_req = InstallRequirement.from_line(line) + if not line_req.name: + logger.notify("Skipping line because it's not clear what it would install: %s" + % line.strip()) + logger.notify(" (add #egg=PackageName to the URL to avoid this warning)") + continue + if line_req.name not in installations: + logger.warn("Requirement file contains %s, but that package is not installed" + % line.strip()) + continue + f.write(str(installations[line_req.name])) + del installations[line_req.name] + f.write('## The following requirements were added by pip --freeze:\n') + for installation in sorted(installations.values(), key=lambda x: x.name): + f.write(str(installation)) diff --git a/awx/lib/site-packages/pip/commands/help.py b/awx/lib/site-packages/pip/commands/help.py new file mode 100644 index 0000000000..4771db7539 --- /dev/null +++ b/awx/lib/site-packages/pip/commands/help.py @@ -0,0 +1,33 @@ +from pip.basecommand import Command, SUCCESS +from pip.exceptions import CommandError + + +class HelpCommand(Command): + """Show help 
for commands""" + name = 'help' + usage = """ + %prog """ + summary = 'Show help for commands.' + + def run(self, options, args): + from pip.commands import commands, get_similar_commands + + try: + # 'pip help' with no args is handled by pip.__init__.parseopt() + cmd_name = args[0] # the command we need help for + except IndexError: + return SUCCESS + + if cmd_name not in commands: + guess = get_similar_commands(cmd_name) + + msg = ['unknown command "%s"' % cmd_name] + if guess: + msg.append('maybe you meant "%s"' % guess) + + raise CommandError(' - '.join(msg)) + + command = commands[cmd_name](self.main_parser) # instantiate + command.parser.print_help() + + return SUCCESS diff --git a/awx/lib/site-packages/pip/commands/install.py b/awx/lib/site-packages/pip/commands/install.py new file mode 100644 index 0000000000..bcdc9c1ae6 --- /dev/null +++ b/awx/lib/site-packages/pip/commands/install.py @@ -0,0 +1,271 @@ +import os +import sys +import tempfile +import shutil +from pip.req import InstallRequirement, RequirementSet, parse_requirements +from pip.log import logger +from pip.locations import src_prefix, virtualenv_no_global, distutils_scheme +from pip.basecommand import Command +from pip.index import PackageFinder +from pip.exceptions import InstallationError, CommandError, PreviousBuildDirError +from pip import cmdoptions + + +class InstallCommand(Command): + """ + Install packages from: + + - PyPI (and other indexes) using requirement specifiers. + - VCS project urls. + - Local project directories. + - Local or remote source archives. + + pip also supports installing from "requirements files", which provide + an easy way to specify a whole environment to be installed. + + See http://www.pip-installer.org for details on VCS url formats and + requirements files. + """ + name = 'install' + + usage = """ + %prog [options] ... + %prog [options] -r ... + %prog [options] [-e] ... + %prog [options] [-e] ... + %prog [options] ...""" + + summary = 'Install packages.' + bundle = False + + def __init__(self, *args, **kw): + super(InstallCommand, self).__init__(*args, **kw) + + cmd_opts = self.cmd_opts + + cmd_opts.add_option( + '-e', '--editable', + dest='editables', + action='append', + default=[], + metavar='path/url', + help='Install a project in editable mode (i.e. setuptools "develop mode") from a local project path or a VCS url.') + + cmd_opts.add_option(cmdoptions.requirements) + cmd_opts.add_option(cmdoptions.build_dir) + + cmd_opts.add_option( + '-t', '--target', + dest='target_dir', + metavar='dir', + default=None, + help='Install packages into .') + + cmd_opts.add_option( + '-d', '--download', '--download-dir', '--download-directory', + dest='download_dir', + metavar='dir', + default=None, + help="Download packages into instead of installing them, regardless of what's already installed.") + + cmd_opts.add_option(cmdoptions.download_cache) + + cmd_opts.add_option( + '--src', '--source', '--source-dir', '--source-directory', + dest='src_dir', + metavar='dir', + default=src_prefix, + help='Directory to check out editable projects into. ' + 'The default in a virtualenv is "/src". ' + 'The default for global installs is "/src".') + + cmd_opts.add_option( + '-U', '--upgrade', + dest='upgrade', + action='store_true', + help='Upgrade all packages to the newest available version. 
' + 'This process is recursive regardless of whether a dependency is already satisfied.') + + cmd_opts.add_option( + '--force-reinstall', + dest='force_reinstall', + action='store_true', + help='When upgrading, reinstall all packages even if they are ' + 'already up-to-date.') + + cmd_opts.add_option( + '-I', '--ignore-installed', + dest='ignore_installed', + action='store_true', + help='Ignore the installed packages (reinstalling instead).') + + cmd_opts.add_option(cmdoptions.no_deps) + + cmd_opts.add_option( + '--no-install', + dest='no_install', + action='store_true', + help="Download and unpack all packages, but don't actually install them.") + + cmd_opts.add_option( + '--no-download', + dest='no_download', + action="store_true", + help="Don't download any packages, just install the ones already downloaded " + "(completes an install run with --no-install).") + + cmd_opts.add_option(cmdoptions.install_options) + cmd_opts.add_option(cmdoptions.global_options) + + cmd_opts.add_option( + '--user', + dest='use_user_site', + action='store_true', + help='Install using the user scheme.') + + cmd_opts.add_option( + '--egg', + dest='as_egg', + action='store_true', + help="Install as self contained egg file, like easy_install does.") + + cmd_opts.add_option( + '--root', + dest='root_path', + metavar='dir', + default=None, + help="Install everything relative to this alternate root directory.") + + cmd_opts.add_option(cmdoptions.use_wheel) + + cmd_opts.add_option( + '--pre', + action='store_true', + default=False, + help="Include pre-release and development versions. By default, pip only finds stable versions.") + + cmd_opts.add_option(cmdoptions.no_clean) + + index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser) + + self.parser.insert_option_group(0, index_opts) + self.parser.insert_option_group(0, cmd_opts) + + def _build_package_finder(self, options, index_urls): + """ + Create a package finder appropriate to this install command. + This method is meant to be overridden by subclasses, not + called directly. + """ + return PackageFinder(find_links=options.find_links, + index_urls=index_urls, + use_mirrors=options.use_mirrors, + mirrors=options.mirrors, + use_wheel=options.use_wheel, + allow_external=options.allow_external, + allow_insecure=options.allow_insecure, + allow_all_external=options.allow_all_external, + allow_all_insecure=options.allow_all_insecure, + allow_all_prereleases=options.pre, + ) + + def run(self, options, args): + if options.download_dir: + options.no_install = True + options.ignore_installed = True + options.build_dir = os.path.abspath(options.build_dir) + options.src_dir = os.path.abspath(options.src_dir) + install_options = options.install_options or [] + if options.use_user_site: + if virtualenv_no_global(): + raise InstallationError("Can not perform a '--user' install. 
User site-packages are not visible in this virtualenv.") + install_options.append('--user') + + temp_target_dir = None + if options.target_dir: + options.ignore_installed = True + temp_target_dir = tempfile.mkdtemp() + options.target_dir = os.path.abspath(options.target_dir) + if os.path.exists(options.target_dir) and not os.path.isdir(options.target_dir): + raise CommandError("Target path exists but is not a directory, will not continue.") + install_options.append('--home=' + temp_target_dir) + + global_options = options.global_options or [] + index_urls = [options.index_url] + options.extra_index_urls + if options.no_index: + logger.notify('Ignoring indexes: %s' % ','.join(index_urls)) + index_urls = [] + + finder = self._build_package_finder(options, index_urls) + + requirement_set = RequirementSet( + build_dir=options.build_dir, + src_dir=options.src_dir, + download_dir=options.download_dir, + download_cache=options.download_cache, + upgrade=options.upgrade, + as_egg=options.as_egg, + ignore_installed=options.ignore_installed, + ignore_dependencies=options.ignore_dependencies, + force_reinstall=options.force_reinstall, + use_user_site=options.use_user_site, + target_dir=temp_target_dir) + for name in args: + requirement_set.add_requirement( + InstallRequirement.from_line(name, None)) + for name in options.editables: + requirement_set.add_requirement( + InstallRequirement.from_editable(name, default_vcs=options.default_vcs)) + for filename in options.requirements: + for req in parse_requirements(filename, finder=finder, options=options): + requirement_set.add_requirement(req) + if not requirement_set.has_requirements: + opts = {'name': self.name} + if options.find_links: + msg = ('You must give at least one requirement to %(name)s ' + '(maybe you meant "pip %(name)s %(links)s"?)' % + dict(opts, links=' '.join(options.find_links))) + else: + msg = ('You must give at least one requirement ' + 'to %(name)s (see "pip help %(name)s")' % opts) + logger.warn(msg) + return + + try: + if not options.no_download: + requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle) + else: + requirement_set.locate_files() + + if not options.no_install and not self.bundle: + requirement_set.install(install_options, global_options, root=options.root_path) + installed = ' '.join([req.name for req in + requirement_set.successfully_installed]) + if installed: + logger.notify('Successfully installed %s' % installed) + elif not self.bundle: + downloaded = ' '.join([req.name for req in + requirement_set.successfully_downloaded]) + if downloaded: + logger.notify('Successfully downloaded %s' % downloaded) + elif self.bundle: + requirement_set.create_bundle(self.bundle_filename) + logger.notify('Created bundle in %s' % self.bundle_filename) + except PreviousBuildDirError: + return + finally: + # Clean up + if (not options.no_clean) and ((not options.no_install) or options.download_dir): + requirement_set.cleanup_files(bundle=self.bundle) + + if options.target_dir: + if not os.path.exists(options.target_dir): + os.makedirs(options.target_dir) + lib_dir = distutils_scheme('', home=temp_target_dir)['purelib'] + for item in os.listdir(lib_dir): + shutil.move( + os.path.join(lib_dir, item), + os.path.join(options.target_dir, item) + ) + shutil.rmtree(temp_target_dir) + return requirement_set diff --git a/awx/lib/site-packages/pip/commands/list.py b/awx/lib/site-packages/pip/commands/list.py new file mode 100644 index 0000000000..aaae7694db --- /dev/null +++ 
b/awx/lib/site-packages/pip/commands/list.py @@ -0,0 +1,147 @@ +from pip.basecommand import Command +from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled +from pip.index import PackageFinder +from pip.log import logger +from pip.req import InstallRequirement +from pip.util import get_installed_distributions, dist_is_editable +from pip.cmdoptions import make_option_group, index_group + + +class ListCommand(Command): + """List installed packages, including editables.""" + name = 'list' + usage = """ + %prog [options]""" + summary = 'List installed packages.' + + # distributions to skip (python itself is reported by pkg_resources.working_set) + skip = ['python'] + + def __init__(self, *args, **kw): + super(ListCommand, self).__init__(*args, **kw) + + cmd_opts = self.cmd_opts + + cmd_opts.add_option( + '-o', '--outdated', + action='store_true', + default=False, + help='List outdated packages (excluding editables)') + cmd_opts.add_option( + '-u', '--uptodate', + action='store_true', + default=False, + help='List uptodate packages (excluding editables)') + cmd_opts.add_option( + '-e', '--editable', + action='store_true', + default=False, + help='List editable projects.') + cmd_opts.add_option( + '-l', '--local', + action='store_true', + default=False, + help='If in a virtualenv that has global access, do not list globally-installed packages.') + + cmd_opts.add_option( + '--pre', + action='store_true', + default=False, + help="Include pre-release and development versions. By default, pip only finds stable versions.") + + index_opts = make_option_group(index_group, self.parser) + + self.parser.insert_option_group(0, index_opts) + self.parser.insert_option_group(0, cmd_opts) + + def _build_package_finder(self, options, index_urls): + """ + Create a package finder appropriate to this list command. 
+ """ + return PackageFinder(find_links=options.find_links, + index_urls=index_urls, + use_mirrors=options.use_mirrors, + mirrors=options.mirrors, + allow_external=options.allow_external, + allow_insecure=options.allow_insecure, + allow_all_external=options.allow_all_external, + allow_all_insecure=options.allow_all_insecure, + allow_all_prereleases=options.pre, + ) + + def run(self, options, args): + if options.outdated: + self.run_outdated(options) + elif options.uptodate: + self.run_uptodate(options) + elif options.editable: + self.run_editables(options) + else: + self.run_listing(options) + + def run_outdated(self, options): + for dist, remote_version_raw, remote_version_parsed in self.find_packages_latests_versions(options): + if remote_version_parsed > dist.parsed_version: + logger.notify('%s (Current: %s Latest: %s)' % (dist.project_name, + dist.version, remote_version_raw)) + + def find_packages_latests_versions(self, options): + index_urls = [options.index_url] + options.extra_index_urls + if options.no_index: + logger.notify('Ignoring indexes: %s' % ','.join(index_urls)) + index_urls = [] + + dependency_links = [] + for dist in get_installed_distributions(local_only=options.local, skip=self.skip): + if dist.has_metadata('dependency_links.txt'): + dependency_links.extend( + dist.get_metadata_lines('dependency_links.txt'), + ) + + finder = self._build_package_finder(options, index_urls) + finder.add_dependency_links(dependency_links) + + installed_packages = get_installed_distributions(local_only=options.local, include_editables=False, skip=self.skip) + for dist in installed_packages: + req = InstallRequirement.from_line(dist.key, None) + try: + link = finder.find_requirement(req, True) + + # If link is None, means installed version is most up-to-date + if link is None: + continue + except DistributionNotFound: + continue + except BestVersionAlreadyInstalled: + remote_version = req.installed_version + else: + # It might be a good idea that link or finder had a public method + # that returned version + remote_version = finder._link_package_versions(link, req.name)[0] + remote_version_raw = remote_version[2] + remote_version_parsed = remote_version[0] + yield dist, remote_version_raw, remote_version_parsed + + def run_listing(self, options): + installed_packages = get_installed_distributions(local_only=options.local, skip=self.skip) + self.output_package_listing(installed_packages) + + def run_editables(self, options): + installed_packages = get_installed_distributions(local_only=options.local, editables_only=True) + self.output_package_listing(installed_packages) + + def output_package_listing(self, installed_packages): + installed_packages = sorted(installed_packages, key=lambda dist: dist.project_name.lower()) + for dist in installed_packages: + if dist_is_editable(dist): + line = '%s (%s, %s)' % (dist.project_name, dist.version, dist.location) + else: + line = '%s (%s)' % (dist.project_name, dist.version) + logger.notify(line) + + def run_uptodate(self, options): + uptodate = [] + for dist, remote_version_raw, remote_version_parsed in self.find_packages_latests_versions(options): + if dist.parsed_version == remote_version_parsed: + uptodate.append(dist) + self.output_package_listing(uptodate) diff --git a/awx/lib/site-packages/pip/commands/search.py b/awx/lib/site-packages/pip/commands/search.py new file mode 100644 index 0000000000..892eddd12f --- /dev/null +++ b/awx/lib/site-packages/pip/commands/search.py @@ -0,0 +1,130 @@ +import sys +import textwrap +import pkg_resources 
+import pip.download +from pip.basecommand import Command, SUCCESS +from pip.util import get_terminal_size +from pip.log import logger +from pip.backwardcompat import xmlrpclib, reduce, cmp +from pip.exceptions import CommandError +from pip.status_codes import NO_MATCHES_FOUND +from distutils.version import StrictVersion, LooseVersion + + +class SearchCommand(Command): + """Search for PyPI packages whose name or summary contains <query>.""" + name = 'search' + usage = """ + %prog [options] <query>""" + summary = 'Search PyPI for packages.' + + def __init__(self, *args, **kw): + super(SearchCommand, self).__init__(*args, **kw) + self.cmd_opts.add_option( + '--index', + dest='index', + metavar='URL', + default='https://pypi.python.org/pypi', + help='Base URL of Python Package Index (default %default)') + + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options, args): + if not args: + raise CommandError('Missing required argument (search query).') + query = args + index_url = options.index + + pypi_hits = self.search(query, index_url) + hits = transform_hits(pypi_hits) + + terminal_width = None + if sys.stdout.isatty(): + terminal_width = get_terminal_size()[0] + + print_results(hits, terminal_width=terminal_width) + if pypi_hits: + return SUCCESS + return NO_MATCHES_FOUND + + def search(self, query, index_url): + pypi = xmlrpclib.ServerProxy(index_url) + hits = pypi.search({'name': query, 'summary': query}, 'or') + return hits + + +def transform_hits(hits): + """ + The list from pypi is really a list of versions. We want a list of + packages with the list of versions stored inline. This converts the + list from pypi into one we can use. + """ + packages = {} + for hit in hits: + name = hit['name'] + summary = hit['summary'] + version = hit['version'] + score = hit['_pypi_ordering'] + if score is None: + score = 0 + + if name not in packages.keys(): + packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score} + else: + packages[name]['versions'].append(version) + + # if this is the highest version, replace summary and score + if version == highest_version(packages[name]['versions']): + packages[name]['summary'] = summary + packages[name]['score'] = score + + # each record has a unique name now, so we will convert the dict into a list sorted by score + package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True) + return package_list + + +def print_results(hits, name_column_width=25, terminal_width=None): + installed_packages = [p.project_name for p in pkg_resources.working_set] + for hit in hits: + name = hit['name'] + summary = hit['summary'] or '' + if terminal_width is not None: + # wrap and indent summary to fit terminal + summary = textwrap.wrap(summary, terminal_width - name_column_width - 5) + summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) + line = '%s - %s' % (name.ljust(name_column_width), summary) + try: + logger.notify(line) + if name in installed_packages: + dist = pkg_resources.get_distribution(name) + logger.indent += 2 + try: + latest = highest_version(hit['versions']) + if dist.version == latest: + logger.notify('INSTALLED: %s (latest)' % dist.version) + else: + logger.notify('INSTALLED: %s' % dist.version) + logger.notify('LATEST: %s' % latest) + finally: + logger.indent -= 2 + except UnicodeEncodeError: + pass + + +def compare_versions(version1, version2): + try: + return cmp(StrictVersion(version1), StrictVersion(version2)) + # in case of abnormal version number, fall back to LooseVersion + except 
ValueError: + pass + try: + return cmp(LooseVersion(version1), LooseVersion(version2)) + except TypeError: + # certain LooseVersion comparions raise due to unorderable types, + # fallback to string comparison + return cmp([str(v) for v in LooseVersion(version1).version], + [str(v) for v in LooseVersion(version2).version]) + + +def highest_version(versions): + return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions) diff --git a/awx/lib/site-packages/pip/commands/show.py b/awx/lib/site-packages/pip/commands/show.py new file mode 100644 index 0000000000..f47f4b01e6 --- /dev/null +++ b/awx/lib/site-packages/pip/commands/show.py @@ -0,0 +1,79 @@ +import os +import pkg_resources +from pip.basecommand import Command +from pip.log import logger + + +class ShowCommand(Command): + """Show information about one or more installed packages.""" + name = 'show' + usage = """ + %prog [options] <package> ...""" + summary = 'Show information about installed packages.' + + def __init__(self, *args, **kw): + super(ShowCommand, self).__init__(*args, **kw) + self.cmd_opts.add_option( + '-f', '--files', + dest='files', + action='store_true', + default=False, + help='Show the full list of installed files for each package.') + + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options, args): + if not args: + logger.warn('ERROR: Please provide a package name or names.') + return + query = args + + results = search_packages_info(query) + print_results(results, options.files) + + +def search_packages_info(query): + """ + Gather details from installed distributions. Print distribution name, + version, location, and installed files. Installed files requires a + pip generated 'installed-files.txt' in the distributions '.egg-info' + directory. + """ + installed_packages = dict( + [(p.project_name.lower(), p) for p in pkg_resources.working_set]) + for name in query: + normalized_name = name.lower() + if normalized_name in installed_packages: + dist = installed_packages[normalized_name] + package = { + 'name': dist.project_name, + 'version': dist.version, + 'location': dist.location, + 'requires': [dep.project_name for dep in dist.requires()], + } + filelist = os.path.join( + dist.location, + dist.egg_name() + '.egg-info', + 'installed-files.txt') + if os.path.isfile(filelist): + package['files'] = filelist + yield package + + +def print_results(distributions, list_all_files): + """ + Print the informations from installed distributions found. + """ + for dist in distributions: + logger.notify("---") + logger.notify("Name: %s" % dist['name']) + logger.notify("Version: %s" % dist['version']) + logger.notify("Location: %s" % dist['location']) + logger.notify("Requires: %s" % ', '.join(dist['requires'])) + if list_all_files: + logger.notify("Files:") + if 'files' in dist: + for line in open(dist['files']): + logger.notify(" %s" % line.strip()) + else: + logger.notify("Cannot locate installed-files.txt") diff --git a/awx/lib/site-packages/pip/commands/uninstall.py b/awx/lib/site-packages/pip/commands/uninstall.py new file mode 100644 index 0000000000..388053b20f --- /dev/null +++ b/awx/lib/site-packages/pip/commands/uninstall.py @@ -0,0 +1,54 @@ +from pip.req import InstallRequirement, RequirementSet, parse_requirements +from pip.basecommand import Command +from pip.exceptions import InstallationError + + +class UninstallCommand(Command): + """ + Uninstall packages. + + pip is able to uninstall most installed packages. 
Known exceptions are: + + - Pure distutils packages installed with ``python setup.py install``, which + leave behind no metadata to determine what files were installed. + - Script wrappers installed by ``python setup.py develop``. + """ + name = 'uninstall' + usage = """ + %prog [options] <package> ... + %prog [options] -r <requirements file> ...""" + summary = 'Uninstall packages.' + + def __init__(self, *args, **kw): + super(UninstallCommand, self).__init__(*args, **kw) + self.cmd_opts.add_option( + '-r', '--requirement', + dest='requirements', + action='append', + default=[], + metavar='file', + help='Uninstall all the packages listed in the given requirements file. ' + 'This option can be used multiple times.') + self.cmd_opts.add_option( + '-y', '--yes', + dest='yes', + action='store_true', + help="Don't ask for confirmation of uninstall deletions.") + + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options, args): + requirement_set = RequirementSet( + build_dir=None, + src_dir=None, + download_dir=None) + for name in args: + requirement_set.add_requirement( + InstallRequirement.from_line(name)) + for filename in options.requirements: + for req in parse_requirements(filename, options=options): + requirement_set.add_requirement(req) + if not requirement_set.has_requirements: + raise InstallationError('You must give at least one requirement ' + 'to %(name)s (see "pip help %(name)s")' % dict(name=self.name)) + requirement_set.uninstall(auto_confirm=options.yes) diff --git a/awx/lib/site-packages/pip/commands/unzip.py b/awx/lib/site-packages/pip/commands/unzip.py new file mode 100644 index 0000000000..7986514b76 --- /dev/null +++ b/awx/lib/site-packages/pip/commands/unzip.py @@ -0,0 +1,7 @@ +from pip.commands.zip import ZipCommand + + +class UnzipCommand(ZipCommand): + """Unzip individual packages.""" + name = 'unzip' + summary = 'Unzip individual packages.' diff --git a/awx/lib/site-packages/pip/commands/wheel.py b/awx/lib/site-packages/pip/commands/wheel.py new file mode 100644 index 0000000000..5bf1c2333b --- /dev/null +++ b/awx/lib/site-packages/pip/commands/wheel.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +import os +import sys +from pip.basecommand import Command +from pip.index import PackageFinder +from pip.log import logger +from pip.exceptions import CommandError, PreviousBuildDirError +from pip.req import InstallRequirement, RequirementSet, parse_requirements +from pip.util import normalize_path +from pip.wheel import WheelBuilder, wheel_setuptools_support, setuptools_requirement +from pip import cmdoptions + +DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse') + +class WheelCommand(Command): + """ + Build Wheel archives for your requirements and dependencies. + + Wheel is a built-package format, and offers the advantage of not recompiling your software during every install. + For more details, see the wheel docs: http://wheel.readthedocs.org/en/latest. + + Requirements: setuptools>=0.8, and wheel. + + 'pip wheel' uses the bdist_wheel setuptools extension from the wheel package to build individual wheels. + + """ + + name = 'wheel' + usage = """ + %prog [options] <requirement specifier> ... + %prog [options] -r <requirements file> ... + %prog [options] <vcs project url> ... + %prog [options] <local project path> ... + %prog [options] <archive url/path> ...""" + + summary = 'Build wheels from your requirements.' 
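As the WheelCommand docstring notes, each archive is produced by the bdist_wheel setuptools extension; pip's WheelBuilder effectively runs 'setup.py bdist_wheel' once per requirement. A rough subprocess equivalent for a single source tree (a sketch, assuming setuptools>=0.8 plus the wheel package are installed and that source_dir contains a setup.py; build_one_wheel, source_dir and wheel_dir are hypothetical names):

import subprocess
import sys

def build_one_wheel(source_dir, wheel_dir='wheelhouse'):
    # -d/--dist-dir tells bdist_wheel where to drop the built .whl,
    # which is what pip wheel's --wheel-dir option maps onto.
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '-d', wheel_dir],
        cwd=source_dir)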
+ + def __init__(self, *args, **kw): + super(WheelCommand, self).__init__(*args, **kw) + + cmd_opts = self.cmd_opts + + cmd_opts.add_option( + '-w', '--wheel-dir', + dest='wheel_dir', + metavar='dir', + default=DEFAULT_WHEEL_DIR, + help="Build wheels into <dir>, where the default is '<cwd>/wheelhouse'.") + cmd_opts.add_option(cmdoptions.use_wheel) + cmd_opts.add_option( + '--build-option', + dest='build_options', + metavar='options', + action='append', + help="Extra arguments to be supplied to 'setup.py bdist_wheel'.") + cmd_opts.add_option(cmdoptions.requirements) + cmd_opts.add_option(cmdoptions.download_cache) + cmd_opts.add_option(cmdoptions.no_deps) + cmd_opts.add_option(cmdoptions.build_dir) + + cmd_opts.add_option( + '--global-option', + dest='global_options', + action='append', + metavar='options', + help="Extra global options to be supplied to the setup.py " + "call before the 'bdist_wheel' command.") + + cmd_opts.add_option( + '--pre', + action='store_true', + default=False, + help="Include pre-release and development versions. By default, pip only finds stable versions.") + + cmd_opts.add_option(cmdoptions.no_clean) + + index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser) + + self.parser.insert_option_group(0, index_opts) + self.parser.insert_option_group(0, cmd_opts) + + def run(self, options, args): + + # confirm requirements + try: + import wheel.bdist_wheel + except ImportError: + raise CommandError("'pip wheel' requires bdist_wheel from the 'wheel' distribution.") + if not wheel_setuptools_support(): + raise CommandError("'pip wheel' requires %s." % setuptools_requirement) + + index_urls = [options.index_url] + options.extra_index_urls + if options.no_index: + logger.notify('Ignoring indexes: %s' % ','.join(index_urls)) + index_urls = [] + + finder = PackageFinder(find_links=options.find_links, + index_urls=index_urls, + use_mirrors=options.use_mirrors, + mirrors=options.mirrors, + use_wheel=options.use_wheel, + allow_external=options.allow_external, + allow_insecure=options.allow_insecure, + allow_all_external=options.allow_all_external, + allow_all_insecure=options.allow_all_insecure, + allow_all_prereleases=options.pre, + ) + + options.build_dir = os.path.abspath(options.build_dir) + requirement_set = RequirementSet( + build_dir=options.build_dir, + src_dir=None, + download_dir=None, + download_cache=options.download_cache, + ignore_dependencies=options.ignore_dependencies, + ignore_installed=True) + + #parse args and/or requirements files + for name in args: + if name.endswith(".whl"): + logger.notify("ignoring %s" % name) + continue + requirement_set.add_requirement( + InstallRequirement.from_line(name, None)) + + for filename in options.requirements: + for req in parse_requirements(filename, finder=finder, options=options): + if req.editable or (req.name is None and req.url.endswith(".whl")): + logger.notify("ignoring %s" % req.url) + continue + requirement_set.add_requirement(req) + + #fail if no requirements + if not requirement_set.has_requirements: + opts = {'name': self.name} + msg = ('You must give at least one requirement ' + 'to %(name)s (see "pip help %(name)s")' % opts) + logger.error(msg) + return + + try: + #build wheels + wb = WheelBuilder( + requirement_set, + finder, + options.wheel_dir, + build_options = options.build_options or [], + global_options = options.global_options or [] + ) + wb.build() + except PreviousBuildDirError: + return + finally: + if not options.no_clean: + requirement_set.cleanup_files() + diff --git 
a/awx/lib/site-packages/pip/commands/zip.py b/awx/lib/site-packages/pip/commands/zip.py new file mode 100644 index 0000000000..11ac919846 --- /dev/null +++ b/awx/lib/site-packages/pip/commands/zip.py @@ -0,0 +1,348 @@ +import sys +import re +import fnmatch +import os +import shutil +import zipfile +from pip.util import display_path, backup_dir, rmtree +from pip.log import logger +from pip.exceptions import InstallationError +from pip.basecommand import Command + + +class ZipCommand(Command): + """Zip individual packages.""" + name = 'zip' + usage = """ + %prog [options] <package> ...""" + summary = 'Zip individual packages.' + + def __init__(self, *args, **kw): + super(ZipCommand, self).__init__(*args, **kw) + if self.name == 'zip': + self.cmd_opts.add_option( + '--unzip', + action='store_true', + dest='unzip', + help='Unzip (rather than zip) a package.') + else: + self.cmd_opts.add_option( + '--zip', + action='store_false', + dest='unzip', + default=True, + help='Zip (rather than unzip) a package.') + self.cmd_opts.add_option( + '--no-pyc', + action='store_true', + dest='no_pyc', + help='Do not include .pyc files in zip files (useful on Google App Engine).') + self.cmd_opts.add_option( + '-l', '--list', + action='store_true', + dest='list', + help='List the packages available, and their zip status.') + self.cmd_opts.add_option( + '--sort-files', + action='store_true', + dest='sort_files', + help='With --list, sort packages according to how many files they contain.') + self.cmd_opts.add_option( + '--path', + action='append', + dest='paths', + help='Restrict operations to the given paths (may include wildcards).') + self.cmd_opts.add_option( + '-n', '--simulate', + action='store_true', + help='Do not actually perform the zip/unzip operation.') + + self.parser.insert_option_group(0, self.cmd_opts) + + def paths(self): + """All the entries of sys.path, possibly restricted by --path""" + if not self.select_paths: + return sys.path + result = [] + match_any = set() + for path in sys.path: + path = os.path.normcase(os.path.abspath(path)) + for match in self.select_paths: + match = os.path.normcase(os.path.abspath(match)) + if '*' in match: + if re.search(fnmatch.translate(match + '*'), path): + result.append(path) + match_any.add(match) + break + else: + if path.startswith(match): + result.append(path) + match_any.add(match) + break + else: + logger.debug("Skipping path %s because it doesn't match %s" + % (path, ', '.join(self.select_paths))) + for match in self.select_paths: + if match not in match_any and '*' not in match: + result.append(match) + logger.debug("Adding path %s because it doesn't match " + "anything already on sys.path" % match) + return result + + def run(self, options, args): + self.select_paths = options.paths + self.simulate = options.simulate + if options.list: + return self.list(options, args) + if not args: + raise InstallationError( + 'You must give at least one package to zip or unzip') + packages = [] + for arg in args: + module_name, filename = self.find_package(arg) + if options.unzip and os.path.isdir(filename): + raise InstallationError( + 'The module %s (in %s) is not a zip file; cannot be unzipped' + % (module_name, filename)) + elif not options.unzip and not os.path.isdir(filename): + raise InstallationError( + 'The module %s (in %s) is not a directory; cannot be zipped' + % (module_name, filename)) + packages.append((module_name, filename)) + last_status = None + for module_name, filename in packages: + if options.unzip: + last_status = self.unzip_package(module_name, 
filename) + else: + last_status = self.zip_package(module_name, filename, options.no_pyc) + return last_status + + def unzip_package(self, module_name, filename): + zip_filename = os.path.dirname(filename) + if not os.path.isfile(zip_filename) and zipfile.is_zipfile(zip_filename): + raise InstallationError( + 'Module %s (in %s) isn\'t located in a zip file in %s' + % (module_name, filename, zip_filename)) + package_path = os.path.dirname(zip_filename) + if not package_path in self.paths(): + logger.warn( + 'Unpacking %s into %s, but %s is not on sys.path' + % (display_path(zip_filename), display_path(package_path), + display_path(package_path))) + logger.notify('Unzipping %s (in %s)' % (module_name, display_path(zip_filename))) + if self.simulate: + logger.notify('Skipping remaining operations because of --simulate') + return + logger.indent += 2 + try: + ## FIXME: this should be undoable: + zip = zipfile.ZipFile(zip_filename) + to_save = [] + for info in zip.infolist(): + name = info.filename + if name.startswith(module_name + os.path.sep): + content = zip.read(name) + dest = os.path.join(package_path, name) + if not os.path.exists(os.path.dirname(dest)): + os.makedirs(os.path.dirname(dest)) + if not content and dest.endswith(os.path.sep): + if not os.path.exists(dest): + os.makedirs(dest) + else: + f = open(dest, 'wb') + f.write(content) + f.close() + else: + to_save.append((name, zip.read(name))) + zip.close() + if not to_save: + logger.info('Removing now-empty zip file %s' % display_path(zip_filename)) + os.unlink(zip_filename) + self.remove_filename_from_pth(zip_filename) + else: + logger.info('Removing entries in %s/ from zip file %s' % (module_name, display_path(zip_filename))) + zip = zipfile.ZipFile(zip_filename, 'w') + for name, content in to_save: + zip.writestr(name, content) + zip.close() + finally: + logger.indent -= 2 + + def zip_package(self, module_name, filename, no_pyc): + orig_filename = filename + logger.notify('Zip %s (in %s)' % (module_name, display_path(filename))) + logger.indent += 2 + if filename.endswith('.egg'): + dest_filename = filename + else: + dest_filename = filename + '.zip' + try: + ## FIXME: I think this needs to be undoable: + if filename == dest_filename: + filename = backup_dir(orig_filename) + logger.notify('Moving %s aside to %s' % (orig_filename, filename)) + if not self.simulate: + shutil.move(orig_filename, filename) + try: + logger.info('Creating zip file in %s' % display_path(dest_filename)) + if not self.simulate: + zip = zipfile.ZipFile(dest_filename, 'w') + zip.writestr(module_name + '/', '') + for dirpath, dirnames, filenames in os.walk(filename): + if no_pyc: + filenames = [f for f in filenames + if not f.lower().endswith('.pyc')] + for fns, is_dir in [(dirnames, True), (filenames, False)]: + for fn in fns: + full = os.path.join(dirpath, fn) + dest = os.path.join(module_name, dirpath[len(filename):].lstrip(os.path.sep), fn) + if is_dir: + zip.writestr(dest + '/', '') + else: + zip.write(full, dest) + zip.close() + logger.info('Removing old directory %s' % display_path(filename)) + if not self.simulate: + rmtree(filename) + except: + ## FIXME: need to do an undo here + raise + ## FIXME: should also be undone: + self.add_filename_to_pth(dest_filename) + finally: + logger.indent -= 2 + + def remove_filename_from_pth(self, filename): + for pth in self.pth_files(): + f = open(pth, 'r') + lines = f.readlines() + f.close() + new_lines = [ + l for l in lines if l.strip() != filename] + if lines != new_lines: + logger.info('Removing reference to 
%s from .pth file %s' + % (display_path(filename), display_path(pth))) + if not [line for line in new_lines if line]: + logger.info('%s file would be empty: deleting' % display_path(pth)) + if not self.simulate: + os.unlink(pth) + else: + if not self.simulate: + f = open(pth, 'wb') + f.writelines(new_lines) + f.close() + return + logger.warn('Cannot find a reference to %s in any .pth file' % display_path(filename)) + + def add_filename_to_pth(self, filename): + path = os.path.dirname(filename) + dest = filename + '.pth' + if path not in self.paths(): + logger.warn('Adding .pth file %s, but it is not on sys.path' % display_path(dest)) + if not self.simulate: + if os.path.exists(dest): + f = open(dest) + lines = f.readlines() + f.close() + if lines and not lines[-1].endswith('\n'): + lines[-1] += '\n' + lines.append(filename + '\n') + else: + lines = [filename + '\n'] + f = open(dest, 'wb') + f.writelines(lines) + f.close() + + def pth_files(self): + for path in self.paths(): + if not os.path.exists(path) or not os.path.isdir(path): + continue + for filename in os.listdir(path): + if filename.endswith('.pth'): + yield os.path.join(path, filename) + + def find_package(self, package): + for path in self.paths(): + full = os.path.join(path, package) + if os.path.exists(full): + return package, full + if not os.path.isdir(path) and zipfile.is_zipfile(path): + zip = zipfile.ZipFile(path, 'r') + try: + zip.read(os.path.join(package, '__init__.py')) + except KeyError: + pass + else: + zip.close() + return package, full + zip.close() + ## FIXME: need special error for package.py case: + raise InstallationError( + 'No package with the name %s found' % package) + + def list(self, options, args): + if args: + raise InstallationError( + 'You cannot give an argument with --list') + for path in sorted(self.paths()): + if not os.path.exists(path): + continue + basename = os.path.basename(path.rstrip(os.path.sep)) + if os.path.isfile(path) and zipfile.is_zipfile(path): + if os.path.dirname(path) not in self.paths(): + logger.notify('Zipped egg: %s' % display_path(path)) + continue + if (basename != 'site-packages' and basename != 'dist-packages' + and not path.replace('\\', '/').endswith('lib/python')): + continue + logger.notify('In %s:' % display_path(path)) + logger.indent += 2 + zipped = [] + unzipped = [] + try: + for filename in sorted(os.listdir(path)): + ext = os.path.splitext(filename)[1].lower() + if ext in ('.pth', '.egg-info', '.egg-link'): + continue + if ext == '.py': + logger.info('Not displaying %s: not a package' % display_path(filename)) + continue + full = os.path.join(path, filename) + if os.path.isdir(full): + unzipped.append((filename, self.count_package(full))) + elif zipfile.is_zipfile(full): + zipped.append(filename) + else: + logger.info('Unknown file: %s' % display_path(filename)) + if zipped: + logger.notify('Zipped packages:') + logger.indent += 2 + try: + for filename in zipped: + logger.notify(filename) + finally: + logger.indent -= 2 + else: + logger.notify('No zipped packages.') + if unzipped: + if options.sort_files: + unzipped.sort(key=lambda x: -x[1]) + logger.notify('Unzipped packages:') + logger.indent += 2 + try: + for filename, count in unzipped: + logger.notify('%s (%i files)' % (filename, count)) + finally: + logger.indent -= 2 + else: + logger.notify('No unzipped packages.') + finally: + logger.indent -= 2 + + def count_package(self, path): + total = 0 + for dirpath, dirnames, filenames in os.walk(path): + filenames = [f for f in filenames + if not 
f.lower().endswith('.pyc')] + total += len(filenames) + return total diff --git a/awx/lib/site-packages/pip/download.py b/awx/lib/site-packages/pip/download.py new file mode 100644 index 0000000000..653495f2a3 --- /dev/null +++ b/awx/lib/site-packages/pip/download.py @@ -0,0 +1,653 @@ +import cgi +import getpass +import hashlib +import mimetypes +import os +import platform +import re +import shutil +import socket +import ssl +import sys +import tempfile + +import pip + +from pip.backwardcompat import (urllib, urllib2, httplib, + urlparse, string_types, get_http_message_param, + match_hostname, CertificateError) +from pip.exceptions import InstallationError, HashMismatch +from pip.util import (splitext, rmtree, format_size, display_path, + backup_dir, ask_path_exists, unpack_file, + create_download_cache_folder, cache_download) +from pip.vcs import vcs +from pip.log import logger +from pip.locations import default_cert_path + +__all__ = ['get_file_content', 'urlopen', + 'is_url', 'url_to_path', 'path_to_url', 'path_to_url2', + 'geturl', 'is_archive_file', 'unpack_vcs_link', + 'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url'] + + +def build_user_agent(): + """Return a string representing the user agent.""" + _implementation = platform.python_implementation() + + if _implementation == 'CPython': + _implementation_version = platform.python_version() + elif _implementation == 'PyPy': + _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro) + if sys.pypy_version_info.releaselevel != 'final': + _implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel]) + elif _implementation == 'Jython': + _implementation_version = platform.python_version() # Complete Guess + elif _implementation == 'IronPython': + _implementation_version = platform.python_version() # Complete Guess + else: + _implementation_version = 'Unknown' + + try: + p_system = platform.system() + p_release = platform.release() + except IOError: + p_system = 'Unknown' + p_release = 'Unknown' + + return " ".join(['pip/%s' % pip.__version__, + '%s/%s' % (_implementation, _implementation_version), + '%s/%s' % (p_system, p_release)]) + + +def get_file_content(url, comes_from=None): + """Gets the content of a file; it may be a filename, file: URL, or + http: URL. Returns (location, content). 
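# --- Editor's aside: an illustrative sketch, not part of the vendored diff ---
# build_user_agent() above joins three "product/version" tokens. A standalone
# sketch of the same idea; the pip version here is an assumed example value:
import platform

def sketch_user_agent(pip_version='1.4.1'):
    # e.g. "pip/1.4.1 CPython/2.7.5 Linux/3.10.0"
    return ' '.join(['pip/%s' % pip_version,
                     '%s/%s' % (platform.python_implementation(),
                                platform.python_version()),
                     '%s/%s' % (platform.system(), platform.release())])
# --- end aside ---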
Content is unicode.""" + match = _scheme_re.search(url) + if match: + scheme = match.group(1).lower() + if (scheme == 'file' and comes_from + and comes_from.startswith('http')): + raise InstallationError( + 'Requirements file %s references URL %s, which is local' + % (comes_from, url)) + if scheme == 'file': + path = url.split(':', 1)[1] + path = path.replace('\\', '/') + match = _url_slash_drive_re.match(path) + if match: + path = match.group(1) + ':' + path.split('|', 1)[1] + path = urllib.unquote(path) + if path.startswith('/'): + path = '/' + path.lstrip('/') + url = path + else: + ## FIXME: catch some errors + resp = urlopen(url) + encoding = get_http_message_param(resp.headers, 'charset', 'utf-8') + return geturl(resp), resp.read().decode(encoding) + try: + f = open(url) + content = f.read() + except IOError: + e = sys.exc_info()[1] + raise InstallationError('Could not open requirements file: %s' % str(e)) + else: + f.close() + return url, content + + +_scheme_re = re.compile(r'^(http|https|file):', re.I) +_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I) + +class VerifiedHTTPSConnection(httplib.HTTPSConnection): + """ + A connection that wraps connections with ssl certificate verification. + """ + def connect(self): + + self.connection_kwargs = {} + + #TODO: refactor compatibility logic into backwardcompat? + + # for > py2.5 + if hasattr(self, 'timeout'): + self.connection_kwargs.update(timeout = self.timeout) + + # for >= py2.7 + if hasattr(self, 'source_address'): + self.connection_kwargs.update(source_address = self.source_address) + + sock = socket.create_connection((self.host, self.port), **self.connection_kwargs) + + # for >= py2.7 + if getattr(self, '_tunnel_host', None): + self.sock = sock + self._tunnel() + + # get alternate bundle or use our included bundle + cert_path = os.environ.get('PIP_CERT', '') or default_cert_path + + self.sock = ssl.wrap_socket(sock, + self.key_file, + self.cert_file, + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=cert_path) + + try: + match_hostname(self.sock.getpeercert(), self.host) + except CertificateError: + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + + + +class VerifiedHTTPSHandler(urllib2.HTTPSHandler): + """ + A HTTPSHandler that uses our own VerifiedHTTPSConnection. + """ + def __init__(self, connection_class = VerifiedHTTPSConnection): + self.specialized_conn_class = connection_class + urllib2.HTTPSHandler.__init__(self) + def https_open(self, req): + return self.do_open(self.specialized_conn_class, req) + + +class URLOpener(object): + """ + pip's own URL helper that adds HTTP auth and proxy support + """ + def __init__(self): + self.passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + self.proxy_handler = None + + def __call__(self, url): + """ + If the given url contains auth info or if a normal request gets a 401 + response, an attempt is made to fetch the resource using basic HTTP + auth. 
+
+        """
+        url, username, password, scheme = self.extract_credentials(url)
+        if username is None:
+            try:
+                response = self.get_opener(scheme=scheme).open(url)
+            except urllib2.HTTPError:
+                e = sys.exc_info()[1]
+                if e.code != 401:
+                    raise
+                response = self.get_response(url)
+        else:
+            response = self.get_response(url, username, password)
+        return response
+
+    def get_request(self, url):
+        """
+        Wraps the URL to retrieve to protect against "creative"
+        interpretation of the RFC: http://bugs.python.org/issue8732
+        """
+        if isinstance(url, string_types):
+            url = urllib2.Request(url, headers={'Accept-encoding': 'identity'})
+        return url
+
+    def get_response(self, url, username=None, password=None):
+        """
+        Does the dirty work of actually getting the response object using
+        urllib2 and its HTTP auth builtins.
+        """
+        scheme, netloc, path, query, frag = urlparse.urlsplit(url)
+        req = self.get_request(url)
+
+        stored_username, stored_password = self.passman.find_user_password(None, netloc)
+        # see if we have a password stored
+        if stored_username is None:
+            if username is None and self.prompting:
+                username = urllib.quote(raw_input('User for %s: ' % netloc))
+                password = urllib.quote(getpass.getpass('Password: '))
+            if username and password:
+                self.passman.add_password(None, netloc, username, password)
+            stored_username, stored_password = self.passman.find_user_password(None, netloc)
+        authhandler = urllib2.HTTPBasicAuthHandler(self.passman)
+        opener = self.get_opener(authhandler, scheme=scheme)
+        # FIXME: should catch a 401 and offer to let the user reenter credentials
+        return opener.open(req)
+
+    def get_opener(self, *args, **kwargs):
+        """
+        Build an OpenerDirector instance based on the scheme and proxy option
+        """
+
+        args = list(args)
+        if self.proxy_handler:
+            args.extend([self.proxy_handler, urllib2.CacheFTPHandler])
+
+        if kwargs.get('scheme') == 'https':
+            https_handler = VerifiedHTTPSHandler()
+            director = urllib2.build_opener(https_handler, *args)
+            # strip out HTTPHandler to prevent MITM spoof
+            for handler in director.handlers:
+                if isinstance(handler, urllib2.HTTPHandler):
+                    director.handlers.remove(handler)
+        else:
+            director = urllib2.build_opener(*args)
+
+        # Add our new headers to the opener
+        headers = [x for x in director.addheaders if x[0].lower() != "user-agent"]
+        headers.append(("User-agent", build_user_agent()))
+        director.addheaders = headers
+
+        return director
+
+    def setup(self, proxystr='', prompting=True):
+        """
+        Sets the proxy handler given the option passed on the command
+        line. If an empty string is passed it looks at the HTTP_PROXY
+        environment variable.
+        """
+        self.prompting = prompting
+        proxy = self.get_proxy(proxystr)
+        if proxy:
+            self.proxy_handler = urllib2.ProxyHandler({"http": proxy, "ftp": proxy, "https": proxy})
+
+    def parse_credentials(self, netloc):
+        if "@" in netloc:
+            userinfo = netloc.rsplit("@", 1)[0]
+            if ":" in userinfo:
+                return userinfo.split(":", 1)
+            return userinfo, None
+        return None, None
+
+    def extract_credentials(self, url):
+        """
+        Extracts user/password from a url.
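# --- Editor's aside: an illustrative sketch, not part of the vendored diff ---
# parse_credentials() above splits the userinfo part of a netloc; the host
# below is a made-up example:
def sketch_parse_credentials(netloc):
    if '@' in netloc:
        userinfo = netloc.rsplit('@', 1)[0]
        if ':' in userinfo:
            return tuple(userinfo.split(':', 1))
        return userinfo, None
    return None, None

assert sketch_parse_credentials('alice:s3cret@pypi.example.com') == ('alice', 's3cret')
assert sketch_parse_credentials('pypi.example.com') == (None, None)
# --- end aside ---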
+ + Returns a tuple: + (url-without-auth, username, password) + """ + if isinstance(url, urllib2.Request): + result = urlparse.urlsplit(url.get_full_url()) + else: + result = urlparse.urlsplit(url) + scheme, netloc, path, query, frag = result + + username, password = self.parse_credentials(netloc) + if username is None: + return url, None, None, scheme + elif password is None and self.prompting: + # remove the auth credentials from the url part + netloc = netloc.replace('%s@' % username, '', 1) + # prompt for the password + prompt = 'Password for %s@%s: ' % (username, netloc) + password = urllib.quote(getpass.getpass(prompt)) + else: + # remove the auth credentials from the url part + netloc = netloc.replace('%s:%s@' % (username, password), '', 1) + + target_url = urlparse.urlunsplit((scheme, netloc, path, query, frag)) + return target_url, username, password, scheme + + def get_proxy(self, proxystr=''): + """ + Get the proxy given the option passed on the command line. + If an empty string is passed it looks at the HTTP_PROXY + environment variable. + """ + if not proxystr: + proxystr = os.environ.get('HTTP_PROXY', '') + if proxystr: + if '@' in proxystr: + user_password, server_port = proxystr.split('@', 1) + if ':' in user_password: + user, password = user_password.split(':', 1) + else: + user = user_password + prompt = 'Password for %s@%s: ' % (user, server_port) + password = urllib.quote(getpass.getpass(prompt)) + return '%s:%s@%s' % (user, password, server_port) + else: + return proxystr + else: + return None + +urlopen = URLOpener() + + +def is_url(name): + """Returns true if the name looks like a URL""" + if ':' not in name: + return False + scheme = name.split(':', 1)[0].lower() + return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes + + +def url_to_path(url): + """ + Convert a file: URL to a path. + """ + assert url.startswith('file:'), ( + "You can only turn file: urls into filenames (not %r)" % url) + path = url[len('file:'):].lstrip('/') + path = urllib.unquote(path) + if _url_drive_re.match(path): + path = path[0] + ':' + path[2:] + else: + path = '/' + path + return path + + +_drive_re = re.compile('^([a-z]):', re.I) +_url_drive_re = re.compile('^([a-z])[:|]', re.I) + + +def path_to_url(path): + """ + Convert a path to a file: URL. The path will be made absolute. + """ + path = os.path.normcase(os.path.abspath(path)) + if _drive_re.match(path): + path = path[0] + '|' + path[2:] + url = urllib.quote(path) + url = url.replace(os.path.sep, '/') + url = url.lstrip('/') + return 'file:///' + url + + +def path_to_url2(path): + """ + Convert a path to a file: URL. The path will be made absolute and have + quoted path parts. + """ + path = os.path.normpath(os.path.abspath(path)) + drive, path = os.path.splitdrive(path) + filepath = path.split(os.path.sep) + url = '/'.join([urllib.quote(part) for part in filepath]) + if not drive: + url = url.lstrip('/') + return 'file:///' + drive + url + + +def geturl(urllib2_resp): + """ + Use instead of urllib.addinfourl.geturl(), which appears to have + some issues with dropping the double slash for certain schemes + (e.g. file://). This implementation is probably over-eager, as it + always restores '://' if it is missing, and it appears some url + schemata aren't always followed by '//' after the colon, but as + far as I know pip doesn't need any of those. 
+    The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
+
+    This function assumes that
+        scheme:/foo/bar
+    is the same as
+        scheme:///foo/bar
+    """
+    url = urllib2_resp.geturl()
+    scheme, rest = url.split(':', 1)
+    if rest.startswith('//'):
+        return url
+    else:
+        # FIXME: write a good test to cover it
+        return '%s://%s' % (scheme, rest)
+
+
+def is_archive_file(name):
+    """Return True if `name` is considered an archive file."""
+    archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle',
+                '.whl')
+    ext = splitext(name)[1].lower()
+    if ext in archives:
+        return True
+    return False
+
+
+def unpack_vcs_link(link, location, only_download=False):
+    vcs_backend = _get_used_vcs_backend(link)
+    if only_download:
+        vcs_backend.export(location)
+    else:
+        vcs_backend.unpack(location)
+
+
+def unpack_file_url(link, location):
+    source = url_to_path(link.url)
+    content_type = mimetypes.guess_type(source)[0]
+    if os.path.isdir(source):
+        # delete the location since shutil will create it again :(
+        if os.path.isdir(location):
+            rmtree(location)
+        shutil.copytree(source, location)
+    else:
+        unpack_file(source, location, content_type, link)
+
+
+def _get_used_vcs_backend(link):
+    for backend in vcs.backends:
+        if link.scheme in backend.schemes:
+            vcs_backend = backend(link.url)
+            return vcs_backend
+
+
+def is_vcs_url(link):
+    return bool(_get_used_vcs_backend(link))
+
+
+def is_file_url(link):
+    return link.url.lower().startswith('file:')
+
+
+def _check_hash(download_hash, link):
+    if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
+        logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
+                     % (download_hash.digest_size, link, link.hash_name))
+        raise HashMismatch('Hash name mismatch for package %s' % link)
+    if download_hash.hexdigest() != link.hash:
+        logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
+ % (link, download_hash.hexdigest(), link.hash)) + raise HashMismatch('Bad %s hash for package %s' % (link.hash_name, link)) + + +def _get_hash_from_file(target_file, link): + try: + download_hash = hashlib.new(link.hash_name) + except (ValueError, TypeError): + logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link)) + return None + + fp = open(target_file, 'rb') + while True: + chunk = fp.read(4096) + if not chunk: + break + download_hash.update(chunk) + fp.close() + return download_hash + + +def _download_url(resp, link, temp_location): + fp = open(temp_location, 'wb') + download_hash = None + if link.hash and link.hash_name: + try: + download_hash = hashlib.new(link.hash_name) + except ValueError: + logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link)) + try: + total_length = int(resp.info()['content-length']) + except (ValueError, KeyError, TypeError): + total_length = 0 + downloaded = 0 + show_progress = total_length > 40 * 1000 or not total_length + show_url = link.show_url + try: + if show_progress: + ## FIXME: the URL can get really long in this message: + if total_length: + logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length))) + else: + logger.start_progress('Downloading %s (unknown size): ' % show_url) + else: + logger.notify('Downloading %s' % show_url) + logger.info('Downloading from URL %s' % link) + + while True: + chunk = resp.read(4096) + if not chunk: + break + downloaded += len(chunk) + if show_progress: + if not total_length: + logger.show_progress('%s' % format_size(downloaded)) + else: + logger.show_progress('%3i%% %s' % (100 * downloaded / total_length, format_size(downloaded))) + if download_hash is not None: + download_hash.update(chunk) + fp.write(chunk) + fp.close() + finally: + if show_progress: + logger.end_progress('%s downloaded' % format_size(downloaded)) + return download_hash + + +def _copy_file(filename, location, content_type, link): + copy = True + download_location = os.path.join(location, link.filename) + if os.path.exists(download_location): + response = ask_path_exists( + 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup ' % + display_path(download_location), ('i', 'w', 'b')) + if response == 'i': + copy = False + elif response == 'w': + logger.warn('Deleting %s' % display_path(download_location)) + os.remove(download_location) + elif response == 'b': + dest_file = backup_dir(download_location) + logger.warn('Backing up %s to %s' + % (display_path(download_location), display_path(dest_file))) + shutil.move(download_location, dest_file) + if copy: + shutil.copy(filename, download_location) + logger.indent -= 2 + logger.notify('Saved %s' % display_path(download_location)) + + +def unpack_http_url(link, location, download_cache, download_dir=None): + temp_dir = tempfile.mkdtemp('-unpack', 'pip-') + temp_location = None + target_url = link.url.split('#', 1)[0] + + already_cached = False + cache_file = None + cache_content_type_file = None + download_hash = None + if download_cache: + cache_file = os.path.join(download_cache, + urllib.quote(target_url, '')) + cache_content_type_file = cache_file + '.content-type' + already_cached = ( + os.path.exists(cache_file) and + os.path.exists(cache_content_type_file) + ) + if not os.path.isdir(download_cache): + create_download_cache_folder(download_cache) + + already_downloaded = None + if download_dir: + already_downloaded = os.path.join(download_dir, link.filename) + if not os.path.exists(already_downloaded): + already_downloaded = None + + if already_downloaded: + temp_location = already_downloaded + content_type = mimetypes.guess_type(already_downloaded)[0] + logger.notify('File was already downloaded %s' % already_downloaded) + if link.hash: + download_hash = _get_hash_from_file(temp_location, link) + try: + _check_hash(download_hash, link) + except HashMismatch: + logger.warn( + 'Previously-downloaded file %s has bad hash, ' + 're-downloading.' % temp_location + ) + temp_location = None + os.unlink(already_downloaded) + already_downloaded = None + + # We have a cached file, and we haven't already found a good downloaded copy + if already_cached and not temp_location: + with open(cache_content_type_file) as fp: + content_type = fp.read().strip() + temp_location = cache_file + logger.notify('Using download cache from %s' % cache_file) + if link.hash and link.hash_name: + download_hash = _get_hash_from_file(cache_file, link) + try: + _check_hash(download_hash, link) + except HashMismatch: + logger.warn( + 'Cached file %s has bad hash, ' + 're-downloading.' % temp_location + ) + temp_location = None + os.unlink(cache_file) + os.unlink(cache_content_type_file) + already_cached = False + + # We don't have either a cached or a downloaded copy + if not temp_location: + resp = _get_response_from_url(target_url, link) + content_type = resp.info().get('content-type', '') + filename = link.filename # fallback + # Have a look at the Content-Disposition header for a better guess + content_disposition = resp.info().get('content-disposition') + if content_disposition: + type, params = cgi.parse_header(content_disposition) + # We use ``or`` here because we don't want to use an "empty" value + # from the filename param. 
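# --- Editor's aside: an illustrative sketch, not part of the vendored diff ---
# cgi.parse_header() splits a Content-Disposition header into its main value
# and a parameter dict; that is what feeds the params.get('filename') fallback
# just below. The header value here is a made-up example:
import cgi

value, params = cgi.parse_header('attachment; filename="South-0.8.2.tar.gz"')
assert value == 'attachment'
assert params == {'filename': 'South-0.8.2.tar.gz'}
# --- end aside ---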
+ filename = params.get('filename') or filename + ext = splitext(filename)[1] + if not ext: + ext = mimetypes.guess_extension(content_type) + if ext: + filename += ext + if not ext and link.url != geturl(resp): + ext = os.path.splitext(geturl(resp))[1] + if ext: + filename += ext + temp_location = os.path.join(temp_dir, filename) + download_hash = _download_url(resp, link, temp_location) + if link.hash and link.hash_name: + _check_hash(download_hash, link) + + if download_dir and not already_downloaded: + _copy_file(temp_location, download_dir, content_type, link) + unpack_file(temp_location, location, content_type, link) + if cache_file and not already_cached: + cache_download(cache_file, temp_location, content_type) + if not (already_cached or already_downloaded): + os.unlink(temp_location) + os.rmdir(temp_dir) + + +def _get_response_from_url(target_url, link): + try: + resp = urlopen(target_url) + except urllib2.HTTPError: + e = sys.exc_info()[1] + logger.fatal("HTTP error %s while getting %s" % (e.code, link)) + raise + except IOError: + e = sys.exc_info()[1] + # Typically an FTP error + logger.fatal("Error %s while getting %s" % (e, link)) + raise + return resp + + +class Urllib2HeadRequest(urllib2.Request): + def get_method(self): + return "HEAD" diff --git a/awx/lib/site-packages/pip/exceptions.py b/awx/lib/site-packages/pip/exceptions.py new file mode 100644 index 0000000000..55158af361 --- /dev/null +++ b/awx/lib/site-packages/pip/exceptions.py @@ -0,0 +1,38 @@ +"""Exceptions used throughout package""" + + +class PipError(Exception): + """Base pip exception""" + + +class InstallationError(PipError): + """General exception during installation""" + + +class UninstallationError(PipError): + """General exception during uninstallation""" + + +class DistributionNotFound(InstallationError): + """Raised when a distribution cannot be found to satisfy a requirement""" + + +class BestVersionAlreadyInstalled(PipError): + """Raised when the most up-to-date version of a package is already + installed. 
""" + + +class BadCommand(PipError): + """Raised when virtualenv or a command is not found""" + + +class CommandError(PipError): + """Raised when there is an error in command-line arguments""" + + +class PreviousBuildDirError(PipError): + """Raised when there's a previous conflicting build directory""" + + +class HashMismatch(InstallationError): + """Distribution file hash values don't match.""" diff --git a/awx/lib/site-packages/pip/index.py b/awx/lib/site-packages/pip/index.py new file mode 100644 index 0000000000..98a6cac5b5 --- /dev/null +++ b/awx/lib/site-packages/pip/index.py @@ -0,0 +1,1041 @@ +"""Routines related to PyPI, indexes""" + +import sys +import os +import re +import gzip +import mimetypes +import posixpath +import pkg_resources +import random +import socket +import ssl +import string +import zlib + +try: + import threading +except ImportError: + import dummy_threading as threading + +from pip.log import logger +from pip.util import Inf, normalize_name, splitext, is_prerelease +from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled,\ + InstallationError +from pip.backwardcompat import (WindowsError, BytesIO, + Queue, urlparse, + URLError, HTTPError, u, + product, url2pathname, + Empty as QueueEmpty) +from pip.backwardcompat import CertificateError +from pip.download import urlopen, path_to_url2, url_to_path, geturl, Urllib2HeadRequest +from pip.wheel import Wheel, wheel_ext, wheel_setuptools_support, setuptools_requirement +from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform +from pip.vendor import html5lib + +__all__ = ['PackageFinder'] + + +DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org" + + +class PackageFinder(object): + """This finds packages. + + This is meant to match easy_install's technique for looking for + packages, by reading pages and looking for appropriate links + """ + + def __init__(self, find_links, index_urls, + use_mirrors=False, mirrors=None, main_mirror_url=None, + use_wheel=False, allow_external=[], allow_insecure=[], + allow_all_external=False, allow_all_insecure=False, + allow_all_prereleases=False): + self.find_links = find_links + self.index_urls = index_urls + self.dependency_links = [] + self.cache = PageCache() + # These are boring links that have already been logged somehow: + self.logged_links = set() + if use_mirrors: + self.mirror_urls = self._get_mirror_urls(mirrors, main_mirror_url) + logger.info('Using PyPI mirrors: %s' % ', '.join(self.mirror_urls)) + else: + self.mirror_urls = [] + self.use_wheel = use_wheel + + # Do we allow (safe and verifiable) externally hosted files? + self.allow_external = set(normalize_name(n) for n in allow_external) + + # Which names are allowed to install insecure and unverifiable files? + self.allow_insecure = set(normalize_name(n) for n in allow_insecure) + + # Do we allow all (safe and verifiable) externally hosted files? + self.allow_all_external = allow_all_external + + # Do we allow unsafe and unverifiable files? + self.allow_all_insecure = allow_all_insecure + + # Stores if we ignored any external links so that we can instruct + # end users how to install them if no distributions are available + self.need_warn_external = False + + # Stores if we ignored any unsafe links so that we can instruct + # end users how to install them if no distributions are available + self.need_warn_insecure = False + + # Do we want to allow _all_ pre-releases? 
+ self.allow_all_prereleases = allow_all_prereleases + + @property + def use_wheel(self): + return self._use_wheel + + @use_wheel.setter + def use_wheel(self, value): + self._use_wheel = value + if self._use_wheel and not wheel_setuptools_support(): + raise InstallationError("pip's wheel support requires %s." % setuptools_requirement) + + def add_dependency_links(self, links): + ## FIXME: this shouldn't be global list this, it should only + ## apply to requirements of the package that specifies the + ## dependency_links value + ## FIXME: also, we should track comes_from (i.e., use Link) + self.dependency_links.extend(links) + + def _sort_locations(self, locations): + """ + Sort locations into "files" (archives) and "urls", and return + a pair of lists (files,urls) + """ + files = [] + urls = [] + + # puts the url for the given file path into the appropriate list + def sort_path(path): + url = path_to_url2(path) + if mimetypes.guess_type(url, strict=False)[0] == 'text/html': + urls.append(url) + else: + files.append(url) + + for url in locations: + + is_local_path = os.path.exists(url) + is_file_url = url.startswith('file:') + is_find_link = url in self.find_links + + if is_local_path or is_file_url: + if is_local_path: + path = url + else: + path = url_to_path(url) + if is_find_link and os.path.isdir(path): + path = os.path.realpath(path) + for item in os.listdir(path): + sort_path(os.path.join(path, item)) + elif is_file_url and os.path.isdir(path): + urls.append(url) + elif os.path.isfile(path): + sort_path(path) + else: + urls.append(url) + + return files, urls + + def _link_sort_key(self, link_tuple): + """ + Function used to generate link sort key for link tuples. + The greater the return value, the more preferred it is. + If not finding wheels, then sorted by version only. + If finding wheels, then the sort order is by version, then: + 1. existing installs + 2. wheels ordered via Wheel.support_index_min() + 3. source archives + Note: it was considered to embed this logic into the Link + comparison operators, but then different sdist links + with the same version, would have to be considered equal + """ + parsed_version, link, _ = link_tuple + if self.use_wheel: + support_num = len(supported_tags) + if link == InfLink: # existing install + pri = 1 + elif link.wheel: + # all wheel links are known to be supported at this stage + pri = -(link.wheel.support_index_min()) + else: # sdist + pri = -(support_num) + return (parsed_version, pri) + else: + return parsed_version + + def _sort_versions(self, applicable_versions): + """ + Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary. + See the docstring for `_link_sort_key` for details. + This function is isolated for easier unit testing. + """ + return sorted(applicable_versions, key=self._link_sort_key, reverse=True) + + def find_requirement(self, req, upgrade): + + def mkurl_pypi_url(url): + loc = posixpath.join(url, url_name) + # For maximum compatibility with easy_install, ensure the path + # ends in a trailing slash. Although this isn't in the spec + # (and PyPI can handle it without the slash) some other index + # implementations might break if they relied on easy_install's behavior. 
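# --- Editor's aside: an illustrative sketch, not part of the vendored diff ---
# The trailing-slash normalisation described in the comment above, as a
# standalone function (the real code continues just below):
import posixpath

def sketch_mkurl_pypi_url(index_url, name):
    loc = posixpath.join(index_url, name)
    if not loc.endswith('/'):
        loc += '/'
    return loc

assert (sketch_mkurl_pypi_url('https://pypi.python.org/simple', 'Django')
        == 'https://pypi.python.org/simple/Django/')
# --- end aside ---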
+ if not loc.endswith('/'): + loc = loc + '/' + return loc + + url_name = req.url_name + # Only check main index if index URL is given: + main_index_url = None + if self.index_urls: + # Check that we have the url_name correctly spelled: + main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True) + # This will also cache the page, so it's okay that we get it again later: + page = self._get_page(main_index_url, req) + if page is None: + url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name + + # Combine index URLs with mirror URLs here to allow + # adding more index URLs from requirements files + all_index_urls = self.index_urls + self.mirror_urls + + if url_name is not None: + locations = [ + mkurl_pypi_url(url) + for url in all_index_urls] + self.find_links + else: + locations = list(self.find_links) + for version in req.absolute_versions: + if url_name is not None and main_index_url is not None: + locations = [ + posixpath.join(main_index_url.url, version)] + locations + + file_locations, url_locations = self._sort_locations(locations) + _flocations, _ulocations = self._sort_locations(self.dependency_links) + file_locations.extend(_flocations) + + # We trust every url that the user has given us whether it was given + # via --index-url, --user-mirrors/--mirror, or --find-links or a + # default option thereof + locations = [Link(url, trusted=True) for url in url_locations] + + # We explicitly do not trust links that came from dependency_links + locations.extend([Link(url) for url in _ulocations]) + + logger.debug('URLs to search for versions for %s:' % req) + for location in locations: + logger.debug('* %s' % location) + found_versions = [] + found_versions.extend( + self._package_versions( + # We trust every directly linked archive in find_links + [Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower())) + page_versions = [] + for page in self._get_pages(locations, req): + logger.debug('Analyzing links from page %s' % page.url) + logger.indent += 2 + try: + page_versions.extend(self._package_versions(page.links, req.name.lower())) + finally: + logger.indent -= 2 + dependency_versions = list(self._package_versions( + [Link(url) for url in self.dependency_links], req.name.lower())) + if dependency_versions: + logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions])) + file_versions = list(self._package_versions( + [Link(url) for url in file_locations], req.name.lower())) + if not found_versions and not page_versions and not dependency_versions and not file_versions: + logger.fatal('Could not find any downloads that satisfy the requirement %s' % req) + + if self.need_warn_external: + logger.warn("Some externally hosted files were ignored (use " + "--allow-external %s to allow)." % req.name) + + if self.need_warn_insecure: + logger.warn("Some insecure and unverifiable files were ignored" + " (use --allow-insecure %s to allow)." 
% req.name) + + raise DistributionNotFound('No distributions at all found for %s' % req) + installed_version = [] + if req.satisfied_by is not None: + installed_version = [(req.satisfied_by.parsed_version, InfLink, req.satisfied_by.version)] + if file_versions: + file_versions.sort(reverse=True) + logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions])) + #this is an intentional priority ordering + all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions + applicable_versions = [] + for (parsed_version, link, version) in all_versions: + if version not in req.req: + logger.info("Ignoring link %s, version %s doesn't match %s" + % (link, version, ','.join([''.join(s) for s in req.req.specs]))) + continue + elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases): + # If this version isn't the already installed one, then + # ignore it if it's a pre-release. + if link is not InfLink: + logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version)) + continue + applicable_versions.append((parsed_version, link, version)) + applicable_versions = self._sort_versions(applicable_versions) + existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is InfLink]) + if not upgrade and existing_applicable: + if applicable_versions[0][1] is InfLink: + logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement' + % req.satisfied_by.version) + else: + logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)' + % (req.satisfied_by.version, applicable_versions[0][2])) + return None + if not applicable_versions: + logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)' + % (req, ', '.join([version for parsed_version, link, version in all_versions]))) + + if self.need_warn_external: + logger.warn("Some externally hosted files were ignored (use " + "--allow-external to allow).") + + if self.need_warn_insecure: + logger.warn("Some insecure and unverifiable files were ignored" + " (use --allow-insecure %s to allow)." % req.name) + + raise DistributionNotFound('No distributions matching the version for %s' % req) + if applicable_versions[0][1] is InfLink: + # We have an existing version, and its the best version + logger.info('Installed version (%s) is most up-to-date (past versions: %s)' + % (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none')) + raise BestVersionAlreadyInstalled + if len(applicable_versions) > 1: + logger.info('Using version %s (newest of versions: %s)' % + (applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions]))) + + selected_version = applicable_versions[0][1] + + # TODO: Remove after 1.4 has been released + if (selected_version.internal is not None + and not selected_version.internal): + logger.warn("You are installing an externally hosted file. Future " + "versions of pip will default to disallowing " + "externally hosted files.") + + if (selected_version.verifiable is not None + and not selected_version.verifiable): + logger.warn("You are installing a potentially insecure and " + "unverifiable file. 
Future versions of pip will " + "default to disallowing insecure files.") + + return selected_version + + + def _find_url_name(self, index_url, url_name, req): + """Finds the true URL name of a package, when the given name isn't quite correct. + This is usually used to implement case-insensitivity.""" + if not index_url.url.endswith('/'): + # Vaguely part of the PyPI API... weird but true. + ## FIXME: bad to modify this? + index_url.url += '/' + page = self._get_page(index_url, req) + if page is None: + logger.fatal('Cannot fetch index base URL %s' % index_url) + return + norm_name = normalize_name(req.url_name) + for link in page.links: + base = posixpath.basename(link.path.rstrip('/')) + if norm_name == normalize_name(base): + logger.notify('Real name of requirement %s is %s' % (url_name, base)) + return base + return None + + def _get_pages(self, locations, req): + """Yields (page, page_url) from the given locations, skipping + locations that have errors, and adding download/homepage links""" + pending_queue = Queue() + for location in locations: + pending_queue.put(location) + done = [] + seen = set() + threads = [] + for i in range(min(10, len(locations))): + t = threading.Thread(target=self._get_queued_page, args=(req, pending_queue, done, seen)) + t.setDaemon(True) + threads.append(t) + t.start() + for t in threads: + t.join() + return done + + _log_lock = threading.Lock() + + def _get_queued_page(self, req, pending_queue, done, seen): + while 1: + try: + location = pending_queue.get(False) + except QueueEmpty: + return + if location in seen: + continue + seen.add(location) + page = self._get_page(location, req) + if page is None: + continue + done.append(page) + for link in page.rel_links(): + normalized = normalize_name(req.name).lower() + + if (not normalized in self.allow_external + and not self.allow_all_external): + self.need_warn_external = True + logger.debug("Not searching %s for files because external " + "urls are disallowed." % link) + continue + + if (link.trusted is not None + and not link.trusted + and not normalized in self.allow_insecure + and not self.allow_all_insecure): # TODO: Remove after release + logger.debug("Not searching %s for urls, it is an " + "untrusted link and cannot produce safe or " + "verifiable files." % link) + self.need_warn_insecure = True + continue + + pending_queue.put(link) + + _egg_fragment_re = re.compile(r'#egg=([^&]*)') + _egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I) + _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') + + def _sort_links(self, links): + "Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates" + eggs, no_eggs = [], [] + seen = set() + for link in links: + if link not in seen: + seen.add(link) + if link.egg_fragment: + eggs.append(link) + else: + no_eggs.append(link) + return no_eggs + eggs + + def _package_versions(self, links, search_name): + for link in self._sort_links(links): + for v in self._link_package_versions(link, search_name): + yield v + + def _known_extensions(self): + extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip') + if self.use_wheel: + return extensions + (wheel_ext,) + return extensions + + def _link_package_versions(self, link, search_name): + """ + Return an iterable of triples (pkg_resources_version_key, + link, python_version) that can be extracted from the given + link. + + Meant to be overridden by subclasses, not called by clients. 
+ """ + platform = get_platform() + + version = None + if link.egg_fragment: + egg_info = link.egg_fragment + else: + egg_info, ext = link.splitext() + if not ext: + if link not in self.logged_links: + logger.debug('Skipping link %s; not a file' % link) + self.logged_links.add(link) + return [] + if egg_info.endswith('.tar'): + # Special double-extension case: + egg_info = egg_info[:-4] + ext = '.tar' + ext + if ext not in self._known_extensions(): + if link not in self.logged_links: + logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext)) + self.logged_links.add(link) + return [] + if "macosx10" in link.path and ext == '.zip': + if link not in self.logged_links: + logger.debug('Skipping link %s; macosx10 one' % (link)) + self.logged_links.add(link) + return [] + if link.wheel and link.wheel.name.lower() == search_name.lower(): + version = link.wheel.version + if not link.wheel.supported(): + logger.debug('Skipping %s because it is not compatible with this Python' % link) + return [] + + # This is a dirty hack to prevent installing Binary Wheels from + # PyPI or one of its mirrors unless it is a Windows Binary + # Wheel. This is paired with a change to PyPI disabling + # uploads for the same. Once we have a mechanism for enabling + # support for binary wheels on linux that deals with the + # inherent problems of binary distribution this can be + # removed. + comes_from = getattr(link, "comes_from", None) + if (not platform.startswith('win') + and comes_from is not None + and urlparse.urlparse(comes_from.url).netloc.endswith( + "pypi.python.org")): + if not link.wheel.supported(tags=supported_tags_noarch): + logger.debug( + "Skipping %s because it is a pypi-hosted binary " + "Wheel on an unsupported platform" % link + ) + return [] + + if not version: + version = self._egg_info_matches(egg_info, search_name, link) + if version is None: + logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name)) + return [] + + if (link.internal is not None + and not link.internal + and not normalize_name(search_name).lower() in self.allow_external + and not self.allow_all_external): + # We have a link that we are sure is external, so we should skip + # it unless we are allowing externals + logger.debug("Skipping %s because it is externally hosted." % link) + self.need_warn_external = True + return [] + + if (link.verifiable is not None + and not link.verifiable + and not normalize_name(search_name).lower() in self.allow_insecure + and not self.allow_all_insecure): # TODO: Remove after release + # We have a link that we are sure we cannot verify it's integrity, + # so we should skip it unless we are allowing unsafe installs + # for this requirement. + logger.debug("Skipping %s because it is an insecure and " + "unverifiable file." 
% link) + self.need_warn_insecure = True + return [] + + match = self._py_version_re.search(version) + if match: + version = version[:match.start()] + py_version = match.group(1) + if py_version != sys.version[:3]: + logger.debug('Skipping %s because Python version is incorrect' % link) + return [] + logger.debug('Found link %s, version: %s' % (link, version)) + return [(pkg_resources.parse_version(version), + link, + version)] + + def _egg_info_matches(self, egg_info, search_name, link): + match = self._egg_info_re.search(egg_info) + if not match: + logger.debug('Could not parse version from link: %s' % link) + return None + name = match.group(0).lower() + # To match the "safe" name that pkg_resources creates: + name = name.replace('_', '-') + # project name and version must be separated by a dash + look_for = search_name.lower() + "-" + if name.startswith(look_for): + return match.group(0)[len(look_for):] + else: + return None + + def _get_page(self, link, req): + return HTMLPage.get_page(link, req, cache=self.cache) + + def _get_mirror_urls(self, mirrors=None, main_mirror_url=None): + """Retrieves a list of URLs from the main mirror DNS entry + unless a list of mirror URLs are passed. + """ + if not mirrors: + mirrors = get_mirrors(main_mirror_url) + # Should this be made "less random"? E.g. netselect like? + random.shuffle(mirrors) + + mirror_urls = set() + for mirror_url in mirrors: + mirror_url = mirror_url.rstrip('/') + # Make sure we have a valid URL + if not any([mirror_url.startswith(scheme) for scheme in ["http://", "https://", "file://"]]): + mirror_url = "http://%s" % mirror_url + if not mirror_url.endswith("/simple"): + mirror_url = "%s/simple" % mirror_url + mirror_urls.add(mirror_url + '/') + + return list(mirror_urls) + + +class PageCache(object): + """Cache of HTML pages""" + + failure_limit = 3 + + def __init__(self): + self._failures = {} + self._pages = {} + self._archives = {} + + def too_many_failures(self, url): + return self._failures.get(url, 0) >= self.failure_limit + + def get_page(self, url): + return self._pages.get(url) + + def is_archive(self, url): + return self._archives.get(url, False) + + def set_is_archive(self, url, value=True): + self._archives[url] = value + + def add_page_failure(self, url, level): + self._failures[url] = self._failures.get(url, 0)+level + + def add_page(self, urls, page): + for url in urls: + self._pages[url] = page + + +class HTMLPage(object): + """Represents one page, along with its URL""" + + ## FIXME: these regexes are horrible hacks: + _homepage_re = re.compile(r'\s*home\s*page', re.I) + _download_re = re.compile(r'\s*download\s+url', re.I) + _href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S) + + def __init__(self, content, url, headers=None, trusted=None): + self.content = content + self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False) + self.url = url + self.headers = headers + self.trusted = trusted + + def __str__(self): + return self.url + + @classmethod + def get_page(cls, link, req, cache=None, skip_archives=True): + url = link.url + url = url.split('#', 1)[0] + if cache.too_many_failures(url): + return None + + # Check for VCS schemes that do not support lookup as web pages. 
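# --- Editor's aside: an illustrative sketch, not part of the vendored diff ---
# The name/version split performed by _egg_info_matches() above, using the
# same regular expression:
import re

_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)

def sketch_version_for(egg_info, search_name):
    match = _egg_info_re.search(egg_info)
    if not match:
        return None
    name = match.group(0).lower().replace('_', '-')
    look_for = search_name.lower() + '-'
    if name.startswith(look_for):
        return match.group(0)[len(look_for):]
    return None

assert sketch_version_for('South-0.8.2', 'south') == '0.8.2'
# --- end aside ---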
+ from pip.vcs import VcsSupport + for scheme in VcsSupport.schemes: + if url.lower().startswith(scheme) and url[len(scheme)] in '+:': + logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals()) + return None + + if cache is not None: + inst = cache.get_page(url) + if inst is not None: + return inst + try: + if skip_archives: + if cache is not None: + if cache.is_archive(url): + return None + filename = link.filename + for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']: + if filename.endswith(bad_ext): + content_type = cls._get_content_type(url) + if content_type.lower().startswith('text/html'): + break + else: + logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type)) + if cache is not None: + cache.set_is_archive(url) + return None + logger.debug('Getting page %s' % url) + + # Tack index.html onto file:// URLs that point to directories + (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url) + if scheme == 'file' and os.path.isdir(url2pathname(path)): + # add trailing slash if not present so urljoin doesn't trim final segment + if not url.endswith('/'): + url += '/' + url = urlparse.urljoin(url, 'index.html') + logger.debug(' file: URL is directory, getting %s' % url) + + resp = urlopen(url) + + real_url = geturl(resp) + headers = resp.info() + contents = resp.read() + encoding = headers.get('Content-Encoding', None) + #XXX need to handle exceptions and add testing for this + if encoding is not None: + if encoding == 'gzip': + contents = gzip.GzipFile(fileobj=BytesIO(contents)).read() + if encoding == 'deflate': + contents = zlib.decompress(contents) + + # The check for archives above only works if the url ends with + # something that looks like an archive. However that is not a + # requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download + # redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz + # Unless we issue a HEAD request on every url we cannot know + # ahead of time for sure if something is HTML or not. However we + # can check after we've downloaded it. + content_type = headers.get('Content-Type', 'unknown') + if not content_type.lower().startswith("text/html"): + logger.debug('Skipping page %s because of Content-Type: %s' % + (link, content_type)) + if cache is not None: + cache.set_is_archive(url) + return None + + inst = cls(u(contents), real_url, headers, trusted=link.trusted) + except (HTTPError, URLError, socket.timeout, socket.error, OSError, WindowsError): + e = sys.exc_info()[1] + desc = str(e) + if isinstance(e, socket.timeout): + log_meth = logger.info + level =1 + desc = 'timed out' + elif isinstance(e, URLError): + #ssl/certificate error + if hasattr(e, 'reason') and (isinstance(e.reason, ssl.SSLError) or isinstance(e.reason, CertificateError)): + desc = 'There was a problem confirming the ssl certificate: %s' % e + log_meth = logger.notify + else: + log_meth = logger.info + if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout): + desc = 'timed out' + level = 1 + else: + level = 2 + elif isinstance(e, HTTPError) and e.code == 404: + ## FIXME: notify? 
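# --- Editor's aside: an illustrative sketch, not part of the vendored diff ---
# The VCS-scheme test above treats "git+https://..." or "git:..." style URLs
# as version-control locations; a hard-coded scheme list stands in for the
# real VcsSupport.schemes here:
def sketch_is_vcs_page_url(url, schemes=('git', 'hg', 'bzr', 'svn')):
    for scheme in schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            return True
    return False

assert sketch_is_vcs_page_url('git+https://github.com/ansible/ansible.git')
assert not sketch_is_vcs_page_url('https://pypi.python.org/simple/')
# --- end aside ---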
+ log_meth = logger.info + level = 2 + else: + log_meth = logger.info + level = 1 + log_meth('Could not fetch URL %s: %s' % (link, desc)) + log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req)) + if cache is not None: + cache.add_page_failure(url, level) + return None + if cache is not None: + cache.add_page([url, real_url], inst) + return inst + + @staticmethod + def _get_content_type(url): + """Get the Content-Type of the given url, using a HEAD request""" + scheme, netloc, path, query, fragment = urlparse.urlsplit(url) + if not scheme in ('http', 'https', 'ftp', 'ftps'): + ## FIXME: some warning or something? + ## assertion error? + return '' + req = Urllib2HeadRequest(url, headers={'Host': netloc}) + resp = urlopen(req) + try: + if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'): + ## FIXME: doesn't handle redirects + return '' + return resp.info().get('content-type', '') + finally: + resp.close() + + @property + def api_version(self): + if not hasattr(self, "_api_version"): + _api_version = None + + metas = [x for x in self.parsed.findall(".//meta") + if x.get("name", "").lower() == "api-version"] + if metas: + try: + _api_version = int(metas[0].get("value", None)) + except (TypeError, ValueError): + _api_version = None + self._api_version = _api_version + return self._api_version + + @property + def base_url(self): + if not hasattr(self, "_base_url"): + base = self.parsed.find(".//base") + if base is not None and base.get("href"): + self._base_url = base.get("href") + else: + self._base_url = self.url + return self._base_url + + @property + def links(self): + """Yields all links in the page""" + for anchor in self.parsed.findall(".//a"): + if anchor.get("href"): + href = anchor.get("href") + url = self.clean_link(urlparse.urljoin(self.base_url, href)) + + # Determine if this link is internal. If that distinction + # doesn't make sense in this context, then we don't make + # any distinction. + internal = None + if self.api_version and self.api_version >= 2: + # Only api_versions >= 2 have a distinction between + # external and internal links + internal = bool(anchor.get("rel") + and "internal" in anchor.get("rel").split()) + + yield Link(url, self, internal=internal) + + def rel_links(self): + for url in self.explicit_rel_links(): + yield url + for url in self.scraped_rel_links(): + yield url + + def explicit_rel_links(self, rels=('homepage', 'download')): + """Yields all links with the given relations""" + rels = set(rels) + + for anchor in self.parsed.findall(".//a"): + if anchor.get("rel") and anchor.get("href"): + found_rels = set(anchor.get("rel").split()) + # Determine the intersection between what rels were found and + # what rels were being looked for + if found_rels & rels: + href = anchor.get("href") + url = self.clean_link(urlparse.urljoin(self.base_url, href)) + yield Link(url, self, trusted=False) + + def scraped_rel_links(self): + # Can we get rid of this horrible horrible method? 
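# --- Editor's aside: an illustrative sketch, not part of the vendored diff ---
# explicit_rel_links() above keeps an anchor only when its rel attribute
# shares at least one token with the wanted set:
rels = set(('homepage', 'download'))
assert set('homepage nofollow'.split()) & rels
assert not (set('nofollow'.split()) & rels)
# --- end aside ---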
+ for regex in (self._homepage_re, self._download_re): + match = regex.search(self.content) + if not match: + continue + href_match = self._href_re.search(self.content, pos=match.end()) + if not href_match: + continue + url = href_match.group(1) or href_match.group(2) or href_match.group(3) + if not url: + continue + url = self.clean_link(urlparse.urljoin(self.base_url, url)) + yield Link(url, self, trusted=False) + + _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) + + def clean_link(self, url): + """Makes sure a link is fully encoded. That is, if a ' ' shows up in + the link, it will be rewritten to %20 (while not over-quoting + % or other characters).""" + return self._clean_re.sub( + lambda match: '%%%2x' % ord(match.group(0)), url) + + +class Link(object): + + def __init__(self, url, comes_from=None, internal=None, trusted=None): + self.url = url + self.comes_from = comes_from + self.internal = internal + self.trusted = trusted + + # Set whether it's a wheel + self.wheel = None + if url != Inf and self.splitext()[1] == wheel_ext: + self.wheel = Wheel(self.filename) + + def __str__(self): + if self.comes_from: + return '%s (from %s)' % (self.url, self.comes_from) + else: + return str(self.url) + + def __repr__(self): + return '' % self + + def __eq__(self, other): + return self.url == other.url + + def __ne__(self, other): + return self.url != other.url + + def __lt__(self, other): + return self.url < other.url + + def __le__(self, other): + return self.url <= other.url + + def __gt__(self, other): + return self.url > other.url + + def __ge__(self, other): + return self.url >= other.url + + def __hash__(self): + return hash(self.url) + + @property + def filename(self): + _, netloc, path, _, _ = urlparse.urlsplit(self.url) + name = posixpath.basename(path.rstrip('/')) or netloc + assert name, ('URL %r produced no filename' % self.url) + return name + + @property + def scheme(self): + return urlparse.urlsplit(self.url)[0] + + @property + def path(self): + return urlparse.urlsplit(self.url)[2] + + def splitext(self): + return splitext(posixpath.basename(self.path.rstrip('/'))) + + @property + def url_without_fragment(self): + scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url) + return urlparse.urlunsplit((scheme, netloc, path, query, None)) + + _egg_fragment_re = re.compile(r'#egg=([^&]*)') + + @property + def egg_fragment(self): + match = self._egg_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)') + + @property + def hash(self): + match = self._hash_re.search(self.url) + if match: + return match.group(2) + return None + + @property + def hash_name(self): + match = self._hash_re.search(self.url) + if match: + return match.group(1) + return None + + @property + def show_url(self): + return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) + + @property + def verifiable(self): + """ + Returns True if this link can be verified after download, False if it + cannot, and None if we cannot determine. + """ + trusted = self.trusted or getattr(self.comes_from, "trusted", None) + if trusted is not None and trusted: + # This link came from a trusted source. It *may* be verifiable but + # first we need to see if this page is operating under the new + # API version. 
+            try:
+                api_version = getattr(self.comes_from, "api_version", None)
+                api_version = int(api_version)
+            except (ValueError, TypeError):
+                api_version = None
+
+            if api_version is None or api_version <= 1:
+                # This link is either trusted, or it came from a trusted
+                # source, but it is not operating under API version 2, so
+                # we can't make any claims about whether it's safe or not.
+                return
+
+            if self.hash:
+                # This link came from a trusted source and it has a hash, so
+                # we can consider it safe.
+                return True
+            else:
+                # This link came from a trusted source, using the new API
+                # version, and it does not have a hash. It is NOT verifiable
+                return False
+        elif trusted is not None:
+            # This link came from an untrusted source and we cannot trust it
+            return False
+
+# An "Infinite Link" that compares greater than other links
+InfLink = Link(Inf)  # this object is not currently used as a sortable
+
+
+def get_requirement_from_url(url):
+    """Get a requirement from the URL, if possible. This looks for #egg
+    in the URL"""
+    link = Link(url)
+    egg_info = link.egg_fragment
+    if not egg_info:
+        egg_info = splitext(link.filename)[0]
+    return package_to_requirement(egg_info)
+
+
+def package_to_requirement(package_name):
+    """Translate a name like Foo-1.2 to Foo==1.2"""
+    match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
+    if match:
+        name = match.group(1)
+        version = match.group(2)
+    else:
+        name = package_name
+        version = ''
+    if version:
+        return '%s==%s' % (name, version)
+    else:
+        return name
+
+
+def get_mirrors(hostname=None):
+    """Return the list of mirrors from the last record found on the DNS
+    entry::
+
+        >>> from pip.index import get_mirrors
+        >>> get_mirrors()
+        ['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org',
+         'd.pypi.python.org']
+
+    Originally written for the distutils2 project by Alexis Metaireau.
+    """
+    if hostname is None:
+        hostname = DEFAULT_MIRROR_HOSTNAME
+
+    # return the last mirror registered on PyPI.
+    last_mirror_hostname = None
+    try:
+        last_mirror_hostname = socket.gethostbyname_ex(hostname)[0]
+    except socket.gaierror:
+        return []
+    if not last_mirror_hostname or last_mirror_hostname == DEFAULT_MIRROR_HOSTNAME:
+        last_mirror_hostname = "z.pypi.python.org"
+    end_letter = last_mirror_hostname.split(".", 1)
+
+    # determine the list from the last one.
+    return ["%s.%s" % (s, end_letter[1]) for s in string_range(end_letter[0])]
+
+
+def string_range(last):
+    """Compute the range of strings between "a" and `last`.
+
+    This works for simple "a to z" lists, but also for "a to zz" lists.
+    """
+    for k in range(len(last)):
+        for x in product(string.ascii_lowercase, repeat=k+1):
+            result = ''.join(x)
+            yield result
+            if result == last:
+                return
+
diff --git a/awx/lib/site-packages/pip/locations.py b/awx/lib/site-packages/pip/locations.py
new file mode 100644
index 0000000000..26d8676b69
--- /dev/null
+++ b/awx/lib/site-packages/pip/locations.py
@@ -0,0 +1,156 @@
+"""Locations where we look for configs, install stuff, etc"""
+
+import sys
+import site
+import os
+import tempfile
+from distutils.command.install import install, SCHEME_KEYS
+import getpass
+from pip.backwardcompat import get_python_lib
+import pip.exceptions
+
+default_cert_path = os.path.join(os.path.dirname(__file__), 'cacert.pem')
+
+DELETE_MARKER_MESSAGE = '''\
+This file is placed here by pip to indicate the source was put
+here by pip.
+
+Once this package is successfully installed this source code will be
+deleted (unless you remove this file).
+''' +PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt' + +def write_delete_marker_file(directory): + """ + Write the pip delete marker file into this directory. + """ + filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME) + marker_fp = open(filepath, 'w') + marker_fp.write(DELETE_MARKER_MESSAGE) + marker_fp.close() + + +def running_under_virtualenv(): + """ + Return True if we're running inside a virtualenv, False otherwise. + + """ + return hasattr(sys, 'real_prefix') + + +def virtualenv_no_global(): + """ + Return True if in a venv and no system site packages. + """ + #this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file + site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) + no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt') + if running_under_virtualenv() and os.path.isfile(no_global_file): + return True + +def __get_username(): + """ Returns the effective username of the current process. """ + if sys.platform == 'win32': + return getpass.getuser() + import pwd + return pwd.getpwuid(os.geteuid()).pw_name + +def _get_build_prefix(): + """ Returns a safe build_prefix """ + path = os.path.join(tempfile.gettempdir(), 'pip_build_%s' % + __get_username()) + if sys.platform == 'win32': + """ on windows(tested on 7) temp dirs are isolated """ + return path + try: + os.mkdir(path) + write_delete_marker_file(path) + except OSError: + file_uid = None + try: + fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) + file_uid = os.fstat(fd).st_uid + os.close(fd) + except OSError: + file_uid = None + if file_uid != os.geteuid(): + msg = "The temporary folder for building (%s) is not owned by your user!" \ + % path + print (msg) + print("pip will not work until the temporary folder is " + \ + "either deleted or owned by your user account.") + raise pip.exceptions.InstallationError(msg) + return path + +if running_under_virtualenv(): + build_prefix = os.path.join(sys.prefix, 'build') + src_prefix = os.path.join(sys.prefix, 'src') +else: + # Use tempfile to create a temporary folder for build + # Note: we are NOT using mkdtemp so we can have a consistent build dir + # Note: using realpath due to tmp dirs on OSX being symlinks + build_prefix = _get_build_prefix() + + ## FIXME: keep src in cwd for now (it is not a temporary folder) + try: + src_prefix = os.path.join(os.getcwd(), 'src') + except OSError: + # In case the current working directory has been renamed or deleted + sys.exit("The folder you are executing pip from can no longer be found.") + +# under Mac OS X + virtualenv sys.prefix is not properly resolved +# it is something like /path/to/python/bin/.. +build_prefix = os.path.abspath(os.path.realpath(build_prefix)) +src_prefix = os.path.abspath(src_prefix) + +# FIXME doesn't account for venv linked to global site-packages + +site_packages = get_python_lib() +user_dir = os.path.expanduser('~') +if sys.platform == 'win32': + bin_py = os.path.join(sys.prefix, 'Scripts') + # buildout uses 'bin' on Windows too? 
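+    # NOTE (illustrative, not upstream pip code): assuming a stock CPython
+    # at C:\Python27, this branch yields paths such as
+    #   bin_py              -> C:\Python27\Scripts
+    #   default_config_file -> %USERPROFILE%\pip\pip.ini
+    # the actual values depend on sys.prefix and the user's home directory.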
+    if not os.path.exists(bin_py):
+        bin_py = os.path.join(sys.prefix, 'bin')
+    default_storage_dir = os.path.join(user_dir, 'pip')
+    default_config_file = os.path.join(default_storage_dir, 'pip.ini')
+    default_log_file = os.path.join(default_storage_dir, 'pip.log')
+else:
+    bin_py = os.path.join(sys.prefix, 'bin')
+    default_storage_dir = os.path.join(user_dir, '.pip')
+    default_config_file = os.path.join(default_storage_dir, 'pip.conf')
+    default_log_file = os.path.join(default_storage_dir, 'pip.log')
+
+    # Forcing to use /usr/local/bin for standard Mac OS X framework installs
+    # Also log to ~/Library/Logs/ for use with the Console.app log viewer
+    if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
+        bin_py = '/usr/local/bin'
+        default_log_file = os.path.join(user_dir, 'Library/Logs/pip.log')
+
+
+def distutils_scheme(dist_name, user=False, home=None):
+    """
+    Return a distutils install scheme
+    """
+    from distutils.dist import Distribution
+
+    scheme = {}
+    d = Distribution({'name': dist_name})
+    i = install(d)
+    i.user = user or i.user
+    i.home = home or i.home
+    i.finalize_options()
+    for key in SCHEME_KEYS:
+        scheme[key] = getattr(i, 'install_'+key)
+
+    #be backward-compatible with what pip has always done?
+    scheme['scripts'] = bin_py
+
+    if running_under_virtualenv():
+        scheme['headers'] = os.path.join(sys.prefix,
+                                         'include',
+                                         'site',
+                                         'python' + sys.version[:3],
+                                         dist_name)
+
+    return scheme
diff --git a/awx/lib/site-packages/pip/log.py b/awx/lib/site-packages/pip/log.py
new file mode 100644
index 0000000000..9eb02b90d2
--- /dev/null
+++ b/awx/lib/site-packages/pip/log.py
@@ -0,0 +1,187 @@
+"""Logging
+"""
+
+import sys
+import logging
+
+from pip import backwardcompat
+
+
+class Logger(object):
+    """
+    Logging object for use in command-line script. Allows ranges of
+    levels, to avoid some redundancy of displayed information.
+    """
+    VERBOSE_DEBUG = logging.DEBUG - 1
+    DEBUG = logging.DEBUG
+    INFO = logging.INFO
+    NOTIFY = (logging.INFO + logging.WARN) / 2
+    WARN = WARNING = logging.WARN
+    ERROR = logging.ERROR
+    FATAL = logging.FATAL
+
+    LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
+
+    def __init__(self):
+        self.consumers = []
+        self.indent = 0
+        self.explicit_levels = False
+        self.in_progress = None
+        self.in_progress_hanging = False
+
+    def debug(self, msg, *args, **kw):
+        self.log(self.DEBUG, msg, *args, **kw)
+
+    def info(self, msg, *args, **kw):
+        self.log(self.INFO, msg, *args, **kw)
+
+    def notify(self, msg, *args, **kw):
+        self.log(self.NOTIFY, msg, *args, **kw)
+
+    def warn(self, msg, *args, **kw):
+        self.log(self.WARN, msg, *args, **kw)
+
+    def error(self, msg, *args, **kw):
+        self.log(self.ERROR, msg, *args, **kw)
+
+    def fatal(self, msg, *args, **kw):
+        self.log(self.FATAL, msg, *args, **kw)
+
+    def log(self, level, msg, *args, **kw):
+        if args:
+            if kw:
+                raise TypeError(
+                    "You may give positional or keyword arguments, not both")
+        args = args or kw
+        rendered = None
+        for consumer_level, consumer in self.consumers:
+            if self.level_matches(level, consumer_level):
+                if (self.in_progress_hanging
+                        and consumer in (sys.stdout, sys.stderr)):
+                    self.in_progress_hanging = False
+                    sys.stdout.write('\n')
+                    sys.stdout.flush()
+                if rendered is None:
+                    if args:
+                        rendered = msg % args
+                    else:
+                        rendered = msg
+                    rendered = ' ' * self.indent + rendered
+                    if self.explicit_levels:
+                        ## FIXME: should this be a name, not a level number?
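+                        # NOTE (illustrative, not upstream pip code): with
+                        # explicit_levels set, a NOTIFY message renders as
+                        # e.g. "25 Downloading/unpacking foo", since NOTIFY
+                        # is (INFO + WARN) / 2 == (20 + 30) / 2 == 25.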
+                        rendered = '%02i %s' % (level, rendered)
+                if hasattr(consumer, 'write'):
+                    rendered += '\n'
+                    backwardcompat.fwrite(consumer, rendered)
+                else:
+                    consumer(rendered)
+
+    def _show_progress(self):
+        """Should we display download progress?"""
+        return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())
+
+    def start_progress(self, msg):
+        assert not self.in_progress, (
+            "Tried to start_progress(%r) while in_progress %r"
+            % (msg, self.in_progress))
+        if self._show_progress():
+            sys.stdout.write(' ' * self.indent + msg)
+            sys.stdout.flush()
+            self.in_progress_hanging = True
+        else:
+            self.in_progress_hanging = False
+        self.in_progress = msg
+        self.last_message = None
+
+    def end_progress(self, msg='done.'):
+        assert self.in_progress, (
+            "Tried to end_progress without start_progress")
+        if self._show_progress():
+            if not self.in_progress_hanging:
+                # Some message has been printed out since start_progress
+                sys.stdout.write('...' + self.in_progress + msg + '\n')
+                sys.stdout.flush()
+            else:
+                # These erase any messages shown with show_progress (besides .'s)
+                logger.show_progress('')
+                logger.show_progress('')
+                sys.stdout.write(msg + '\n')
+                sys.stdout.flush()
+        self.in_progress = None
+        self.in_progress_hanging = False
+
+    def show_progress(self, message=None):
+        """If we are in a progress scope, and no log messages have been
+        shown, write out another '.'"""
+        if self.in_progress_hanging:
+            if message is None:
+                sys.stdout.write('.')
+                sys.stdout.flush()
+            else:
+                if self.last_message:
+                    padding = ' ' * max(0, len(self.last_message) - len(message))
+                else:
+                    padding = ''
+                sys.stdout.write('\r%s%s%s%s' %
+                                 (' ' * self.indent, self.in_progress, message, padding))
+                sys.stdout.flush()
+                self.last_message = message
+
+    def stdout_level_matches(self, level):
+        """Returns true if a message at this level will go to stdout"""
+        return self.level_matches(level, self._stdout_level())
+
+    def _stdout_level(self):
+        """Returns the level that stdout runs at"""
+        for level, consumer in self.consumers:
+            if consumer is sys.stdout:
+                return level
+        return self.FATAL
+
+    def level_matches(self, level, consumer_level):
+        """
+        >>> l = Logger()
+        >>> l.level_matches(3, 4)
+        False
+        >>> l.level_matches(3, 2)
+        True
+        >>> l.level_matches(slice(None, 3), 3)
+        False
+        >>> l.level_matches(slice(None, 3), 2)
+        True
+        >>> l.level_matches(slice(1, 3), 1)
+        True
+        >>> l.level_matches(slice(2, 3), 1)
+        False
+        """
+        if isinstance(level, slice):
+            start, stop = level.start, level.stop
+            if start is not None and start > consumer_level:
+                return False
+            if stop is not None and stop <= consumer_level:
+                return False
+            return True
+        else:
+            return level >= consumer_level
+
+    @classmethod
+    def level_for_integer(cls, level):
+        levels = cls.LEVELS
+        if level < 0:
+            return levels[0]
+        if level >= len(levels):
+            return levels[-1]
+        return levels[level]
+
+    def move_stdout_to_stderr(self):
+        to_remove = []
+        to_add = []
+        for consumer_level, consumer in self.consumers:
+            if consumer == sys.stdout:
+                to_remove.append((consumer_level, consumer))
+                to_add.append((consumer_level, sys.stderr))
+        for item in to_remove:
+            self.consumers.remove(item)
+        self.consumers.extend(to_add)
+
+logger = Logger()
diff --git a/awx/lib/site-packages/pip/pep425tags.py b/awx/lib/site-packages/pip/pep425tags.py
new file mode 100644
index 0000000000..95d3753951
--- /dev/null
+++ b/awx/lib/site-packages/pip/pep425tags.py
@@ -0,0 +1,102 @@
+"""Generate and work with PEP 425 Compatibility Tags."""
+
+import sys
+import warnings
+
+try:
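+    # NOTE (added comment): sysconfig exists as a top-level module only on
+    # Python >= 2.7; the distutils copy imported in the fallback below
+    # provides the same get_config_var() used by get_supported().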
import sysconfig +except ImportError: # pragma nocover + # Python < 2.7 + import distutils.sysconfig as sysconfig +import distutils.util + + +def get_abbr_impl(): + """Return abbreviated implementation name.""" + if hasattr(sys, 'pypy_version_info'): + pyimpl = 'pp' + elif sys.platform.startswith('java'): + pyimpl = 'jy' + elif sys.platform == 'cli': + pyimpl = 'ip' + else: + pyimpl = 'cp' + return pyimpl + + +def get_impl_ver(): + """Return implementation version.""" + return ''.join(map(str, sys.version_info[:2])) + + +def get_platform(): + """Return our platform name 'win32', 'linux_x86_64'""" + # XXX remove distutils dependency + return distutils.util.get_platform().replace('.', '_').replace('-', '_') + + +def get_supported(versions=None, noarch=False): + """Return a list of supported tags for each version specified in + `versions`. + + :param versions: a list of string versions, of the form ["33", "32"], + or None. The first version will be assumed to support our ABI. + """ + supported = [] + + # Versions must be given with respect to the preference + if versions is None: + versions = [] + major = sys.version_info[0] + # Support all previous minor Python versions. + for minor in range(sys.version_info[1], -1, -1): + versions.append(''.join(map(str, (major, minor)))) + + impl = get_abbr_impl() + + abis = [] + + try: + soabi = sysconfig.get_config_var('SOABI') + except IOError as e: # Issue #1074 + warnings.warn("{0}".format(e), RuntimeWarning) + soabi = None + + if soabi and soabi.startswith('cpython-'): + abis[0:0] = ['cp' + soabi.split('-', 1)[-1]] + + abi3s = set() + import imp + for suffix in imp.get_suffixes(): + if suffix[0].startswith('.abi'): + abi3s.add(suffix[0].split('.', 2)[1]) + + abis.extend(sorted(list(abi3s))) + + abis.append('none') + + if not noarch: + arch = get_platform() + + # Current version, current API (built specifically for our Python): + for abi in abis: + supported.append(('%s%s' % (impl, versions[0]), abi, arch)) + + # No abi / arch, but requires our implementation: + for i, version in enumerate(versions): + supported.append(('%s%s' % (impl, version), 'none', 'any')) + if i == 0: + # Tagged specifically as being cross-version compatible + # (with just the major version specified) + supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) + + # No abi / arch, generic Python + for i, version in enumerate(versions): + supported.append(('py%s' % (version,), 'none', 'any')) + if i == 0: + supported.append(('py%s' % (version[0]), 'none', 'any')) + + return supported + +supported_tags = get_supported() +supported_tags_noarch = get_supported(noarch=True) diff --git a/awx/lib/site-packages/pip/req.py b/awx/lib/site-packages/pip/req.py new file mode 100644 index 0000000000..93af61c9d3 --- /dev/null +++ b/awx/lib/site-packages/pip/req.py @@ -0,0 +1,1734 @@ +from email.parser import FeedParser +import os +import imp +import pkg_resources +import re +import sys +import shutil +import tempfile +import textwrap +import zipfile + +from distutils.util import change_root +from pip.locations import (bin_py, running_under_virtualenv,PIP_DELETE_MARKER_FILENAME, + write_delete_marker_file) +from pip.exceptions import (InstallationError, UninstallationError, + BestVersionAlreadyInstalled, + DistributionNotFound, PreviousBuildDirError) +from pip.vcs import vcs +from pip.log import logger +from pip.util import (display_path, rmtree, ask, ask_path_exists, backup_dir, + is_installable_dir, is_local, dist_is_local, + dist_in_usersite, dist_in_site_packages, renames, + 
normalize_path, egg_link_path, make_path_relative, + call_subprocess, is_prerelease, normalize_name) +from pip.backwardcompat import (urlparse, urllib, uses_pycache, + ConfigParser, string_types, HTTPError, + get_python_version, b) +from pip.index import Link +from pip.locations import build_prefix +from pip.download import (get_file_content, is_url, url_to_path, + path_to_url, is_archive_file, + unpack_vcs_link, is_vcs_url, is_file_url, + unpack_file_url, unpack_http_url) +import pip.wheel +from pip.wheel import move_wheel_files + +class InstallRequirement(object): + + def __init__(self, req, comes_from, source_dir=None, editable=False, + url=None, as_egg=False, update=True, prereleases=None, + from_bundle=False): + self.extras = () + if isinstance(req, string_types): + req = pkg_resources.Requirement.parse(req) + self.extras = req.extras + self.req = req + self.comes_from = comes_from + self.source_dir = source_dir + self.editable = editable + self.url = url + self.as_egg = as_egg + self._egg_info_path = None + # This holds the pkg_resources.Distribution object if this requirement + # is already available: + self.satisfied_by = None + # This hold the pkg_resources.Distribution object if this requirement + # conflicts with another installed distribution: + self.conflicts_with = None + self._temp_build_dir = None + self._is_bundle = None + # True if the editable should be updated: + self.update = update + # Set to True after successful installation + self.install_succeeded = None + # UninstallPathSet of uninstalled distribution (for possible rollback) + self.uninstalled = None + self.use_user_site = False + self.target_dir = None + self.from_bundle = from_bundle + + # True if pre-releases are acceptable + if prereleases: + self.prereleases = True + elif self.req is not None: + self.prereleases = any([is_prerelease(x[1]) and x[0] != "!=" for x in self.req.specs]) + else: + self.prereleases = False + + @classmethod + def from_editable(cls, editable_req, comes_from=None, default_vcs=None): + name, url, extras_override = parse_editable(editable_req, default_vcs) + if url.startswith('file:'): + source_dir = url_to_path(url) + else: + source_dir = None + + res = cls(name, comes_from, source_dir=source_dir, editable=True, url=url, prereleases=True) + + if extras_override is not None: + res.extras = extras_override + + return res + + @classmethod + def from_line(cls, name, comes_from=None, prereleases=None): + """Creates an InstallRequirement from a name, which might be a + requirement, directory containing 'setup.py', filename, or URL. + """ + url = None + name = name.strip() + req = None + path = os.path.normpath(os.path.abspath(name)) + link = None + + if is_url(name): + link = Link(name) + elif os.path.isdir(path) and (os.path.sep in name or name.startswith('.')): + if not is_installable_dir(path): + raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % name) + link = Link(path_to_url(name)) + elif is_archive_file(path): + if not os.path.isfile(path): + logger.warn('Requirement %r looks like a filename, but the file does not exist', name) + link = Link(path_to_url(name)) + + # If the line has an egg= definition, but isn't editable, pull the requirement out. + # Otherwise, assume the name is the req for the non URL/path/archive case. 
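+        # NOTE (illustrative, hypothetical URL): for a line such as
+        #   http://example.com/Foo-1.0.tar.gz#egg=Foo
+        # url becomes the link minus its fragment and req becomes "Foo";
+        # without an #egg= fragment the requirement stays unnamed until
+        # run_egg_info() fills in self.req from the package's PKG-INFO.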
+ if link and req is None: + url = link.url_without_fragment + req = link.egg_fragment #when fragment is None, this will become an 'unnamed' requirement + + # Handle relative file URLs + if link.scheme == 'file' and re.search(r'\.\./', url): + url = path_to_url(os.path.normpath(os.path.abspath(link.path))) + + else: + req = name + + return cls(req, comes_from, url=url, prereleases=prereleases) + + def __str__(self): + if self.req: + s = str(self.req) + if self.url: + s += ' from %s' % self.url + else: + s = self.url + if self.satisfied_by is not None: + s += ' in %s' % display_path(self.satisfied_by.location) + if self.comes_from: + if isinstance(self.comes_from, string_types): + comes_from = self.comes_from + else: + comes_from = self.comes_from.from_path() + if comes_from: + s += ' (from %s)' % comes_from + return s + + def from_path(self): + if self.req is None: + return None + s = str(self.req) + if self.comes_from: + if isinstance(self.comes_from, string_types): + comes_from = self.comes_from + else: + comes_from = self.comes_from.from_path() + if comes_from: + s += '->' + comes_from + return s + + def build_location(self, build_dir, unpack=True): + if self._temp_build_dir is not None: + return self._temp_build_dir + if self.req is None: + self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-') + self._ideal_build_dir = build_dir + return self._temp_build_dir + if self.editable: + name = self.name.lower() + else: + name = self.name + # FIXME: Is there a better place to create the build_dir? (hg and bzr need this) + if not os.path.exists(build_dir): + _make_build_dir(build_dir) + return os.path.join(build_dir, name) + + def correct_build_location(self): + """If the build location was a temporary directory, this will move it + to a new more permanent location""" + if self.source_dir is not None: + return + assert self.req is not None + assert self._temp_build_dir + old_location = self._temp_build_dir + new_build_dir = self._ideal_build_dir + del self._ideal_build_dir + if self.editable: + name = self.name.lower() + else: + name = self.name + new_location = os.path.join(new_build_dir, name) + if not os.path.exists(new_build_dir): + logger.debug('Creating directory %s' % new_build_dir) + _make_build_dir(new_build_dir) + if os.path.exists(new_location): + raise InstallationError( + 'A package already exists in %s; please remove it to continue' + % display_path(new_location)) + logger.debug('Moving package %s from %s to new location %s' + % (self, display_path(old_location), display_path(new_location))) + shutil.move(old_location, new_location) + self._temp_build_dir = new_location + self.source_dir = new_location + self._egg_info_path = None + + @property + def name(self): + if self.req is None: + return None + return self.req.project_name + + @property + def url_name(self): + if self.req is None: + return None + return urllib.quote(self.req.unsafe_name) + + @property + def setup_py(self): + return os.path.join(self.source_dir, 'setup.py') + + def run_egg_info(self, force_root_egg_info=False): + assert self.source_dir + if self.name: + logger.notify('Running setup.py egg_info for package %s' % self.name) + else: + logger.notify('Running setup.py egg_info for package from %s' % self.url) + logger.indent += 2 + try: + + # if it's distribute>=0.7, it won't contain an importable + # setuptools, and having an egg-info dir blocks the ability of + # setup.py to find setuptools plugins, so delete the egg-info dir if + # no setuptools. 
it will get recreated by the run of egg_info + # NOTE: this self.name check only works when installing from a specifier + # (not archive path/urls) + # TODO: take this out later + if self.name == 'distribute' and not os.path.isdir(os.path.join(self.source_dir, 'setuptools')): + rmtree(os.path.join(self.source_dir, 'distribute.egg-info')) + + script = self._run_setup_py + script = script.replace('__SETUP_PY__', repr(self.setup_py)) + script = script.replace('__PKG_NAME__', repr(self.name)) + egg_info_cmd = [sys.executable, '-c', script, 'egg_info'] + # We can't put the .egg-info files at the root, because then the source code will be mistaken + # for an installed egg, causing problems + if self.editable or force_root_egg_info: + egg_base_option = [] + else: + egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info') + if not os.path.exists(egg_info_dir): + os.makedirs(egg_info_dir) + egg_base_option = ['--egg-base', 'pip-egg-info'] + call_subprocess( + egg_info_cmd + egg_base_option, + cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False, + command_level=logger.VERBOSE_DEBUG, + command_desc='python setup.py egg_info') + finally: + logger.indent -= 2 + if not self.req: + self.req = pkg_resources.Requirement.parse( + "%(Name)s==%(Version)s" % self.pkg_info()) + self.correct_build_location() + + ## FIXME: This is a lame hack, entirely for PasteScript which has + ## a self-provided entry point that causes this awkwardness + _run_setup_py = """ +__file__ = __SETUP_PY__ +from setuptools.command import egg_info +import pkg_resources +import os +def replacement_run(self): + self.mkpath(self.egg_info) + installer = self.distribution.fetch_build_egg + for ep in pkg_resources.iter_entry_points('egg_info.writers'): + # require=False is the change we're making: + writer = ep.load(require=False) + if writer: + writer(self, ep.name, os.path.join(self.egg_info,ep.name)) + self.find_sources() +egg_info.egg_info.run = replacement_run +exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec')) +""" + + def egg_info_data(self, filename): + if self.satisfied_by is not None: + if not self.satisfied_by.has_metadata(filename): + return None + return self.satisfied_by.get_metadata(filename) + assert self.source_dir + filename = self.egg_info_path(filename) + if not os.path.exists(filename): + return None + fp = open(filename, 'r') + data = fp.read() + fp.close() + return data + + def egg_info_path(self, filename): + if self._egg_info_path is None: + if self.editable: + base = self.source_dir + else: + base = os.path.join(self.source_dir, 'pip-egg-info') + filenames = os.listdir(base) + if self.editable: + filenames = [] + for root, dirs, files in os.walk(base): + for dir in vcs.dirnames: + if dir in dirs: + dirs.remove(dir) + # Iterate over a copy of ``dirs``, since mutating + # a list while iterating over it can cause trouble. + # (See https://github.com/pypa/pip/pull/462.) 
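+                    # NOTE (illustrative sketch of the pitfall the copy
+                    # avoids): removing from a list while iterating over it
+                    # skips elements --
+                    #   >>> d = ['a', 'b', 'c']
+                    #   >>> for x in d: d.remove(x)
+                    #   >>> d
+                    #   ['b']    # 'b' was never visited
+                    # iterating over list(dirs) sidesteps this.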
+ for dir in list(dirs): + # Don't search in anything that looks like a virtualenv environment + if (os.path.exists(os.path.join(root, dir, 'bin', 'python')) + or os.path.exists(os.path.join(root, dir, 'Scripts', 'Python.exe'))): + dirs.remove(dir) + # Also don't search through tests + if dir == 'test' or dir == 'tests': + dirs.remove(dir) + filenames.extend([os.path.join(root, dir) + for dir in dirs]) + filenames = [f for f in filenames if f.endswith('.egg-info')] + + if not filenames: + raise InstallationError('No files/directories in %s (from %s)' % (base, filename)) + assert filenames, "No files/directories in %s (from %s)" % (base, filename) + + # if we have more than one match, we pick the toplevel one. This can + # easily be the case if there is a dist folder which contains an + # extracted tarball for testing purposes. + if len(filenames) > 1: + filenames.sort(key=lambda x: x.count(os.path.sep) + + (os.path.altsep and + x.count(os.path.altsep) or 0)) + self._egg_info_path = os.path.join(base, filenames[0]) + return os.path.join(self._egg_info_path, filename) + + def egg_info_lines(self, filename): + data = self.egg_info_data(filename) + if not data: + return [] + result = [] + for line in data.splitlines(): + line = line.strip() + if not line or line.startswith('#'): + continue + result.append(line) + return result + + def pkg_info(self): + p = FeedParser() + data = self.egg_info_data('PKG-INFO') + if not data: + logger.warn('No PKG-INFO file found in %s' % display_path(self.egg_info_path('PKG-INFO'))) + p.feed(data or '') + return p.close() + + @property + def dependency_links(self): + return self.egg_info_lines('dependency_links.txt') + + _requirements_section_re = re.compile(r'\[(.*?)\]') + + def requirements(self, extras=()): + in_extra = None + for line in self.egg_info_lines('requires.txt'): + match = self._requirements_section_re.match(line.lower()) + if match: + in_extra = match.group(1) + continue + if in_extra and in_extra not in extras: + logger.debug('skipping extra %s' % in_extra) + # Skip requirement for an extra we aren't requiring + continue + yield line + + @property + def absolute_versions(self): + for qualifier, version in self.req.specs: + if qualifier == '==': + yield version + + @property + def installed_version(self): + return self.pkg_info()['version'] + + def assert_source_matches_version(self): + assert self.source_dir + version = self.installed_version + if version not in self.req: + logger.warn('Requested %s, but installing version %s' % (self, self.installed_version)) + else: + logger.debug('Source in %s has version %s, which satisfies requirement %s' + % (display_path(self.source_dir), version, self)) + + def update_editable(self, obtain=True): + if not self.url: + logger.info("Cannot update repository at %s; repository location is unknown" % self.source_dir) + return + assert self.editable + assert self.source_dir + if self.url.startswith('file:'): + # Static paths don't get updated + return + assert '+' in self.url, "bad url: %r" % self.url + if not self.update: + return + vc_type, url = self.url.split('+', 1) + backend = vcs.get_backend(vc_type) + if backend: + vcs_backend = backend(self.url) + if obtain: + vcs_backend.obtain(self.source_dir) + else: + vcs_backend.export(self.source_dir) + else: + assert 0, ( + 'Unexpected version control type (in %s): %s' + % (self.url, vc_type)) + + def uninstall(self, auto_confirm=False): + """ + Uninstall the distribution currently satisfying this requirement. 
+ + Prompts before removing or modifying files unless + ``auto_confirm`` is True. + + Refuses to delete or modify files outside of ``sys.prefix`` - + thus uninstallation within a virtual environment can only + modify that virtual environment, even if the virtualenv is + linked to global site-packages. + + """ + if not self.check_if_exists(): + raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,)) + dist = self.satisfied_by or self.conflicts_with + + paths_to_remove = UninstallPathSet(dist) + + pip_egg_info_path = os.path.join(dist.location, + dist.egg_name()) + '.egg-info' + dist_info_path = os.path.join(dist.location, + '-'.join(dist.egg_name().split('-')[:2]) + ) + '.dist-info' + # workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367 + debian_egg_info_path = pip_egg_info_path.replace( + '-py%s' % pkg_resources.PY_MAJOR, '') + easy_install_egg = dist.egg_name() + '.egg' + develop_egg_link = egg_link_path(dist) + + pip_egg_info_exists = os.path.exists(pip_egg_info_path) + debian_egg_info_exists = os.path.exists(debian_egg_info_path) + dist_info_exists = os.path.exists(dist_info_path) + if pip_egg_info_exists or debian_egg_info_exists: + # package installed by pip + if pip_egg_info_exists: + egg_info_path = pip_egg_info_path + else: + egg_info_path = debian_egg_info_path + paths_to_remove.add(egg_info_path) + if dist.has_metadata('installed-files.txt'): + for installed_file in dist.get_metadata('installed-files.txt').splitlines(): + path = os.path.normpath(os.path.join(egg_info_path, installed_file)) + paths_to_remove.add(path) + #FIXME: need a test for this elif block + #occurs with --single-version-externally-managed/--record outside of pip + elif dist.has_metadata('top_level.txt'): + if dist.has_metadata('namespace_packages.txt'): + namespaces = dist.get_metadata('namespace_packages.txt') + else: + namespaces = [] + for top_level_pkg in [p for p + in dist.get_metadata('top_level.txt').splitlines() + if p and p not in namespaces]: + path = os.path.join(dist.location, top_level_pkg) + paths_to_remove.add(path) + paths_to_remove.add(path + '.py') + paths_to_remove.add(path + '.pyc') + + elif dist.location.endswith(easy_install_egg): + # package installed by easy_install + paths_to_remove.add(dist.location) + easy_install_pth = os.path.join(os.path.dirname(dist.location), + 'easy-install.pth') + paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) + + elif develop_egg_link: + # develop egg + fh = open(develop_egg_link, 'r') + link_pointer = os.path.normcase(fh.readline().strip()) + fh.close() + assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location) + paths_to_remove.add(develop_egg_link) + easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), + 'easy-install.pth') + paths_to_remove.add_pth(easy_install_pth, dist.location) + elif dist_info_exists: + for path in pip.wheel.uninstallation_paths(dist): + paths_to_remove.add(path) + + # find distutils scripts= scripts + if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): + for script in dist.metadata_listdir('scripts'): + paths_to_remove.add(os.path.join(bin_py, script)) + if sys.platform == 'win32': + paths_to_remove.add(os.path.join(bin_py, script) + '.bat') + + # find console_scripts + if dist.has_metadata('entry_points.txt'): + config = ConfigParser.SafeConfigParser() + config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt'))) + if 
config.has_section('console_scripts'):
+                for name, value in config.items('console_scripts'):
+                    paths_to_remove.add(os.path.join(bin_py, name))
+                    if sys.platform == 'win32':
+                        paths_to_remove.add(os.path.join(bin_py, name) + '.exe')
+                        paths_to_remove.add(os.path.join(bin_py, name) + '.exe.manifest')
+                        paths_to_remove.add(os.path.join(bin_py, name) + '-script.py')
+
+        paths_to_remove.remove(auto_confirm)
+        self.uninstalled = paths_to_remove
+
+    def rollback_uninstall(self):
+        if self.uninstalled:
+            self.uninstalled.rollback()
+        else:
+            logger.error("Can't rollback %s, nothing uninstalled."
+                         % (self.name,))
+
+    def commit_uninstall(self):
+        if self.uninstalled:
+            self.uninstalled.commit()
+        else:
+            logger.error("Can't commit %s, nothing uninstalled."
+                         % (self.name,))
+
+    def archive(self, build_dir):
+        assert self.source_dir
+        create_archive = True
+        archive_name = '%s-%s.zip' % (self.name, self.installed_version)
+        archive_path = os.path.join(build_dir, archive_name)
+        if os.path.exists(archive_path):
+            response = ask_path_exists(
+                'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
+                display_path(archive_path), ('i', 'w', 'b'))
+            if response == 'i':
+                create_archive = False
+            elif response == 'w':
+                logger.warn('Deleting %s' % display_path(archive_path))
+                os.remove(archive_path)
+            elif response == 'b':
+                dest_file = backup_dir(archive_path)
+                logger.warn('Backing up %s to %s'
+                            % (display_path(archive_path), display_path(dest_file)))
+                shutil.move(archive_path, dest_file)
+        if create_archive:
+            zip = zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED)
+            dir = os.path.normcase(os.path.abspath(self.source_dir))
+            for dirpath, dirnames, filenames in os.walk(dir):
+                if 'pip-egg-info' in dirnames:
+                    dirnames.remove('pip-egg-info')
+                for dirname in dirnames:
+                    dirname = os.path.join(dirpath, dirname)
+                    name = self._clean_zip_name(dirname, dir)
+                    zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
+                    zipdir.external_attr = 0x1ED << 16  # 0o755
+                    zip.writestr(zipdir, '')
+                for filename in filenames:
+                    if filename == PIP_DELETE_MARKER_FILENAME:
+                        continue
+                    filename = os.path.join(dirpath, filename)
+                    name = self._clean_zip_name(filename, dir)
+                    zip.write(filename, self.name + '/' + name)
+            zip.close()
+            logger.indent -= 2
+            logger.notify('Saved %s' % display_path(archive_path))
+
+    def _clean_zip_name(self, name, prefix):
+        assert name.startswith(prefix+os.path.sep), (
+            "name %r doesn't start with prefix %r" % (name, prefix))
+        name = name[len(prefix)+1:]
+        name = name.replace(os.path.sep, '/')
+        return name
+
+    def install(self, install_options, global_options=(), root=None):
+        if self.editable:
+            self.install_editable(install_options, global_options)
+            return
+        if self.is_wheel:
+            self.move_wheel_files(self.source_dir)
+            self.install_succeeded = True
+            return
+
+        temp_location = tempfile.mkdtemp('-record', 'pip-')
+        record_filename = os.path.join(temp_location, 'install-record.txt')
+        try:
+            install_args = [sys.executable]
+            install_args.append('-c')
+            install_args.append(
+                "import setuptools;__file__=%r;"\
+                "exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py)
+            install_args += list(global_options) + ['install','--record', record_filename]
+
+            if not self.as_egg:
+                install_args += ['--single-version-externally-managed']
+
+            if root is not None:
+                install_args += ['--root', root]
+
+            if running_under_virtualenv():
+                ## FIXME: I'm not sure if this is a reasonable location; probably not
+                ## but we can't put it in the default
location, as that is a virtualenv symlink that isn't writable + install_args += ['--install-headers', + os.path.join(sys.prefix, 'include', 'site', + 'python' + get_python_version())] + logger.notify('Running setup.py install for %s' % self.name) + logger.indent += 2 + try: + call_subprocess(install_args + install_options, + cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False) + finally: + logger.indent -= 2 + if not os.path.exists(record_filename): + logger.notify('Record file %s not found' % record_filename) + return + self.install_succeeded = True + if self.as_egg: + # there's no --always-unzip option we can pass to install command + # so we unable to save the installed-files.txt + return + + def prepend_root(path): + if root is None or not os.path.isabs(path): + return path + else: + return change_root(root, path) + + f = open(record_filename) + for line in f: + line = line.strip() + if line.endswith('.egg-info'): + egg_info_dir = prepend_root(line) + break + else: + logger.warn('Could not find .egg-info directory in install record for %s' % self) + ## FIXME: put the record somewhere + ## FIXME: should this be an error? + return + f.close() + new_lines = [] + f = open(record_filename) + for line in f: + filename = line.strip() + if os.path.isdir(filename): + filename += os.path.sep + new_lines.append(make_path_relative(prepend_root(filename), egg_info_dir)) + f.close() + f = open(os.path.join(egg_info_dir, 'installed-files.txt'), 'w') + f.write('\n'.join(new_lines)+'\n') + f.close() + finally: + if os.path.exists(record_filename): + os.remove(record_filename) + os.rmdir(temp_location) + + def remove_temporary_source(self): + """Remove the source files from this requirement, if they are marked + for deletion""" + if self.is_bundle or os.path.exists(self.delete_marker_filename): + logger.info('Removing source in %s' % self.source_dir) + if self.source_dir: + rmtree(self.source_dir) + self.source_dir = None + if self._temp_build_dir and os.path.exists(self._temp_build_dir): + rmtree(self._temp_build_dir) + self._temp_build_dir = None + + def install_editable(self, install_options, global_options=()): + logger.notify('Running setup.py develop for %s' % self.name) + logger.indent += 2 + try: + ## FIXME: should we do --install-headers here too? 
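+            # NOTE (illustrative, not upstream pip code): the call below runs
+            # roughly
+            #   python -c "import setuptools; __file__='<setup.py>'; exec(...)" \
+            #       develop --no-deps
+            # from self.source_dir -- a "setup.py develop" that still works
+            # when setup.py itself never imports setuptools.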
+ call_subprocess( + [sys.executable, '-c', + "import setuptools; __file__=%r; exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py] + + list(global_options) + ['develop', '--no-deps'] + list(install_options), + + cwd=self.source_dir, filter_stdout=self._filter_install, + show_stdout=False) + finally: + logger.indent -= 2 + self.install_succeeded = True + + def _filter_install(self, line): + level = logger.NOTIFY + for regex in [r'^running .*', r'^writing .*', '^creating .*', '^[Cc]opying .*', + r'^reading .*', r"^removing .*\.egg-info' \(and everything under it\)$", + r'^byte-compiling ', + # Not sure what this warning is, but it seems harmless: + r"^warning: manifest_maker: standard file '-c' not found$"]: + if re.search(regex, line.strip()): + level = logger.INFO + break + return (level, line) + + def check_if_exists(self): + """Find an installed distribution that satisfies or conflicts + with this requirement, and set self.satisfied_by or + self.conflicts_with appropriately.""" + + if self.req is None: + return False + try: + # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts) + # if we've already set distribute as a conflict to setuptools + # then this check has already run before. we don't want it to + # run again, and return False, since it would block the uninstall + # TODO: remove this later + if (self.req.project_name == 'setuptools' + and self.conflicts_with + and self.conflicts_with.project_name == 'distribute'): + return True + else: + self.satisfied_by = pkg_resources.get_distribution(self.req) + except pkg_resources.DistributionNotFound: + return False + except pkg_resources.VersionConflict: + existing_dist = pkg_resources.get_distribution(self.req.project_name) + if self.use_user_site: + if dist_in_usersite(existing_dist): + self.conflicts_with = existing_dist + elif running_under_virtualenv() and dist_in_site_packages(existing_dist): + raise InstallationError("Will not install to the user site because it will lack sys.path precedence to %s in %s" + %(existing_dist.project_name, existing_dist.location)) + else: + self.conflicts_with = existing_dist + return True + + @property + def is_wheel(self): + return self.url and '.whl' in self.url + + @property + def is_bundle(self): + if self._is_bundle is not None: + return self._is_bundle + base = self._temp_build_dir + if not base: + ## FIXME: this doesn't seem right: + return False + self._is_bundle = (os.path.exists(os.path.join(base, 'pip-manifest.txt')) + or os.path.exists(os.path.join(base, 'pyinstall-manifest.txt'))) + return self._is_bundle + + def bundle_requirements(self): + for dest_dir in self._bundle_editable_dirs: + package = os.path.basename(dest_dir) + ## FIXME: svnism: + for vcs_backend in vcs.backends: + url = rev = None + vcs_bundle_file = os.path.join( + dest_dir, vcs_backend.bundle_file) + if os.path.exists(vcs_bundle_file): + vc_type = vcs_backend.name + fp = open(vcs_bundle_file) + content = fp.read() + fp.close() + url, rev = vcs_backend().parse_vcs_bundle_file(content) + break + if url: + url = '%s+%s@%s' % (vc_type, url, rev) + else: + url = None + yield InstallRequirement( + package, self, editable=True, url=url, + update=False, source_dir=dest_dir, from_bundle=True) + for dest_dir in self._bundle_build_dirs: + package = os.path.basename(dest_dir) + yield InstallRequirement(package, self,source_dir=dest_dir, from_bundle=True) + + def move_bundle_files(self, dest_build_dir, dest_src_dir): + base = self._temp_build_dir + assert base + src_dir = 
os.path.join(base, 'src') + build_dir = os.path.join(base, 'build') + bundle_build_dirs = [] + bundle_editable_dirs = [] + for source_dir, dest_dir, dir_collection in [ + (src_dir, dest_src_dir, bundle_editable_dirs), + (build_dir, dest_build_dir, bundle_build_dirs)]: + if os.path.exists(source_dir): + for dirname in os.listdir(source_dir): + dest = os.path.join(dest_dir, dirname) + dir_collection.append(dest) + if os.path.exists(dest): + logger.warn('The directory %s (containing package %s) already exists; cannot move source from bundle %s' + % (dest, dirname, self)) + continue + if not os.path.exists(dest_dir): + logger.info('Creating directory %s' % dest_dir) + os.makedirs(dest_dir) + shutil.move(os.path.join(source_dir, dirname), dest) + if not os.listdir(source_dir): + os.rmdir(source_dir) + self._temp_build_dir = None + self._bundle_build_dirs = bundle_build_dirs + self._bundle_editable_dirs = bundle_editable_dirs + + def move_wheel_files(self, wheeldir): + move_wheel_files(self.name, self.req, wheeldir, user=self.use_user_site, home=self.target_dir) + + @property + def delete_marker_filename(self): + assert self.source_dir + return os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME) + + +class Requirements(object): + + def __init__(self): + self._keys = [] + self._dict = {} + + def keys(self): + return self._keys + + def values(self): + return [self._dict[key] for key in self._keys] + + def __contains__(self, item): + return item in self._keys + + def __setitem__(self, key, value): + if key not in self._keys: + self._keys.append(key) + self._dict[key] = value + + def __getitem__(self, key): + return self._dict[key] + + def __repr__(self): + values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()] + return 'Requirements({%s})' % ', '.join(values) + + +class RequirementSet(object): + + def __init__(self, build_dir, src_dir, download_dir, download_cache=None, + upgrade=False, ignore_installed=False, as_egg=False, target_dir=None, + ignore_dependencies=False, force_reinstall=False, use_user_site=False): + self.build_dir = build_dir + self.src_dir = src_dir + self.download_dir = download_dir + self.download_cache = download_cache + self.upgrade = upgrade + self.ignore_installed = ignore_installed + self.force_reinstall = force_reinstall + self.requirements = Requirements() + # Mapping of alias: real_name + self.requirement_aliases = {} + self.unnamed_requirements = [] + self.ignore_dependencies = ignore_dependencies + self.successfully_downloaded = [] + self.successfully_installed = [] + self.reqs_to_cleanup = [] + self.as_egg = as_egg + self.use_user_site = use_user_site + self.target_dir = target_dir #set from --target option + + def __str__(self): + reqs = [req for req in self.requirements.values() + if not req.comes_from] + reqs.sort(key=lambda req: req.name.lower()) + return ' '.join([str(req.req) for req in reqs]) + + def add_requirement(self, install_req): + name = install_req.name + install_req.as_egg = self.as_egg + install_req.use_user_site = self.use_user_site + install_req.target_dir = self.target_dir + if not name: + #url or path requirement w/o an egg fragment + self.unnamed_requirements.append(install_req) + else: + if self.has_requirement(name): + raise InstallationError( + 'Double requirement given: %s (already in %s, name=%r)' + % (install_req, self.get_requirement(name), name)) + self.requirements[name] = install_req + ## FIXME: what about other normalizations? E.g., _ vs. -? 
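+            # NOTE (added comment): the alias map lets get_requirement("django")
+            # find a requirement registered as "Django"; lookups fall back to
+            # requirement_aliases after missing in self.requirements.
+            # Underscore/hyphen variants, per the FIXME above, are not
+            # normalized.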
+ if name.lower() != name: + self.requirement_aliases[name.lower()] = name + + def has_requirement(self, project_name): + for name in project_name, project_name.lower(): + if name in self.requirements or name in self.requirement_aliases: + return True + return False + + @property + def has_requirements(self): + return list(self.requirements.values()) or self.unnamed_requirements + + @property + def has_editables(self): + if any(req.editable for req in self.requirements.values()): + return True + if any(req.editable for req in self.unnamed_requirements): + return True + return False + + @property + def is_download(self): + if self.download_dir: + self.download_dir = os.path.expanduser(self.download_dir) + if os.path.exists(self.download_dir): + return True + else: + logger.fatal('Could not find download directory') + raise InstallationError( + "Could not find or access download directory '%s'" + % display_path(self.download_dir)) + return False + + def get_requirement(self, project_name): + for name in project_name, project_name.lower(): + if name in self.requirements: + return self.requirements[name] + if name in self.requirement_aliases: + return self.requirements[self.requirement_aliases[name]] + raise KeyError("No project with the name %r" % project_name) + + def uninstall(self, auto_confirm=False): + for req in self.requirements.values(): + req.uninstall(auto_confirm=auto_confirm) + req.commit_uninstall() + + def locate_files(self): + ## FIXME: duplicates code from prepare_files; relevant code should + ## probably be factored out into a separate method + unnamed = list(self.unnamed_requirements) + reqs = list(self.requirements.values()) + while reqs or unnamed: + if unnamed: + req_to_install = unnamed.pop(0) + else: + req_to_install = reqs.pop(0) + install_needed = True + if not self.ignore_installed and not req_to_install.editable: + req_to_install.check_if_exists() + if req_to_install.satisfied_by: + if self.upgrade: + #don't uninstall conflict if user install and and conflict is not user install + if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)): + req_to_install.conflicts_with = req_to_install.satisfied_by + req_to_install.satisfied_by = None + else: + install_needed = False + if req_to_install.satisfied_by: + logger.notify('Requirement already satisfied ' + '(use --upgrade to upgrade): %s' + % req_to_install) + + if req_to_install.editable: + if req_to_install.source_dir is None: + req_to_install.source_dir = req_to_install.build_location(self.src_dir) + elif install_needed: + req_to_install.source_dir = req_to_install.build_location(self.build_dir, not self.is_download) + + if req_to_install.source_dir is not None and not os.path.isdir(req_to_install.source_dir): + raise InstallationError('Could not install requirement %s ' + 'because source folder %s does not exist ' + '(perhaps --no-download was used without first running ' + 'an equivalent install with --no-install?)' + % (req_to_install, req_to_install.source_dir)) + + def prepare_files(self, finder, force_root_egg_info=False, bundle=False): + """Prepare process. 
Create temp directories, download and/or unpack files.""" + unnamed = list(self.unnamed_requirements) + reqs = list(self.requirements.values()) + while reqs or unnamed: + if unnamed: + req_to_install = unnamed.pop(0) + else: + req_to_install = reqs.pop(0) + install = True + best_installed = False + not_found = None + if not self.ignore_installed and not req_to_install.editable: + req_to_install.check_if_exists() + if req_to_install.satisfied_by: + if self.upgrade: + if not self.force_reinstall and not req_to_install.url: + try: + url = finder.find_requirement( + req_to_install, self.upgrade) + except BestVersionAlreadyInstalled: + best_installed = True + install = False + except DistributionNotFound: + not_found = sys.exc_info()[1] + else: + # Avoid the need to call find_requirement again + req_to_install.url = url.url + + if not best_installed: + #don't uninstall conflict if user install and conflict is not user install + if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)): + req_to_install.conflicts_with = req_to_install.satisfied_by + req_to_install.satisfied_by = None + else: + install = False + if req_to_install.satisfied_by: + if best_installed: + logger.notify('Requirement already up-to-date: %s' + % req_to_install) + else: + logger.notify('Requirement already satisfied ' + '(use --upgrade to upgrade): %s' + % req_to_install) + if req_to_install.editable: + logger.notify('Obtaining %s' % req_to_install) + elif install: + if req_to_install.url and req_to_install.url.lower().startswith('file:'): + logger.notify('Unpacking %s' % display_path(url_to_path(req_to_install.url))) + else: + logger.notify('Downloading/unpacking %s' % req_to_install) + logger.indent += 2 + try: + is_bundle = False + is_wheel = False + if req_to_install.editable: + if req_to_install.source_dir is None: + location = req_to_install.build_location(self.src_dir) + req_to_install.source_dir = location + else: + location = req_to_install.source_dir + if not os.path.exists(self.build_dir): + _make_build_dir(self.build_dir) + req_to_install.update_editable(not self.is_download) + if self.is_download: + req_to_install.run_egg_info() + req_to_install.archive(self.download_dir) + else: + req_to_install.run_egg_info() + elif install: + ##@@ if filesystem packages are not marked + ##editable in a req, a non deterministic error + ##occurs when the script attempts to unpack the + ##build directory + + # NB: This call can result in the creation of a temporary build directory + location = req_to_install.build_location(self.build_dir, not self.is_download) + unpack = True + url = None + + # In the case where the req comes from a bundle, we should + # assume a build dir exists and move on + if req_to_install.from_bundle: + pass + # If a checkout exists, it's unwise to keep going. version + # inconsistencies are logged later, but do not fail the + # installation. + elif os.path.exists(os.path.join(location, 'setup.py')): + msg = textwrap.dedent(""" + pip can't proceed with requirement '%s' due to a pre-existing build directory. + location: %s + This is likely due to a previous installation that failed. + pip is being responsible and not assuming it can delete this. + Please delete it and try again. 
+ """ % (req_to_install, location)) + e = PreviousBuildDirError(msg) + logger.fatal(msg) + raise e + else: + ## FIXME: this won't upgrade when there's an existing package unpacked in `location` + if req_to_install.url is None: + if not_found: + raise not_found + url = finder.find_requirement(req_to_install, upgrade=self.upgrade) + else: + ## FIXME: should req_to_install.url already be a link? + url = Link(req_to_install.url) + assert url + if url: + try: + self.unpack_url(url, location, self.is_download) + except HTTPError: + e = sys.exc_info()[1] + logger.fatal('Could not install requirement %s because of error %s' + % (req_to_install, e)) + raise InstallationError( + 'Could not install requirement %s because of HTTP error %s for URL %s' + % (req_to_install, e, url)) + else: + unpack = False + if unpack: + is_bundle = req_to_install.is_bundle + is_wheel = url and url.filename.endswith('.whl') + if is_bundle: + req_to_install.move_bundle_files(self.build_dir, self.src_dir) + for subreq in req_to_install.bundle_requirements(): + reqs.append(subreq) + self.add_requirement(subreq) + elif self.is_download: + req_to_install.source_dir = location + if not is_wheel: + # FIXME: see https://github.com/pypa/pip/issues/1112 + req_to_install.run_egg_info() + if url and url.scheme in vcs.all_schemes: + req_to_install.archive(self.download_dir) + elif is_wheel: + req_to_install.source_dir = location + req_to_install.url = url.url + dist = list(pkg_resources.find_distributions(location))[0] + if not req_to_install.req: + req_to_install.req = dist.as_requirement() + self.add_requirement(req_to_install) + if not self.ignore_dependencies: + for subreq in dist.requires(req_to_install.extras): + if self.has_requirement(subreq.project_name): + continue + subreq = InstallRequirement(str(subreq), + req_to_install) + reqs.append(subreq) + self.add_requirement(subreq) + else: + req_to_install.source_dir = location + req_to_install.run_egg_info() + if force_root_egg_info: + # We need to run this to make sure that the .egg-info/ + # directory is created for packing in the bundle + req_to_install.run_egg_info(force_root_egg_info=True) + req_to_install.assert_source_matches_version() + #@@ sketchy way of identifying packages not grabbed from an index + if bundle and req_to_install.url: + self.copy_to_build_dir(req_to_install) + install = False + # req_to_install.req is only avail after unpack for URL pkgs + # repeat check_if_exists to uninstall-on-upgrade (#14) + req_to_install.check_if_exists() + if req_to_install.satisfied_by: + if self.upgrade or self.ignore_installed: + #don't uninstall conflict if user install and and conflict is not user install + if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)): + req_to_install.conflicts_with = req_to_install.satisfied_by + req_to_install.satisfied_by = None + else: + install = False + if not (is_bundle or is_wheel): + ## FIXME: shouldn't be globally added: + finder.add_dependency_links(req_to_install.dependency_links) + if (req_to_install.extras): + logger.notify("Installing extra requirements: %r" % ','.join(req_to_install.extras)) + if not self.ignore_dependencies: + for req in req_to_install.requirements(req_to_install.extras): + try: + name = pkg_resources.Requirement.parse(req).project_name + except ValueError: + e = sys.exc_info()[1] + ## FIXME: proper warning + logger.error('Invalid requirement: %r (%s) in requirement %s' % (req, e, req_to_install)) + continue + if self.has_requirement(name): + ## FIXME: check for conflict + continue + 
subreq = InstallRequirement(req, req_to_install) + reqs.append(subreq) + self.add_requirement(subreq) + if not self.has_requirement(req_to_install.name): + #'unnamed' requirements will get added here + self.add_requirement(req_to_install) + if self.is_download or req_to_install._temp_build_dir is not None: + self.reqs_to_cleanup.append(req_to_install) + else: + self.reqs_to_cleanup.append(req_to_install) + + if install: + self.successfully_downloaded.append(req_to_install) + if bundle and (req_to_install.url and req_to_install.url.startswith('file:///')): + self.copy_to_build_dir(req_to_install) + finally: + logger.indent -= 2 + + def cleanup_files(self, bundle=False): + """Clean up files, remove builds.""" + logger.notify('Cleaning up...') + logger.indent += 2 + for req in self.reqs_to_cleanup: + req.remove_temporary_source() + + remove_dir = [] + if self._pip_has_created_build_dir(): + remove_dir.append(self.build_dir) + + # The source dir of a bundle can always be removed. + # FIXME: not if it pre-existed the bundle! + if bundle: + remove_dir.append(self.src_dir) + + for dir in remove_dir: + if os.path.exists(dir): + logger.info('Removing temporary dir %s...' % dir) + rmtree(dir) + + logger.indent -= 2 + + def _pip_has_created_build_dir(self): + return (self.build_dir == build_prefix and + os.path.exists(os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME))) + + def copy_to_build_dir(self, req_to_install): + target_dir = req_to_install.editable and self.src_dir or self.build_dir + logger.info("Copying %s to %s" % (req_to_install.name, target_dir)) + dest = os.path.join(target_dir, req_to_install.name) + shutil.copytree(req_to_install.source_dir, dest) + call_subprocess(["python", "%s/setup.py" % dest, "clean"], cwd=dest, + command_desc='python setup.py clean') + + def unpack_url(self, link, location, only_download=False): + if only_download: + loc = self.download_dir + else: + loc = location + if is_vcs_url(link): + return unpack_vcs_link(link, loc, only_download) + # a local file:// index could have links with hashes + elif not link.hash and is_file_url(link): + return unpack_file_url(link, loc) + else: + if self.download_cache: + self.download_cache = os.path.expanduser(self.download_cache) + retval = unpack_http_url(link, location, self.download_cache, self.download_dir) + if only_download: + write_delete_marker_file(location) + return retval + + def install(self, install_options, global_options=(), *args, **kwargs): + """Install everything in this set (after having downloaded and unpacked the packages)""" + to_install = [r for r in self.requirements.values() + if not r.satisfied_by] + + # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts) + # move the distribute-0.7.X wrapper to the end because it does not + # install a setuptools package. 
by moving it to the end, we ensure it's + # setuptools dependency is handled first, which will provide the + # setuptools package + # TODO: take this out later + distribute_req = pkg_resources.Requirement.parse("distribute>=0.7") + for req in to_install: + if req.name == 'distribute' and req.installed_version in distribute_req: + to_install.remove(req) + to_install.append(req) + + if to_install: + logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install])) + logger.indent += 2 + try: + for requirement in to_install: + + # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts) + # when upgrading from distribute-0.6.X to the new merged + # setuptools in py2, we need to force setuptools to uninstall + # distribute. In py3, which is always using distribute, this + # conversion is already happening in distribute's pkg_resources. + # It's ok *not* to check if setuptools>=0.7 because if someone + # were actually trying to ugrade from distribute to setuptools + # 0.6.X, then all this could do is actually help, although that + # upgade path was certainly never "supported" + # TODO: remove this later + if requirement.name == 'setuptools': + try: + # only uninstall distribute<0.7. For >=0.7, setuptools + # will also be present, and that's what we need to + # uninstall + distribute_requirement = pkg_resources.Requirement.parse("distribute<0.7") + existing_distribute = pkg_resources.get_distribution("distribute") + if existing_distribute in distribute_requirement: + requirement.conflicts_with = existing_distribute + except pkg_resources.DistributionNotFound: + # distribute wasn't installed, so nothing to do + pass + + if requirement.conflicts_with: + logger.notify('Found existing installation: %s' + % requirement.conflicts_with) + logger.indent += 2 + try: + requirement.uninstall(auto_confirm=True) + finally: + logger.indent -= 2 + try: + requirement.install(install_options, global_options, *args, **kwargs) + except: + # if install did not succeed, rollback previous uninstall + if requirement.conflicts_with and not requirement.install_succeeded: + requirement.rollback_uninstall() + raise + else: + if requirement.conflicts_with and requirement.install_succeeded: + requirement.commit_uninstall() + requirement.remove_temporary_source() + finally: + logger.indent -= 2 + self.successfully_installed = to_install + + def create_bundle(self, bundle_filename): + ## FIXME: can't decide which is better; zip is easier to read + ## random files from, but tar.bz2 is smaller and not as lame a + ## format. + + ## FIXME: this file should really include a manifest of the + ## packages, maybe some other metadata files. It would make + ## it easier to detect as well. 
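+        # NOTE (illustrative, not upstream pip code): the archive written
+        # below is laid out as
+        #   build/<name>/...    unpacked sdists
+        #   src/<name>/...      editable checkouts
+        #   pip-manifest.txt    requirement list from bundle_requirements()
+        # with each vcs metadata dir replaced by a small "guide" file that
+        # records its url and revision.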
+ zip = zipfile.ZipFile(bundle_filename, 'w', zipfile.ZIP_DEFLATED) + vcs_dirs = [] + for dir, basename in (self.build_dir, 'build'), (self.src_dir, 'src'): + dir = os.path.normcase(os.path.abspath(dir)) + for dirpath, dirnames, filenames in os.walk(dir): + for backend in vcs.backends: + vcs_backend = backend() + vcs_url = vcs_rev = None + if vcs_backend.dirname in dirnames: + for vcs_dir in vcs_dirs: + if dirpath.startswith(vcs_dir): + # vcs bundle file already in parent directory + break + else: + vcs_url, vcs_rev = vcs_backend.get_info( + os.path.join(dir, dirpath)) + vcs_dirs.append(dirpath) + vcs_bundle_file = vcs_backend.bundle_file + vcs_guide = vcs_backend.guide % {'url': vcs_url, + 'rev': vcs_rev} + dirnames.remove(vcs_backend.dirname) + break + if 'pip-egg-info' in dirnames: + dirnames.remove('pip-egg-info') + for dirname in dirnames: + dirname = os.path.join(dirpath, dirname) + name = self._clean_zip_name(dirname, dir) + zip.writestr(basename + '/' + name + '/', '') + for filename in filenames: + if filename == PIP_DELETE_MARKER_FILENAME: + continue + filename = os.path.join(dirpath, filename) + name = self._clean_zip_name(filename, dir) + zip.write(filename, basename + '/' + name) + if vcs_url: + name = os.path.join(dirpath, vcs_bundle_file) + name = self._clean_zip_name(name, dir) + zip.writestr(basename + '/' + name, vcs_guide) + + zip.writestr('pip-manifest.txt', self.bundle_requirements()) + zip.close() + + BUNDLE_HEADER = '''\ +# This is a pip bundle file, that contains many source packages +# that can be installed as a group. You can install this like: +# pip this_file.zip +# The rest of the file contains a list of all the packages included: +''' + + def bundle_requirements(self): + parts = [self.BUNDLE_HEADER] + for req in [req for req in self.requirements.values() + if not req.comes_from]: + parts.append('%s==%s\n' % (req.name, req.installed_version)) + parts.append('# These packages were installed to satisfy the above requirements:\n') + for req in [req for req in self.requirements.values() + if req.comes_from]: + parts.append('%s==%s\n' % (req.name, req.installed_version)) + ## FIXME: should we do something with self.unnamed_requirements? 
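+        ## Illustrative sketch of the manifest this returns (hypothetical
+        ## package names, not upstream commentary):
+        ##   # This is a pip bundle file, that contains many source packages
+        ##   ...
+        ##   MyApp==1.0
+        ##   # These packages were installed to satisfy the above requirements:
+        ##   six==1.3.0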
+ return ''.join(parts) + + def _clean_zip_name(self, name, prefix): + assert name.startswith(prefix+os.path.sep), ( + "name %r doesn't start with prefix %r" % (name, prefix)) + name = name[len(prefix)+1:] + name = name.replace(os.path.sep, '/') + return name + + +def _make_build_dir(build_dir): + os.makedirs(build_dir) + write_delete_marker_file(build_dir) + + +_scheme_re = re.compile(r'^(http|https|file):', re.I) + + +def parse_requirements(filename, finder=None, comes_from=None, options=None): + skip_match = None + skip_regex = options.skip_requirements_regex if options else None + if skip_regex: + skip_match = re.compile(skip_regex) + reqs_file_dir = os.path.dirname(os.path.abspath(filename)) + filename, content = get_file_content(filename, comes_from=comes_from) + for line_number, line in enumerate(content.splitlines()): + line_number += 1 + line = line.strip() + if not line or line.startswith('#'): + continue + if skip_match and skip_match.search(line): + continue + if line.startswith('-r') or line.startswith('--requirement'): + if line.startswith('-r'): + req_url = line[2:].strip() + else: + req_url = line[len('--requirement'):].strip().strip('=') + if _scheme_re.search(filename): + # Relative to a URL + req_url = urlparse.urljoin(filename, req_url) + elif not _scheme_re.search(req_url): + req_url = os.path.join(os.path.dirname(filename), req_url) + for item in parse_requirements(req_url, finder, comes_from=filename, options=options): + yield item + elif line.startswith('-Z') or line.startswith('--always-unzip'): + # No longer used, but previously these were used in + # requirement files, so we'll ignore. + pass + elif line.startswith('-f') or line.startswith('--find-links'): + if line.startswith('-f'): + line = line[2:].strip() + else: + line = line[len('--find-links'):].strip().lstrip('=') + ## FIXME: it would be nice to keep track of the source of + ## the find_links: + # support a find-links local path relative to a requirements file + relative_to_reqs_file = os.path.join(reqs_file_dir, line) + if os.path.exists(relative_to_reqs_file): + line = relative_to_reqs_file + if finder: + finder.find_links.append(line) + elif line.startswith('-i') or line.startswith('--index-url'): + if line.startswith('-i'): + line = line[2:].strip() + else: + line = line[len('--index-url'):].strip().lstrip('=') + if finder: + finder.index_urls = [line] + elif line.startswith('--extra-index-url'): + line = line[len('--extra-index-url'):].strip().lstrip('=') + if finder: + finder.index_urls.append(line) + elif line.startswith('--use-wheel'): + finder.use_wheel = True + elif line.startswith('--no-index'): + finder.index_urls = [] + elif line.startswith("--allow-external"): + line = line[len("--allow-external"):].strip().lstrip("=") + finder.allow_external |= set([normalize_name(line).lower()]) + elif line.startswith("--allow-all-external"): + finder.allow_all_external = True + elif line.startswith("--no-allow-external"): + finder.allow_external = False + elif line.startswith("--no-allow-insecure"): + finder.allow_all_insecure = False + elif line.startswith("--allow-insecure"): + line = line[len("--allow-insecure"):].strip().lstrip("=") + finder.allow_insecure |= set([normalize_name(line).lower()]) + else: + comes_from = '-r %s (line %s)' % (filename, line_number) + if line.startswith('-e') or line.startswith('--editable'): + if line.startswith('-e'): + line = line[2:].strip() + else: + line = line[len('--editable'):].strip().lstrip('=') + req = InstallRequirement.from_editable( + line, 
comes_from=comes_from, default_vcs=options.default_vcs if options else None)
+            else:
+                req = InstallRequirement.from_line(line, comes_from, prereleases=getattr(options, "pre", None))
+            yield req
+
+
+def parse_editable(editable_req, default_vcs=None):
+    """Parses svn+http://blahblah@rev#egg=Foobar into a requirement
+    (Foobar) and a URL"""
+
+    url = editable_req
+    extras = None
+
+    # If a file path is specified with extras, strip off the extras.
+    m = re.match(r'^(.+)(\[[^\]]+\])$', url)
+    if m:
+        url_no_extras = m.group(1)
+        extras = m.group(2)
+    else:
+        url_no_extras = url
+
+    if os.path.isdir(url_no_extras):
+        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
+            raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % url_no_extras)
+        # Treating it as code that has already been checked out
+        url_no_extras = path_to_url(url_no_extras)
+
+    if url_no_extras.lower().startswith('file:'):
+        if extras:
+            return None, url_no_extras, pkg_resources.Requirement.parse('__placeholder__' + extras).extras
+        else:
+            return None, url_no_extras, None
+
+    for version_control in vcs:
+        if url.lower().startswith('%s:' % version_control):
+            url = '%s+%s' % (version_control, url)
+    if '+' not in url:
+        if default_vcs:
+            url = default_vcs + '+' + url
+        else:
+            raise InstallationError(
+                '%s should either be a path to a local project or a VCS url beginning with svn+, git+, hg+, or bzr+' % editable_req)
+    vc_type = url.split('+', 1)[0].lower()
+    if not vcs.get_backend(vc_type):
+        error_message = 'For --editable=%s only ' % editable_req + \
+            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
+            ' is currently supported'
+        raise InstallationError(error_message)
+    match = re.search(r'(?:#|#.*?&)egg=([^&]*)', editable_req)
+    if (not match or not match.group(1)) and vcs.get_backend(vc_type):
+        parts = [p for p in editable_req.split('#', 1)[0].split('/') if p]
+        if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
+            req = parts[-3]
+        elif parts[-1] == 'trunk':
+            req = parts[-2]
+        else:
+            raise InstallationError(
+                '--editable=%s is not the right format; it must have #egg=Package'
+                % editable_req)
+    else:
+        req = match.group(1)
+    ## FIXME: use package_to_requirement?
+    match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
+    if match:
+        # Strip off -dev, -0.2, etc.
+        req = match.group(1)
+    return req, url, None
+
+
+class UninstallPathSet(object):
+    """A set of file paths to be removed in the uninstallation of a
+    requirement."""
+    def __init__(self, dist):
+        self.paths = set()
+        self._refuse = set()
+        self.pth = {}
+        self.dist = dist
+        self.save_dir = None
+        self._moved_paths = []
+
+    def _permitted(self, path):
+        """
+        Return True if the given path is one we are permitted to
+        remove/modify, False otherwise.
+ + """ + return is_local(path) + + def _can_uninstall(self): + if not dist_is_local(self.dist): + logger.notify("Not uninstalling %s at %s, outside environment %s" + % (self.dist.project_name, normalize_path(self.dist.location), sys.prefix)) + return False + return True + + def add(self, path): + path = normalize_path(path) + if not os.path.exists(path): + return + if self._permitted(path): + self.paths.add(path) + else: + self._refuse.add(path) + + # __pycache__ files can show up after 'installed-files.txt' is created, due to imports + if os.path.splitext(path)[1] == '.py' and uses_pycache: + self.add(imp.cache_from_source(path)) + + + def add_pth(self, pth_file, entry): + pth_file = normalize_path(pth_file) + if self._permitted(pth_file): + if pth_file not in self.pth: + self.pth[pth_file] = UninstallPthEntries(pth_file) + self.pth[pth_file].add(entry) + else: + self._refuse.add(pth_file) + + def compact(self, paths): + """Compact a path set to contain the minimal number of paths + necessary to contain all paths in the set. If /a/path/ and + /a/path/to/a/file.txt are both in the set, leave only the + shorter path.""" + short_paths = set() + for path in sorted(paths, key=len): + if not any([(path.startswith(shortpath) and + path[len(shortpath.rstrip(os.path.sep))] == os.path.sep) + for shortpath in short_paths]): + short_paths.add(path) + return short_paths + + def _stash(self, path): + return os.path.join( + self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep)) + + def remove(self, auto_confirm=False): + """Remove paths in ``self.paths`` with confirmation (unless + ``auto_confirm`` is True).""" + if not self._can_uninstall(): + return + if not self.paths: + logger.notify("Can't uninstall '%s'. No files were found to uninstall." % self.dist.project_name) + return + logger.notify('Uninstalling %s:' % self.dist.project_name) + logger.indent += 2 + paths = sorted(self.compact(self.paths)) + try: + if auto_confirm: + response = 'y' + else: + for path in paths: + logger.notify(path) + response = ask('Proceed (y/n)? 
', ('y', 'n'))
+            if self._refuse:
+                logger.notify('Not removing or modifying (outside of prefix):')
+                for path in self.compact(self._refuse):
+                    logger.notify(path)
+            if response == 'y':
+                self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
+                                                 prefix='pip-')
+                for path in paths:
+                    new_path = self._stash(path)
+                    logger.info('Removing file or directory %s' % path)
+                    self._moved_paths.append(path)
+                    renames(path, new_path)
+                for pth in self.pth.values():
+                    pth.remove()
+                logger.notify('Successfully uninstalled %s' % self.dist.project_name)
+
+        finally:
+            logger.indent -= 2
+
+    def rollback(self):
+        """Rollback the changes previously made by remove()."""
+        if self.save_dir is None:
+            logger.error("Can't roll back %s; was not uninstalled" % self.dist.project_name)
+            return False
+        logger.notify('Rolling back uninstall of %s' % self.dist.project_name)
+        for path in self._moved_paths:
+            tmp_path = self._stash(path)
+            logger.info('Replacing %s' % path)
+            renames(tmp_path, path)
+        # self.pth maps pth file paths to UninstallPthEntries objects; roll
+        # back the entry objects themselves, not the dict keys
+        for pth in self.pth.values():
+            pth.rollback()
+
+    def commit(self):
+        """Remove temporary save dir: rollback will no longer be possible."""
+        if self.save_dir is not None:
+            rmtree(self.save_dir)
+            self.save_dir = None
+            self._moved_paths = []
+
+
+class UninstallPthEntries(object):
+    def __init__(self, pth_file):
+        if not os.path.isfile(pth_file):
+            raise UninstallationError("Cannot remove entries from nonexistent file %s" % pth_file)
+        self.file = pth_file
+        self.entries = set()
+        self._saved_lines = None
+
+    def add(self, entry):
+        entry = os.path.normcase(entry)
+        # On Windows, os.path.normcase converts the entry to use
+        # backslashes. This is correct for entries that describe absolute
+        # paths outside of site-packages, but all the others use forward
+        # slashes.
+        if sys.platform == 'win32' and not os.path.splitdrive(entry)[0]:
+            entry = entry.replace('\\', '/')
+        self.entries.add(entry)
+
+    def remove(self):
+        logger.info('Removing pth entries from %s:' % self.file)
+        fh = open(self.file, 'rb')
+        # windows uses '\r\n' with py3k, but uses '\n' with py2.x
+        lines = fh.readlines()
+        self._saved_lines = lines
+        fh.close()
+        if any(b('\r\n') in line for line in lines):
+            endline = '\r\n'
+        else:
+            endline = '\n'
+        for entry in self.entries:
+            try:
+                logger.info('Removing entry: %s' % entry)
+                lines.remove(b(entry + endline))
+            except ValueError:
+                pass
+        fh = open(self.file, 'wb')
+        fh.writelines(lines)
+        fh.close()
+
+    def rollback(self):
+        if self._saved_lines is None:
+            logger.error('Cannot roll back changes to %s, none were made' % self.file)
+            return False
+        logger.info('Rolling %s back to previous state' % self.file)
+        fh = open(self.file, 'wb')
+        fh.writelines(self._saved_lines)
+        fh.close()
+        return True
+
+
+class FakeFile(object):
+    """Wrap a list of lines in an object with readline() to make
+    ConfigParser happy."""
+    def __init__(self, lines):
+        self._gen = (l for l in lines)
+
+    def readline(self):
+        try:
+            try:
+                return next(self._gen)
+            except NameError:
+                return self._gen.next()
+        except StopIteration:
+            return ''
+
+    def __iter__(self):
+        return self._gen
diff --git a/awx/lib/site-packages/pip/runner.py b/awx/lib/site-packages/pip/runner.py
new file mode 100644
index 0000000000..be830ad9a9
--- /dev/null
+++ b/awx/lib/site-packages/pip/runner.py
@@ -0,0 +1,18 @@
+import sys
+import os
+
+
+def run():
+    base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    ## FIXME: this is kind of crude; if we could create a fake pip
+    ## module, then exec into it and update pip.__path__ properly, we
+    ##
wouldn't have to update sys.path: + sys.path.insert(0, base) + import pip + return pip.main() + + +if __name__ == '__main__': + exit = run() + if exit: + sys.exit(exit) diff --git a/awx/lib/site-packages/pip/status_codes.py b/awx/lib/site-packages/pip/status_codes.py new file mode 100644 index 0000000000..b6208e9644 --- /dev/null +++ b/awx/lib/site-packages/pip/status_codes.py @@ -0,0 +1,5 @@ +SUCCESS = 0 +ERROR = 1 +UNKNOWN_ERROR = 2 +VIRTUALENV_NOT_FOUND = 3 +NO_MATCHES_FOUND = 23 diff --git a/awx/lib/site-packages/pip/util.py b/awx/lib/site-packages/pip/util.py new file mode 100644 index 0000000000..77f9de6efc --- /dev/null +++ b/awx/lib/site-packages/pip/util.py @@ -0,0 +1,693 @@ +import sys +import shutil +import os +import stat +import re +import posixpath +import pkg_resources +import zipfile +import tarfile +import subprocess +import textwrap +from pip.exceptions import InstallationError, BadCommand, PipError +from pip.backwardcompat import(WindowsError, string_types, raw_input, + console_to_str, user_site, PermissionError) +from pip.locations import site_packages, running_under_virtualenv, virtualenv_no_global +from pip.log import logger +from pip.vendor.distlib import version + +__all__ = ['rmtree', 'display_path', 'backup_dir', + 'find_command', 'ask', 'Inf', + 'normalize_name', 'splitext', + 'format_size', 'is_installable_dir', + 'is_svn_page', 'file_contents', + 'split_leading_dir', 'has_leading_dir', + 'make_path_relative', 'normalize_path', + 'renames', 'get_terminal_size', 'get_prog', + 'unzip_file', 'untar_file', 'create_download_cache_folder', + 'cache_download', 'unpack_file', 'call_subprocess'] + + +def get_prog(): + try: + if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'): + return "%s -m pip" % sys.executable + except (AttributeError, TypeError, IndexError): + pass + return 'pip' + + +def rmtree(dir, ignore_errors=False): + shutil.rmtree(dir, ignore_errors=ignore_errors, + onerror=rmtree_errorhandler) + + +def rmtree_errorhandler(func, path, exc_info): + """On Windows, the files in .svn are read-only, so when rmtree() tries to + remove them, an exception is thrown. We catch that here, remove the + read-only attribute, and hopefully continue without problems.""" + exctype, value = exc_info[:2] + if not ((exctype is WindowsError and value.args[0] == 5) or #others + (exctype is OSError and value.args[0] == 13) or #python2.4 + (exctype is PermissionError and value.args[3] == 5) #python3.3 + ): + raise + # file type should currently be read only + if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD): + raise + # convert to read/write + os.chmod(path, stat.S_IWRITE) + # use the original function to repeat the operation + func(path) + + +def display_path(path): + """Gives the display value for a given path, making it relative to cwd + if possible.""" + path = os.path.normcase(os.path.abspath(path)) + if path.startswith(os.getcwd() + os.path.sep): + path = '.' 
+ path[len(os.getcwd()):] + return path + + +def backup_dir(dir, ext='.bak'): + """Figure out the name of a directory to back up the given dir to + (adding .bak, .bak2, etc)""" + n = 1 + extension = ext + while os.path.exists(dir + extension): + n += 1 + extension = ext + str(n) + return dir + extension + + +def find_command(cmd, paths=None, pathext=None): + """Searches the PATH for the given command and returns its path""" + if paths is None: + paths = os.environ.get('PATH', '').split(os.pathsep) + if isinstance(paths, string_types): + paths = [paths] + # check if there are funny path extensions for executables, e.g. Windows + if pathext is None: + pathext = get_pathext() + pathext = [ext for ext in pathext.lower().split(os.pathsep) if len(ext)] + # don't use extensions if the command ends with one of them + if os.path.splitext(cmd)[1].lower() in pathext: + pathext = [''] + # check if we find the command on PATH + for path in paths: + # try without extension first + cmd_path = os.path.join(path, cmd) + for ext in pathext: + # then including the extension + cmd_path_ext = cmd_path + ext + if os.path.isfile(cmd_path_ext): + return cmd_path_ext + if os.path.isfile(cmd_path): + return cmd_path + raise BadCommand('Cannot find command %r' % cmd) + + +def get_pathext(default_pathext=None): + """Returns the path extensions from environment or a default""" + if default_pathext is None: + default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD']) + pathext = os.environ.get('PATHEXT', default_pathext) + return pathext + + +def ask_path_exists(message, options): + for action in os.environ.get('PIP_EXISTS_ACTION', ''): + if action in options: + return action + return ask(message, options) + + +def ask(message, options): + """Ask the message interactively, with the given possible responses""" + while 1: + if os.environ.get('PIP_NO_INPUT'): + raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message) + response = raw_input(message) + response = response.strip().lower() + if response not in options: + print('Your response (%r) was not one of the expected responses: %s' % ( + response, ', '.join(options))) + else: + return response + + +class _Inf(object): + """I am bigger than everything!""" + + def __eq__(self, other): + if self is other: + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __repr__(self): + return 'Inf' + + +Inf = _Inf() #this object is not currently used as a sortable in our code +del _Inf + + +_normalize_re = re.compile(r'[^a-z]', re.I) + + +def normalize_name(name): + return _normalize_re.sub('-', name.lower()) + + +def format_size(bytes): + if bytes > 1000*1000: + return '%.1fMB' % (bytes/1000.0/1000) + elif bytes > 10*1000: + return '%ikB' % (bytes/1000) + elif bytes > 1000: + return '%.1fkB' % (bytes/1000.0) + else: + return '%ibytes' % bytes + + +def is_installable_dir(path): + """Return True if `path` is a directory containing a setup.py file.""" + if not os.path.isdir(path): + return False + setup_py = os.path.join(path, 'setup.py') + if os.path.isfile(setup_py): + return True + return False + + +def is_svn_page(html): + """Returns true if the page appears to be the index page of an svn repository""" + return (re.search(r'[^<]*Revision \d+:', html) + and re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)) + + 
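+# Illustrative examples (not part of upstream pip): expected results from
+# format_size() defined above:
+#
+#   format_size(1200000)  ->  '1.2MB'
+#   format_size(50000)    ->  '50kB'
+#   format_size(2000)     ->  '2.0kB'
+#   format_size(512)      ->  '512bytes'
+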
+def file_contents(filename):
+    fp = open(filename, 'rb')
+    try:
+        return fp.read().decode('utf-8')
+    finally:
+        fp.close()
+
+
+def split_leading_dir(path):
+    path = str(path)
+    path = path.lstrip('/').lstrip('\\')
+    if '/' in path and (('\\' in path and path.find('/') < path.find('\\'))
+                        or '\\' not in path):
+        return path.split('/', 1)
+    elif '\\' in path:
+        return path.split('\\', 1)
+    else:
+        return path, ''
+
+
+def has_leading_dir(paths):
+    """Returns true if all the paths have the same leading path name
+    (i.e., everything is in one subdirectory in an archive)"""
+    common_prefix = None
+    for path in paths:
+        prefix, rest = split_leading_dir(path)
+        if not prefix:
+            return False
+        elif common_prefix is None:
+            common_prefix = prefix
+        elif prefix != common_prefix:
+            return False
+    return True
+
+
+def make_path_relative(path, rel_to):
+    """
+    Make the filename in ``path`` relative to the directory ``rel_to``.
+
+    >>> make_path_relative('/usr/share/something/a-file.pth',
+    ...                    '/usr/share/another-place/src/Directory')
+    '../../../something/a-file.pth'
+    >>> make_path_relative('/usr/share/something/a-file.pth',
+    ...                    '/home/user/src/Directory')
+    '../../../../usr/share/something/a-file.pth'
+    >>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
+    'a-file.pth'
+    """
+    path_filename = os.path.basename(path)
+    path = os.path.dirname(path)
+    path = os.path.normpath(os.path.abspath(path))
+    rel_to = os.path.normpath(os.path.abspath(rel_to))
+    path_parts = path.strip(os.path.sep).split(os.path.sep)
+    rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
+    while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
+        path_parts.pop(0)
+        rel_to_parts.pop(0)
+    full_parts = ['..']*len(rel_to_parts) + path_parts + [path_filename]
+    if full_parts == ['']:
+        return '.' + os.path.sep
+    return os.path.sep.join(full_parts)
+
+
+def normalize_path(path):
+    """
+    Convert a path to its canonical, case-normalized, absolute version.
+
+    """
+    return os.path.normcase(os.path.realpath(path))
+
+
+def splitext(path):
+    """Like os.path.splitext, but take off .tar too"""
+    base, ext = posixpath.splitext(path)
+    if base.lower().endswith('.tar'):
+        ext = base[-4:] + ext
+        base = base[:-4]
+    return base, ext
+
+
+def renames(old, new):
+    """Like os.renames(), but handles renaming across devices."""
+    # Implementation borrowed from os.renames().
+    head, tail = os.path.split(new)
+    if head and tail and not os.path.exists(head):
+        os.makedirs(head)
+
+    shutil.move(old, new)
+
+    head, tail = os.path.split(old)
+    if head and tail:
+        try:
+            os.removedirs(head)
+        except OSError:
+            pass
+
+
+def is_local(path):
+    """
+    Return True if path is within sys.prefix, if we're running in a virtualenv.
+
+    If we're not in a virtualenv, all paths are considered "local."
+
+    """
+    if not running_under_virtualenv():
+        return True
+    return normalize_path(path).startswith(normalize_path(sys.prefix))
+
+
+def dist_is_local(dist):
+    """
+    Return True if given Distribution object is installed locally
+    (i.e. within current virtualenv).
+
+    Always True if we're not in a virtualenv.
+
+    """
+    return is_local(dist_location(dist))
+
+
+def dist_in_usersite(dist):
+    """
+    Return True if given Distribution is installed in user site.
+    """
+    if user_site:
+        return normalize_path(dist_location(dist)).startswith(normalize_path(user_site))
+    else:
+        return False
+
+def dist_in_site_packages(dist):
+    """
+    Return True if given Distribution is installed in distutils.sysconfig.get_python_lib().
+    """
+    return normalize_path(dist_location(dist)).startswith(normalize_path(site_packages))
+
+
+def dist_is_editable(dist):
+    """Is distribution an editable install?"""
+    #TODO: factor out determining editableness out of FrozenRequirement
+    from pip import FrozenRequirement
+    req = FrozenRequirement.from_dist(dist, [])
+    return req.editable
+
+def get_installed_distributions(local_only=True,
+                                skip=('setuptools', 'pip', 'python'),
+                                include_editables=True,
+                                editables_only=False):
+    """
+    Return a list of installed Distribution objects.
+
+    If ``local_only`` is True (default), only return installations
+    local to the current virtualenv, if in a virtualenv.
+
+    ``skip`` argument is an iterable of lower-case project names to
+    ignore; defaults to ('setuptools', 'pip', 'python'). [FIXME also
+    skip virtualenv?]
+
+    If ``include_editables`` is False, don't report editables.
+
+    If ``editables_only`` is True, only report editables.
+
+    """
+    if local_only:
+        local_test = dist_is_local
+    else:
+        local_test = lambda d: True
+
+    if include_editables:
+        editable_test = lambda d: True
+    else:
+        editable_test = lambda d: not dist_is_editable(d)
+
+    if editables_only:
+        editables_only_test = lambda d: dist_is_editable(d)
+    else:
+        editables_only_test = lambda d: True
+
+    return [d for d in pkg_resources.working_set
+            if local_test(d)
+            and d.key not in skip
+            and editable_test(d)
+            and editables_only_test(d)
+            ]
+
+
+def egg_link_path(dist):
+    """
+    Return the path for the .egg-link file if it exists, otherwise, None.
+
+    There are three scenarios:
+    1) not in a virtualenv
+       try to find in site.USER_SITE, then site_packages
+    2) in a no-global virtualenv
+       try to find in site_packages
+    3) in a yes-global virtualenv
+       try to find in site_packages, then site.USER_SITE (don't look in global location)
+
+    For #1 and #3, there could be odd cases, where there's an egg-link in 2 locations.
+    This method will just return the first one found.
+    """
+    sites = []
+    if running_under_virtualenv():
+        if virtualenv_no_global():
+            sites.append(site_packages)
+        else:
+            sites.append(site_packages)
+            if user_site:
+                sites.append(user_site)
+    else:
+        if user_site:
+            sites.append(user_site)
+        sites.append(site_packages)
+
+    for site in sites:
+        egglink = os.path.join(site, dist.project_name) + '.egg-link'
+        if os.path.isfile(egglink):
+            return egglink
+
+
+def dist_location(dist):
+    """
+    Get the site-packages location of this distribution. Generally
+    this is dist.location, except in the case of develop-installed
+    packages, where dist.location is the source code location, and we
+    want to know where the egg-link file is.
+
+    """
+    egg_link = egg_link_path(dist)
+    if egg_link:
+        return egg_link
+    return dist.location
+
+
+def get_terminal_size():
+    """Returns a tuple (x, y) representing the width(x) and the height(y)
+    in characters of the terminal window."""
+    def ioctl_GWINSZ(fd):
+        try:
+            import fcntl
+            import termios
+            import struct
+            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
+                                                 '1234'))
+        except:
+            return None
+        if cr == (0, 0):
+            return None
+        return cr
+    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
+    if not cr:
+        try:
+            fd = os.open(os.ctermid(), os.O_RDONLY)
+            cr = ioctl_GWINSZ(fd)
+            os.close(fd)
+        except:
+            pass
+    if not cr:
+        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
+    return int(cr[1]), int(cr[0])
+
+
+def unzip_file(filename, location, flatten=True):
+    """Unzip the file (zip file located at filename) to the destination
+    location"""
+    if not os.path.exists(location):
+        os.makedirs(location)
+    zipfp = open(filename, 'rb')
+    try:
+        zip = zipfile.ZipFile(zipfp)
+        leading = has_leading_dir(zip.namelist()) and flatten
+        for info in zip.infolist():
+            name = info.filename
+            data = zip.read(name)
+            fn = name
+            if leading:
+                fn = split_leading_dir(name)[1]
+            fn = os.path.join(location, fn)
+            dir = os.path.dirname(fn)
+            if not os.path.exists(dir):
+                os.makedirs(dir)
+            if fn.endswith('/') or fn.endswith('\\'):
+                # A directory
+                if not os.path.exists(fn):
+                    os.makedirs(fn)
+            else:
+                fp = open(fn, 'wb')
+                try:
+                    fp.write(data)
+                finally:
+                    fp.close()
+                unix_attributes = info.external_attr >> 16
+                if unix_attributes:
+                    os.chmod(fn, unix_attributes)
+    finally:
+        zipfp.close()
+
+
+def untar_file(filename, location):
+    """Untar the file (tar file located at filename) to the destination location"""
+    if not os.path.exists(location):
+        os.makedirs(location)
+    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
+        mode = 'r:gz'
+    elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
+        mode = 'r:bz2'
+    elif filename.lower().endswith('.tar'):
+        mode = 'r'
+    else:
+        logger.warn('Cannot determine compression type for file %s' % filename)
+        mode = 'r:*'
+    tar = tarfile.open(filename, mode)
+    try:
+        # note: python<=2.5 doesn't seem to know about pax headers, filter them
+        leading = has_leading_dir([
+            member.name for member in tar.getmembers()
+            if member.name != 'pax_global_header'
+        ])
+        for member in tar.getmembers():
+            fn = member.name
+            if fn == 'pax_global_header':
+                continue
+            if leading:
+                fn = split_leading_dir(fn)[1]
+            path = os.path.join(location, fn)
+            if member.isdir():
+                if not os.path.exists(path):
+                    os.makedirs(path)
+            elif member.issym():
+                try:
+                    tar._extract_member(member, path)
+                except:
+                    e = sys.exc_info()[1]
+                    # Some corrupt tar files seem to produce this
+                    # (specifically bad symlinks)
+                    logger.warn(
+                        'In the tar file %s the member %s is invalid: %s'
+                        % (filename, member.name, e))
+                    continue
+            else:
+                try:
+                    fp = tar.extractfile(member)
+                except (KeyError, AttributeError):
+                    e = sys.exc_info()[1]
+                    # Some corrupt tar files seem to produce this
+                    # (specifically bad symlinks)
+                    logger.warn(
+                        'In the tar file %s the member %s is invalid: %s'
+                        % (filename, member.name, e))
+                    continue
+                if not os.path.exists(os.path.dirname(path)):
+                    os.makedirs(os.path.dirname(path))
+                destfp = open(path, 'wb')
+                try:
+                    shutil.copyfileobj(fp, destfp)
+                finally:
+                    destfp.close()
+                fp.close()
+    finally:
+        tar.close()
+
+
+def create_download_cache_folder(folder):
+    logger.indent
-= 2 + logger.notify('Creating supposed download cache at %s' % folder) + logger.indent += 2 + os.makedirs(folder) + + +def cache_download(target_file, temp_location, content_type): + logger.notify('Storing download in cache at %s' % display_path(target_file)) + shutil.copyfile(temp_location, target_file) + fp = open(target_file+'.content-type', 'w') + fp.write(content_type) + fp.close() + + +def unpack_file(filename, location, content_type, link): + filename = os.path.realpath(filename) + if (content_type == 'application/zip' + or filename.endswith('.zip') + or filename.endswith('.pybundle') + or filename.endswith('.whl') + or zipfile.is_zipfile(filename)): + unzip_file(filename, location, flatten=not filename.endswith(('.pybundle', '.whl'))) + elif (content_type == 'application/x-gzip' + or tarfile.is_tarfile(filename) + or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')): + untar_file(filename, location) + elif (content_type and content_type.startswith('text/html') + and is_svn_page(file_contents(filename))): + # We don't really care about this + from pip.vcs.subversion import Subversion + Subversion('svn+' + link.url).unpack(location) + else: + ## FIXME: handle? + ## FIXME: magic signatures? + logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format' + % (filename, location, content_type)) + raise InstallationError('Cannot determine archive format of %s' % location) + + +def call_subprocess(cmd, show_stdout=True, + filter_stdout=None, cwd=None, + raise_on_returncode=True, + command_level=logger.DEBUG, command_desc=None, + extra_environ=None): + if command_desc is None: + cmd_parts = [] + for part in cmd: + if ' ' in part or '\n' in part or '"' in part or "'" in part: + part = '"%s"' % part.replace('"', '\\"') + cmd_parts.append(part) + command_desc = ' '.join(cmd_parts) + if show_stdout: + stdout = None + else: + stdout = subprocess.PIPE + logger.log(command_level, "Running command %s" % command_desc) + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + try: + proc = subprocess.Popen( + cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout, + cwd=cwd, env=env) + except Exception: + e = sys.exc_info()[1] + logger.fatal( + "Error %s while executing command %s" % (e, command_desc)) + raise + all_output = [] + if stdout is not None: + stdout = proc.stdout + while 1: + line = console_to_str(stdout.readline()) + if not line: + break + line = line.rstrip() + all_output.append(line + '\n') + if filter_stdout: + level = filter_stdout(line) + if isinstance(level, tuple): + level, line = level + logger.log(level, line) + if not logger.stdout_level_matches(level): + logger.show_progress() + else: + logger.info(line) + else: + returned_stdout, returned_stderr = proc.communicate() + all_output = [returned_stdout or ''] + proc.wait() + if proc.returncode: + if raise_on_returncode: + if all_output: + logger.notify('Complete output from command %s:' % command_desc) + logger.notify('\n'.join(all_output) + '\n----------------------------------------') + raise InstallationError( + "Command %s failed with error code %s in %s" + % (command_desc, proc.returncode, cwd)) + else: + logger.warn( + "Command %s had error code %s in %s" + % (command_desc, proc.returncode, cwd)) + if stdout is not None: + return ''.join(all_output) + + +def is_prerelease(vers): + """ + Attempt to determine if this is a pre-release using PEP386/PEP426 rules. + + Will return True if it is a pre-release and False if not. 
Versions are + assumed to be a pre-release if they cannot be parsed. + """ + normalized = version.suggest_normalized_version(vers) + + if normalized is None: + # Cannot normalize, assume it is a pre-release + return True + + parsed = version.normalized_key(normalized) + return any([any([y in set(["a", "b", "c", "rc", "dev"]) for y in x]) for x in parsed]) diff --git a/awx/lib/site-packages/pip/vcs/__init__.py b/awx/lib/site-packages/pip/vcs/__init__.py new file mode 100644 index 0000000000..a56dd202bc --- /dev/null +++ b/awx/lib/site-packages/pip/vcs/__init__.py @@ -0,0 +1,251 @@ +"""Handles all VCS (version control) support""" + +import os +import shutil + +from pip.backwardcompat import urlparse, urllib +from pip.log import logger +from pip.util import (display_path, backup_dir, find_command, + rmtree, ask_path_exists) + + +__all__ = ['vcs', 'get_src_requirement'] + + +class VcsSupport(object): + _registry = {} + schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] + + def __init__(self): + # Register more schemes with urlparse for various version control systems + urlparse.uses_netloc.extend(self.schemes) + # Python >= 2.7.4, 3.3 doesn't have uses_fragment + if getattr(urlparse, 'uses_fragment', None): + urlparse.uses_fragment.extend(self.schemes) + super(VcsSupport, self).__init__() + + def __iter__(self): + return self._registry.__iter__() + + @property + def backends(self): + return list(self._registry.values()) + + @property + def dirnames(self): + return [backend.dirname for backend in self.backends] + + @property + def all_schemes(self): + schemes = [] + for backend in self.backends: + schemes.extend(backend.schemes) + return schemes + + def register(self, cls): + if not hasattr(cls, 'name'): + logger.warn('Cannot register VCS %s' % cls.__name__) + return + if cls.name not in self._registry: + self._registry[cls.name] = cls + + def unregister(self, cls=None, name=None): + if name in self._registry: + del self._registry[name] + elif cls in self._registry.values(): + del self._registry[cls.name] + else: + logger.warn('Cannot unregister because no class or name given') + + def get_backend_name(self, location): + """ + Return the name of the version control backend if found at given + location, e.g. 
vcs.get_backend_name('/path/to/vcs/checkout')
+        """
+        for vc_type in self._registry.values():
+            path = os.path.join(location, vc_type.dirname)
+            if os.path.exists(path):
+                return vc_type.name
+        return None
+
+    def get_backend(self, name):
+        name = name.lower()
+        if name in self._registry:
+            return self._registry[name]
+
+    def get_backend_from_location(self, location):
+        vc_type = self.get_backend_name(location)
+        if vc_type:
+            return self.get_backend(vc_type)
+        return None
+
+
+vcs = VcsSupport()
+
+
+class VersionControl(object):
+    name = ''
+    dirname = ''
+
+    def __init__(self, url=None, *args, **kwargs):
+        self.url = url
+        self._cmd = None
+        super(VersionControl, self).__init__(*args, **kwargs)
+
+    def _filter(self, line):
+        return (logger.INFO, line)
+
+    def _is_local_repository(self, repo):
+        """
+        posix absolute paths start with os.path.sep,
+        win32 ones start with a drive (like c:\\folder)
+        """
+        drive, tail = os.path.splitdrive(repo)
+        return repo.startswith(os.path.sep) or drive
+
+    @property
+    def cmd(self):
+        if self._cmd is not None:
+            return self._cmd
+        command = find_command(self.name)
+        logger.info('Found command %r at %r' % (self.name, command))
+        self._cmd = command
+        return command
+
+    def get_url_rev(self):
+        """
+        Returns the correct repository URL and revision by parsing the given
+        repository URL
+        """
+        error_message = (
+            "Sorry, '%s' is a malformed VCS url. "
+            "The format is <vcs>+<protocol>://<url>, "
+            "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp")
+        assert '+' in self.url, error_message % self.url
+        url = self.url.split('+', 1)[1]
+        scheme, netloc, path, query, frag = urlparse.urlsplit(url)
+        rev = None
+        if '@' in path:
+            path, rev = path.rsplit('@', 1)
+        url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
+        return url, rev
+
+    def get_info(self, location):
+        """
+        Returns (url, revision), where both are strings
+        """
+        assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
+        return self.get_url(location), self.get_revision(location)
+
+    def normalize_url(self, url):
+        """
+        Normalize a URL for comparison by unquoting it and removing any trailing slash.
+        """
+        return urllib.unquote(url).rstrip('/')
+
+    def compare_urls(self, url1, url2):
+        """
+        Compare two repo URLs for identity, ignoring incidental differences.
+        """
+        return (self.normalize_url(url1) == self.normalize_url(url2))
+
+    def parse_vcs_bundle_file(self, content):
+        """
+        Takes the contents of the bundled text file that explains how to revert
+        the stripped off version control data of the given package and returns
+        the URL and revision of it.
+        """
+        raise NotImplementedError
+
+    def obtain(self, dest):
+        """
+        Called when installing or updating an editable package, takes the
+        source path of the checkout.
+        """
+        raise NotImplementedError
+
+    def switch(self, dest, url, rev_options):
+        """
+        Switch the repo at ``dest`` to point to ``URL``.
+        """
+        raise NotImplementedError
+
+    def update(self, dest, rev_options):
+        """
+        Update an already-existing repo to the given ``rev_options``.
+        """
+        raise NotImplementedError
+
+    def check_destination(self, dest, url, rev_options, rev_display):
+        """
+        Prepare a location to receive a checkout/clone.
+
+        Return True if the location is ready for (and requires) a
+        checkout/clone, False otherwise.
+ """ + checkout = True + prompt = False + if os.path.exists(dest): + checkout = False + if os.path.exists(os.path.join(dest, self.dirname)): + existing_url = self.get_url(dest) + if self.compare_urls(existing_url, url): + logger.info('%s in %s exists, and has correct URL (%s)' % + (self.repo_name.title(), display_path(dest), + url)) + logger.notify('Updating %s %s%s' % + (display_path(dest), self.repo_name, + rev_display)) + self.update(dest, rev_options) + else: + logger.warn('%s %s in %s exists with URL %s' % + (self.name, self.repo_name, + display_path(dest), existing_url)) + prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', + ('s', 'i', 'w', 'b')) + else: + logger.warn('Directory %s already exists, ' + 'and is not a %s %s.' % + (dest, self.name, self.repo_name)) + prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b')) + if prompt: + logger.warn('The plan is to install the %s repository %s' % + (self.name, url)) + response = ask_path_exists('What to do? %s' % prompt[0], + prompt[1]) + + if response == 's': + logger.notify('Switching %s %s to %s%s' % + (self.repo_name, display_path(dest), url, + rev_display)) + self.switch(dest, url, rev_options) + elif response == 'i': + # do nothing + pass + elif response == 'w': + logger.warn('Deleting %s' % display_path(dest)) + rmtree(dest) + checkout = True + elif response == 'b': + dest_dir = backup_dir(dest) + logger.warn('Backing up %s to %s' + % (display_path(dest), dest_dir)) + shutil.move(dest, dest_dir) + checkout = True + return checkout + + def unpack(self, location): + if os.path.exists(location): + rmtree(location) + self.obtain(location) + + def get_src_requirement(self, dist, location, find_tags=False): + raise NotImplementedError + + +def get_src_requirement(dist, location, find_tags): + version_control = vcs.get_backend_from_location(location) + if version_control: + return version_control().get_src_requirement(dist, location, find_tags) + logger.warn('cannot determine version of editable source in %s (is not SVN checkout, Git clone, Mercurial clone or Bazaar branch)' % location) + return dist.as_requirement() diff --git a/awx/lib/site-packages/pip/vcs/bazaar.py b/awx/lib/site-packages/pip/vcs/bazaar.py new file mode 100644 index 0000000000..86f4bbc624 --- /dev/null +++ b/awx/lib/site-packages/pip/vcs/bazaar.py @@ -0,0 +1,131 @@ +import os +import tempfile +import re +from pip.backwardcompat import urlparse +from pip.log import logger +from pip.util import rmtree, display_path, call_subprocess +from pip.vcs import vcs, VersionControl +from pip.download import path_to_url2 + + +class Bazaar(VersionControl): + name = 'bzr' + dirname = '.bzr' + repo_name = 'branch' + bundle_file = 'bzr-branch.txt' + schemes = ('bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', 'bzr+lp') + guide = ('# This was a Bazaar branch; to make it a branch again run:\n' + 'bzr branch -r %(rev)s %(url)s .\n') + + def __init__(self, url=None, *args, **kwargs): + super(Bazaar, self).__init__(url, *args, **kwargs) + # Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical + # Register lp but do not expose as a scheme to support bzr+lp. 
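+        # Illustrative note (an assumption, not upstream commentary):
+        # registering 'lp' means urlparse keeps the fragment in URLs such as
+        # 'bzr+lp:myproject#egg=MyProject' on older Pythons, so the #egg=
+        # part is not lost during splitting.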
+        if getattr(urlparse, 'uses_fragment', None):
+            urlparse.uses_fragment.extend(['lp'])
+            urlparse.non_hierarchical.extend(['lp'])
+
+    def parse_vcs_bundle_file(self, content):
+        url = rev = None
+        for line in content.splitlines():
+            if not line.strip() or line.strip().startswith('#'):
+                continue
+            match = re.search(r'^bzr\s*branch\s*-r\s*(\d*)', line)
+            if match:
+                rev = match.group(1).strip()
+            url = line[match.end():].strip().split(None, 1)[0]
+            if url and rev:
+                return url, rev
+        return None, None
+
+    def export(self, location):
+        """Export the Bazaar repository at the url to the destination location"""
+        temp_dir = tempfile.mkdtemp('-export', 'pip-')
+        self.unpack(temp_dir)
+        if os.path.exists(location):
+            # Remove the location to make sure Bazaar can export it correctly
+            rmtree(location)
+        try:
+            call_subprocess([self.cmd, 'export', location], cwd=temp_dir,
+                            filter_stdout=self._filter, show_stdout=False)
+        finally:
+            rmtree(temp_dir)
+
+    def switch(self, dest, url, rev_options):
+        call_subprocess([self.cmd, 'switch', url], cwd=dest)
+
+    def update(self, dest, rev_options):
+        call_subprocess(
+            [self.cmd, 'pull', '-q'] + rev_options, cwd=dest)
+
+    def obtain(self, dest):
+        url, rev = self.get_url_rev()
+        if rev:
+            rev_options = ['-r', rev]
+            rev_display = ' (to revision %s)' % rev
+        else:
+            rev_options = []
+            rev_display = ''
+        if self.check_destination(dest, url, rev_options, rev_display):
+            logger.notify('Checking out %s%s to %s'
+                          % (url, rev_display, display_path(dest)))
+            call_subprocess(
+                [self.cmd, 'branch', '-q'] + rev_options + [url, dest])
+
+    def get_url_rev(self):
+        # hotfix the URL scheme: after removing bzr+ from bzr+ssh://, re-add it
+        url, rev = super(Bazaar, self).get_url_rev()
+        if url.startswith('ssh://'):
+            url = 'bzr+' + url
+        return url, rev
+
+    def get_url(self, location):
+        urls = call_subprocess(
+            [self.cmd, 'info'], show_stdout=False, cwd=location)
+        for line in urls.splitlines():
+            line = line.strip()
+            for x in ('checkout of branch: ',
+                      'parent branch: '):
+                if line.startswith(x):
+                    repo = line.split(x)[1]
+                    if self._is_local_repository(repo):
+                        return path_to_url2(repo)
+                    return repo
+        return None
+
+    def get_revision(self, location):
+        revision = call_subprocess(
+            [self.cmd, 'revno'], show_stdout=False, cwd=location)
+        return revision.splitlines()[-1]
+
+    def get_tag_revs(self, location):
+        tags = call_subprocess(
+            [self.cmd, 'tags'], show_stdout=False, cwd=location)
+        tag_revs = []
+        for line in tags.splitlines():
+            tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
+            if tags_match:
+                tag = tags_match.group(1)
+                rev = tags_match.group(2)
+                tag_revs.append((rev.strip(), tag.strip()))
+        return dict(tag_revs)
+
+    def get_src_requirement(self, dist, location, find_tags):
+        repo = self.get_url(location)
+        if not repo.lower().startswith('bzr:'):
+            repo = 'bzr+' + repo
+        egg_project_name = dist.egg_name().split('-', 1)[0]
+        if not repo:
+            return None
+        current_rev = self.get_revision(location)
+        tag_revs = self.get_tag_revs(location)
+
+        if current_rev in tag_revs:
+            # It's a tag
+            full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
+        else:
+            full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
+        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
+
+
+vcs.register(Bazaar)
diff --git a/awx/lib/site-packages/pip/vcs/git.py b/awx/lib/site-packages/pip/vcs/git.py
new file mode 100644
index 0000000000..16acebdc4a
--- /dev/null
+++ b/awx/lib/site-packages/pip/vcs/git.py
@@ -0,0 +1,194 @@
+import tempfile
+import re
+import os.path
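+# Illustrative note (not upstream commentary): this backend serves editable
+# requirements such as
+#   git+https://example.com/repo.git@v1.0#egg=SomePackage
+# (URL hypothetical); the optional '@v1.0' is resolved by check_rev_options()
+# below, and '#egg=' names the package.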
+from pip.util import call_subprocess +from pip.util import display_path, rmtree +from pip.vcs import vcs, VersionControl +from pip.log import logger +from pip.backwardcompat import url2pathname, urlparse +urlsplit = urlparse.urlsplit +urlunsplit = urlparse.urlunsplit + + +class Git(VersionControl): + name = 'git' + dirname = '.git' + repo_name = 'clone' + schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file') + bundle_file = 'git-clone.txt' + guide = ('# This was a Git repo; to make it a repo again run:\n' + 'git init\ngit remote add origin %(url)s -f\ngit checkout %(rev)s\n') + + def __init__(self, url=None, *args, **kwargs): + + # Works around an apparent Git bug + # (see http://article.gmane.org/gmane.comp.version-control.git/146500) + if url: + scheme, netloc, path, query, fragment = urlsplit(url) + if scheme.endswith('file'): + initial_slashes = path[:-len(path.lstrip('/'))] + newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/') + url = urlunsplit((scheme, netloc, newpath, query, fragment)) + after_plus = scheme.find('+') + 1 + url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment)) + + super(Git, self).__init__(url, *args, **kwargs) + + def parse_vcs_bundle_file(self, content): + url = rev = None + for line in content.splitlines(): + if not line.strip() or line.strip().startswith('#'): + continue + url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line) + if url_match: + url = url_match.group(1).strip() + rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line) + if rev_match: + rev = rev_match.group(1).strip() + if url and rev: + return url, rev + return None, None + + def export(self, location): + """Export the Git repository at the url to the destination location""" + temp_dir = tempfile.mkdtemp('-export', 'pip-') + self.unpack(temp_dir) + try: + if not location.endswith('/'): + location = location + '/' + call_subprocess( + [self.cmd, 'checkout-index', '-a', '-f', '--prefix', location], + filter_stdout=self._filter, show_stdout=False, cwd=temp_dir) + finally: + rmtree(temp_dir) + + def check_rev_options(self, rev, dest, rev_options): + """Check the revision options before checkout to compensate that tags + and branches may need origin/ as a prefix. + Returns the SHA1 of the branch or tag if found. + """ + revisions = self.get_refs(dest) + + origin_rev = 'origin/%s' % rev + if origin_rev in revisions: + # remote branch + return [revisions[origin_rev]] + elif rev in revisions: + # a local tag or branch name + return [revisions[rev]] + else: + logger.warn("Could not find a tag or branch '%s', assuming commit." 
% rev)
+            return rev_options
+
+    def switch(self, dest, url, rev_options):
+        call_subprocess(
+            [self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
+        call_subprocess(
+            [self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
+
+        self.update_submodules(dest)
+
+    def update(self, dest, rev_options):
+        # First fetch changes from the default remote
+        call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
+        # Then reset to wanted revision (maybe even origin/master)
+        if rev_options:
+            rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
+        call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
+        #: update submodules
+        self.update_submodules(dest)
+
+    def obtain(self, dest):
+        url, rev = self.get_url_rev()
+        if rev:
+            rev_options = [rev]
+            rev_display = ' (to %s)' % rev
+        else:
+            rev_options = ['origin/master']
+            rev_display = ''
+        if self.check_destination(dest, url, rev_options, rev_display):
+            logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
+            call_subprocess([self.cmd, 'clone', '-q', url, dest])
+            #: repo may contain submodules
+            self.update_submodules(dest)
+            if rev:
+                rev_options = self.check_rev_options(rev, dest, rev_options)
+                # Only do a checkout if rev_options differs from HEAD
+                if not self.get_revision(dest).startswith(rev_options[0]):
+                    call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
+
+    def get_url(self, location):
+        url = call_subprocess(
+            [self.cmd, 'config', 'remote.origin.url'],
+            show_stdout=False, cwd=location)
+        return url.strip()
+
+    def get_revision(self, location):
+        current_rev = call_subprocess(
+            [self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
+        return current_rev.strip()
+
+    def get_refs(self, location):
+        """Return map of named refs (branches or tags) to commit hashes."""
+        output = call_subprocess([self.cmd, 'show-ref'],
+                                 show_stdout=False, cwd=location)
+        rv = {}
+        for line in output.strip().splitlines():
+            commit, ref = line.split(' ', 1)
+            ref = ref.strip()
+            ref_name = None
+            if ref.startswith('refs/remotes/'):
+                ref_name = ref[len('refs/remotes/'):]
+            elif ref.startswith('refs/heads/'):
+                ref_name = ref[len('refs/heads/'):]
+            elif ref.startswith('refs/tags/'):
+                ref_name = ref[len('refs/tags/'):]
+            if ref_name is not None:
+                rv[ref_name] = commit.strip()
+        return rv
+
+    def get_src_requirement(self, dist, location, find_tags):
+        repo = self.get_url(location)
+        if not repo.lower().startswith('git:'):
+            repo = 'git+' + repo
+        egg_project_name = dist.egg_name().split('-', 1)[0]
+        if not repo:
+            return None
+        current_rev = self.get_revision(location)
+        refs = self.get_refs(location)
+        # refs maps names to commit hashes; we need the inverse
+        # if multiple names map to a single commit, this arbitrarily picks one
+        names_by_commit = dict((commit, ref) for ref, commit in refs.items())
+
+        if current_rev in names_by_commit:
+            # It's a tag
+            full_egg_name = '%s-%s' % (egg_project_name, names_by_commit[current_rev])
+        else:
+            full_egg_name = '%s-dev' % egg_project_name
+
+        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
+
+    def get_url_rev(self):
+        """
+        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
+        That's required because although they use SSH they sometimes don't
+        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
+        parsing. Hence we remove it again afterwards and return it as a stub.
+ """ + if not '://' in self.url: + assert not 'file:' in self.url + self.url = self.url.replace('git+', 'git+ssh://') + url, rev = super(Git, self).get_url_rev() + url = url.replace('ssh://', '') + else: + url, rev = super(Git, self).get_url_rev() + + return url, rev + + def update_submodules(self, location): + if not os.path.exists(os.path.join(location, '.gitmodules')): + return + call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'], + cwd=location) + +vcs.register(Git) diff --git a/awx/lib/site-packages/pip/vcs/mercurial.py b/awx/lib/site-packages/pip/vcs/mercurial.py new file mode 100644 index 0000000000..af427f77e7 --- /dev/null +++ b/awx/lib/site-packages/pip/vcs/mercurial.py @@ -0,0 +1,151 @@ +import os +import tempfile +import re +import sys +from pip.util import call_subprocess +from pip.util import display_path, rmtree +from pip.log import logger +from pip.vcs import vcs, VersionControl +from pip.download import path_to_url2 +from pip.backwardcompat import ConfigParser + + +class Mercurial(VersionControl): + name = 'hg' + dirname = '.hg' + repo_name = 'clone' + schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http') + bundle_file = 'hg-clone.txt' + guide = ('# This was a Mercurial repo; to make it a repo again run:\n' + 'hg init\nhg pull %(url)s\nhg update -r %(rev)s\n') + + def parse_vcs_bundle_file(self, content): + url = rev = None + for line in content.splitlines(): + if not line.strip() or line.strip().startswith('#'): + continue + url_match = re.search(r'hg\s*pull\s*(.*)\s*', line) + if url_match: + url = url_match.group(1).strip() + rev_match = re.search(r'^hg\s*update\s*-r\s*(.*)\s*', line) + if rev_match: + rev = rev_match.group(1).strip() + if url and rev: + return url, rev + return None, None + + def export(self, location): + """Export the Hg repository at the url to the destination location""" + temp_dir = tempfile.mkdtemp('-export', 'pip-') + self.unpack(temp_dir) + try: + call_subprocess( + [self.cmd, 'archive', location], + filter_stdout=self._filter, show_stdout=False, cwd=temp_dir) + finally: + rmtree(temp_dir) + + def switch(self, dest, url, rev_options): + repo_config = os.path.join(dest, self.dirname, 'hgrc') + config = ConfigParser.SafeConfigParser() + try: + config.read(repo_config) + config.set('paths', 'default', url) + config_file = open(repo_config, 'w') + config.write(config_file) + config_file.close() + except (OSError, ConfigParser.NoSectionError): + e = sys.exc_info()[1] + logger.warn( + 'Could not switch Mercurial repository to %s: %s' + % (url, e)) + else: + call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest) + + def update(self, dest, rev_options): + call_subprocess([self.cmd, 'pull', '-q'], cwd=dest) + call_subprocess( + [self.cmd, 'update', '-q'] + rev_options, cwd=dest) + + def obtain(self, dest): + url, rev = self.get_url_rev() + if rev: + rev_options = [rev] + rev_display = ' (to revision %s)' % rev + else: + rev_options = [] + rev_display = '' + if self.check_destination(dest, url, rev_options, rev_display): + logger.notify('Cloning hg %s%s to %s' + % (url, rev_display, display_path(dest))) + call_subprocess([self.cmd, 'clone', '--noupdate', '-q', url, dest]) + call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest) + + def get_url(self, location): + url = call_subprocess( + [self.cmd, 'showconfig', 'paths.default'], + show_stdout=False, cwd=location).strip() + if self._is_local_repository(url): + url = path_to_url2(url) + return url.strip() + + def get_tag_revs(self, 
location): + tags = call_subprocess( + [self.cmd, 'tags'], show_stdout=False, cwd=location) + tag_revs = [] + for line in tags.splitlines(): + tags_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line) + if tags_match: + tag = tags_match.group(1) + rev = tags_match.group(2) + if "tip" != tag: + tag_revs.append((rev.strip(), tag.strip())) + return dict(tag_revs) + + def get_branch_revs(self, location): + branches = call_subprocess( + [self.cmd, 'branches'], show_stdout=False, cwd=location) + branch_revs = [] + for line in branches.splitlines(): + branches_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line) + if branches_match: + branch = branches_match.group(1) + rev = branches_match.group(2) + if "default" != branch: + branch_revs.append((rev.strip(), branch.strip())) + return dict(branch_revs) + + def get_revision(self, location): + current_revision = call_subprocess( + [self.cmd, 'parents', '--template={rev}'], + show_stdout=False, cwd=location).strip() + return current_revision + + def get_revision_hash(self, location): + current_rev_hash = call_subprocess( + [self.cmd, 'parents', '--template={node}'], + show_stdout=False, cwd=location).strip() + return current_rev_hash + + def get_src_requirement(self, dist, location, find_tags): + repo = self.get_url(location) + if not repo.lower().startswith('hg:'): + repo = 'hg+' + repo + egg_project_name = dist.egg_name().split('-', 1)[0] + if not repo: + return None + current_rev = self.get_revision(location) + current_rev_hash = self.get_revision_hash(location) + tag_revs = self.get_tag_revs(location) + branch_revs = self.get_branch_revs(location) + if current_rev in tag_revs: + # It's a tag + full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev]) + elif current_rev in branch_revs: + # It's the tip of a branch + full_egg_name = '%s-%s' % (egg_project_name, branch_revs[current_rev]) + else: + full_egg_name = '%s-dev' % egg_project_name + return '%s@%s#egg=%s' % (repo, current_rev_hash, full_egg_name) + +vcs.register(Mercurial) diff --git a/awx/lib/site-packages/pip/vcs/subversion.py b/awx/lib/site-packages/pip/vcs/subversion.py new file mode 100644 index 0000000000..88163ff73f --- /dev/null +++ b/awx/lib/site-packages/pip/vcs/subversion.py @@ -0,0 +1,273 @@ +import os +import re +from pip.backwardcompat import urlparse +from pip.index import Link +from pip.util import rmtree, display_path, call_subprocess +from pip.log import logger +from pip.vcs import vcs, VersionControl + +_svn_xml_url_re = re.compile('url="([^"]+)"') +_svn_rev_re = re.compile('committed-rev="(\d+)"') +_svn_url_re = re.compile(r'URL: (.+)') +_svn_revision_re = re.compile(r'Revision: (.+)') +_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') +_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>') + + +class Subversion(VersionControl): + name = 'svn' + dirname = '.svn' + repo_name = 'checkout' + schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn') + bundle_file = 'svn-checkout.txt' + guide = ('# This was an svn checkout; to make it a checkout again run:\n' + 'svn checkout --force -r %(rev)s %(url)s .\n') + + def get_info(self, location): + """Returns (url, revision), where both are strings""" + assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location + output = call_subprocess( + [self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'}) + match = _svn_url_re.search(output) + if not match: + logger.warn('Cannot determine URL of svn checkout %s' % display_path(location)) + logger.info('Output that 
cannot be parsed: \n%s' % output) + return None, None + url = match.group(1).strip() + match = _svn_revision_re.search(output) + if not match: + logger.warn('Cannot determine revision of svn checkout %s' % display_path(location)) + logger.info('Output that cannot be parsed: \n%s' % output) + return url, None + return url, match.group(1) + + def parse_vcs_bundle_file(self, content): + for line in content.splitlines(): + if not line.strip() or line.strip().startswith('#'): + continue + match = re.search(r'^-r\s*([^ ])?', line) + if not match: + return None, None + rev = match.group(1) + rest = line[match.end():].strip().split(None, 1)[0] + return rest, rev + return None, None + + def export(self, location): + """Export the svn repository at the url to the destination location""" + url, rev = self.get_url_rev() + rev_options = get_rev_options(url, rev) + logger.notify('Exporting svn repository %s to %s' % (url, location)) + logger.indent += 2 + try: + if os.path.exists(location): + # Subversion doesn't like to check out over an existing directory + # --force fixes this, but was only added in svn 1.5 + rmtree(location) + call_subprocess( + [self.cmd, 'export'] + rev_options + [url, location], + filter_stdout=self._filter, show_stdout=False) + finally: + logger.indent -= 2 + + def switch(self, dest, url, rev_options): + call_subprocess( + [self.cmd, 'switch'] + rev_options + [url, dest]) + + def update(self, dest, rev_options): + call_subprocess( + [self.cmd, 'update'] + rev_options + [dest]) + + def obtain(self, dest): + url, rev = self.get_url_rev() + rev_options = get_rev_options(url, rev) + if rev: + rev_display = ' (to revision %s)' % rev + else: + rev_display = '' + if self.check_destination(dest, url, rev_options, rev_display): + logger.notify('Checking out %s%s to %s' + % (url, rev_display, display_path(dest))) + call_subprocess( + [self.cmd, 'checkout', '-q'] + rev_options + [url, dest]) + + def get_location(self, dist, dependency_links): + for url in dependency_links: + egg_fragment = Link(url).egg_fragment + if not egg_fragment: + continue + if '-' in egg_fragment: + ## FIXME: will this work when a package has - in the name? + key = '-'.join(egg_fragment.split('-')[:-1]).lower() + else: + key = egg_fragment + if key == dist.key: + return url.split('#', 1)[0] + return None + + def get_revision(self, location): + """ + Return the maximum revision for all files under a given location + """ + # Note: taken from setuptools.command.egg_info + revision = 0 + + for base, dirs, files in os.walk(location): + if self.dirname not in dirs: + dirs[:] = [] + continue # no sense walking uncontrolled subdirs + dirs.remove(self.dirname) + entries_fn = os.path.join(base, self.dirname, 'entries') + if not os.path.exists(entries_fn): + ## FIXME: should we warn? 
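                # (Editor's note, an illustrative aside rather than upstream
                # commentary: on Subversion 1.7+ working copies the per-directory
                # 'entries' file is gone and metadata lives in a single .svn/wc.db
                # at the root; _get_svn_url_rev() below falls back to
                # `svn info --xml` to cope with that layout.)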
+ continue + + dirurl, localrev = self._get_svn_url_rev(base) + + if base == location: + base_url = dirurl + '/' # save the root url + elif not dirurl or not dirurl.startswith(base_url): + dirs[:] = [] + continue # not part of the same svn tree, skip it + revision = max(revision, localrev) + return revision + + def get_url_rev(self): + # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it + url, rev = super(Subversion, self).get_url_rev() + if url.startswith('ssh://'): + url = 'svn+' + url + return url, rev + + def get_url(self, location): + # In cases where the source is in a subdirectory, not alongside setup.py + # we have to look up in the location until we find a real setup.py + orig_location = location + while not os.path.exists(os.path.join(location, 'setup.py')): + last_location = location + location = os.path.dirname(location) + if location == last_location: + # We've traversed up to the root of the filesystem without finding setup.py + logger.warn("Could not find setup.py for directory %s (tried all parent directories)" + % orig_location) + return None + + return self._get_svn_url_rev(location)[0] + + def _get_svn_url_rev(self, location): + from pip.exceptions import InstallationError + + f = open(os.path.join(location, self.dirname, 'entries')) + data = f.read() + f.close() + if data.startswith('8') or data.startswith('9') or data.startswith('10'): + data = list(map(str.splitlines, data.split('\n\x0c\n'))) + del data[0][0] # get rid of the '8' + url = data[0][3] + revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0] + elif data.startswith('<?xml'): + match = _svn_xml_url_re.search(data) + if not match: + raise ValueError('Badly formatted data: %r' % data) + url = match.group(1) # get repository URL + revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0] + else: + try: + # subversion >= 1.7 + xml = call_subprocess([self.cmd, 'info', '--xml', location], show_stdout=False) + url = _svn_info_xml_url_re.search(xml).group(1) + revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)] + except InstallationError: + url, revs = None, [] + + if revs: + rev = max(revs) + else: + rev = 0 + + return url, rev + + def get_tag_revs(self, svn_tag_url): + stdout = call_subprocess( + [self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False) + results = [] + for line in stdout.splitlines(): + parts = line.split() + rev = int(parts[0]) + tag = parts[-1].strip('/') + results.append((tag, rev)) + return results + + def find_tag_match(self, rev, tag_revs): + best_match_rev = None + best_tag = None + for tag, tag_rev in tag_revs: + if (tag_rev > rev and + (best_match_rev is None or best_match_rev > tag_rev)): + # FIXME: Is best_match > tag_rev really possible? + # or is it a sign something is wacky? + best_match_rev = tag_rev + best_tag = tag + return best_tag + + def get_src_requirement(self, dist, location, find_tags=False): + repo = self.get_url(location) + if repo is None: + return None + parts = repo.split('/') + ## FIXME: why not project name? + egg_project_name = dist.egg_name().split('-', 1)[0] + rev = self.get_revision(location) + if parts[-2] in ('tags', 'tag'): + # It's a tag, perfect! 
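                # (Editor's illustration; the URL below is hypothetical.)
                # This tags/branches/trunk chain maps conventional repository
                # layouts to egg names, e.g. a checkout of
                #     http://svn.example.com/myproj/tags/1.2
                # at working-copy revision 1042 yields roughly
                #     svn+http://svn.example.com/myproj/tags/1.2@1042#egg=myproj-1.2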
+ full_egg_name = '%s-%s' % (egg_project_name, parts[-1]) + elif parts[-2] in ('branches', 'branch'): + # It's a branch :( + full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev) + elif parts[-1] == 'trunk': + # Trunk :-/ + full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev) + if find_tags: + tag_url = '/'.join(parts[:-1]) + '/tags' + tag_revs = self.get_tag_revs(tag_url) + match = self.find_tag_match(rev, tag_revs) + if match: + logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match) + repo = '%s/%s' % (tag_url, match) + full_egg_name = '%s-%s' % (egg_project_name, match) + else: + # Don't know what it is + logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo) + full_egg_name = '%s-dev_r%s' % (egg_project_name, rev) + return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name) + + +def get_rev_options(url, rev): + if rev: + rev_options = ['-r', rev] + else: + rev_options = [] + + r = urlparse.urlsplit(url) + if hasattr(r, 'username'): + # >= Python-2.5 + username, password = r.username, r.password + else: + netloc = r[1] + if '@' in netloc: + auth = netloc.split('@')[0] + if ':' in auth: + username, password = auth.split(':', 1) + else: + username, password = auth, None + else: + username, password = None, None + + if username: + rev_options += ['--username', username] + if password: + rev_options += ['--password', password] + return rev_options + + +vcs.register(Subversion) diff --git a/awx/lib/site-packages/pip/vendor/__init__.py b/awx/lib/site-packages/pip/vendor/__init__.py new file mode 100644 index 0000000000..a996951fc2 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/__init__.py @@ -0,0 +1,8 @@ +""" +pip.vendor is for vendoring dependencies of pip to prevent needing pip to +depend on something external. + +Files inside of pip.vendor should be considered immutable and should only be +updated to versions from upstream. +""" +from __future__ import absolute_import diff --git a/awx/lib/site-packages/pip/vendor/distlib/__init__.py b/awx/lib/site-packages/pip/vendor/distlib/__init__.py new file mode 100644 index 0000000000..d6ae0f2954 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import logging + +__version__ = '0.1.1' + +class DistlibException(Exception): + pass + +try: + from logging import NullHandler +except ImportError: # pragma: no cover + class NullHandler(logging.Handler): + def handle(self, record): pass + def emit(self, record): pass + +logger = logging.getLogger(__name__) +logger.addHandler(NullHandler()) diff --git a/awx/lib/site-packages/pip/vendor/distlib/_backport/__init__.py b/awx/lib/site-packages/pip/vendor/distlib/_backport/__init__.py new file mode 100644 index 0000000000..f7dbf4c9aa --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/_backport/__init__.py @@ -0,0 +1,6 @@ +"""Modules copied from Python 3 standard libraries, for internal use only. + +Individual classes and functions are found in d2._backport.misc. Intended +usage is to always import things missing from 3.1 from that module: the +built-in/stdlib objects will be used if found. 
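Editor's note: in this vendored copy the module referred to above as
"d2._backport.misc" is pip.vendor.distlib._backport.misc ("d2" being a
leftover name from the distutils2 lineage), so a typical import is:

    from pip.vendor.distlib._backport.misc import fsencode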
+""" diff --git a/awx/lib/site-packages/pip/vendor/distlib/_backport/misc.py b/awx/lib/site-packages/pip/vendor/distlib/_backport/misc.py new file mode 100644 index 0000000000..cfb318d34f --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/_backport/misc.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Backports for individual classes and functions.""" + +import os +import sys + +__all__ = ['cache_from_source', 'callable', 'fsencode'] + + +try: + from imp import cache_from_source +except ImportError: + def cache_from_source(py_file, debug=__debug__): + ext = debug and 'c' or 'o' + return py_file + ext + + +try: + callable = callable +except NameError: + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode +except AttributeError: + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, str): + return filename.encode(sys.getfilesystemencoding()) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) diff --git a/awx/lib/site-packages/pip/vendor/distlib/_backport/shutil.py b/awx/lib/site-packages/pip/vendor/distlib/_backport/shutil.py new file mode 100644 index 0000000000..9e2e234d46 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/_backport/shutil.py @@ -0,0 +1,761 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. + +""" + +import os +import sys +import stat +from os.path import abspath +import fnmatch +import collections +import errno +from . import tarfile + +try: + import bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + from pwd import getpwnam +except ImportError: + getpwnam = None + +try: + from grp import getgrnam +except ImportError: + getgrnam = None + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "ExecError", "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", "ignore_patterns"] + +class Error(EnvironmentError): + pass + +class SpecialFileError(EnvironmentError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)""" + +class ExecError(EnvironmentError): + """Raised when a command could not be executed""" + +class ReadError(EnvironmentError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registery operation with the archiving + and unpacking registeries fails""" + + +try: + WindowsError +except NameError: + WindowsError = None + +def copyfileobj(fsrc, fdst, length=16*1024): + """copy data from file-like object fsrc to file-like object fdst""" + while 1: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. 
+ return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def copyfile(src, dst): + """Copy data from src to dst""" + if _samefile(src, dst): + raise Error("`%s` and `%s` are the same file" % (src, dst)) + + for fn in [src, dst]: + try: + st = os.stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) + if stat.S_ISFIFO(st.st_mode): + raise SpecialFileError("`%s` is a named pipe" % fn) + + with open(src, 'rb') as fsrc: + with open(dst, 'wb') as fdst: + copyfileobj(fsrc, fdst) + +def copymode(src, dst): + """Copy mode bits from src to dst""" + if hasattr(os, 'chmod'): + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + os.chmod(dst, mode) + +def copystat(src, dst): + """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + if hasattr(os, 'utime'): + os.utime(dst, (st.st_atime, st.st_mtime)) + if hasattr(os, 'chmod'): + os.chmod(dst, mode) + if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): + try: + os.chflags(dst, st.st_flags) + except OSError as why: + if (not hasattr(errno, 'EOPNOTSUPP') or + why.errno != errno.EOPNOTSUPP): + raise + +def copy(src, dst): + """Copy data and mode bits ("cp src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copymode(src, dst) + +def copy2(src, dst): + """Copy data and all stat info ("cp -p src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copystat(src, dst) + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False): + """Recursively copy a directory tree. + + The destination directory must not already exist. + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. 
It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. + + """ + names = os.listdir(src) + if ignore is not None: + ignored_names = ignore(src, names) + else: + ignored_names = set() + + os.makedirs(dst) + errors = [] + for name in names: + if name in ignored_names: + continue + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + try: + if os.path.islink(srcname): + linkto = os.readlink(srcname) + if symlinks: + os.symlink(linkto, dstname) + else: + # ignore dangling symlink if the flag is on + if not os.path.exists(linkto) and ignore_dangling_symlinks: + continue + # otherwise let the copy occur. copy2 will raise an error + copy_function(srcname, dstname) + elif os.path.isdir(srcname): + copytree(srcname, dstname, symlinks, ignore, copy_function) + else: + # Will raise a SpecialFileError for unsupported file types + copy_function(srcname, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Error as err: + errors.extend(err.args[0]) + except EnvironmentError as why: + errors.append((srcname, dstname, str(why))) + try: + copystat(src, dst) + except OSError as why: + if WindowsError is not None and isinstance(why, WindowsError): + # Copying file access times may fail on Windows + pass + else: + errors.append((src, dst, str(why))) + if errors: + raise Error(errors) + +def rmtree(path, ignore_errors=False, onerror=None): + """Recursively delete a directory tree. + + If ignore_errors is set, errors are ignored; otherwise, if onerror + is set, it is called to handle the error with arguments (func, + path, exc_info) where func is os.listdir, os.remove, or os.rmdir; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If ignore_errors + is false and onerror is None, an exception is raised. + + """ + if ignore_errors: + def onerror(*args): + pass + elif onerror is None: + def onerror(*args): + raise + try: + if os.path.islink(path): + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, path, sys.exc_info()) + # can't continue even if onerror hook returns + return + names = [] + try: + names = os.listdir(path) + except os.error: + onerror(os.listdir, path, sys.exc_info()) + for name in names: + fullname = os.path.join(path, name) + try: + mode = os.lstat(fullname).st_mode + except os.error: + mode = 0 + if stat.S_ISDIR(mode): + rmtree(fullname, ignore_errors, onerror) + else: + try: + os.remove(fullname) + except os.error: + onerror(os.remove, fullname, sys.exc_info()) + try: + os.rmdir(path) + except os.error: + onerror(os.rmdir, path, sys.exc_info()) + + +def _basename(path): + # A basename() variant which first strips the trailing slash, if present. + # Thus we always get the last component of the path, even for directories. + return os.path.basename(path.rstrip(os.path.sep)) + +def move(src, dst): + """Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. + + If the destination is a directory or a symlink to a directory, the source + is moved inside the directory. The destination path must not already + exist. + + If the destination already exists but is not a directory, it may be + overwritten depending on os.rename() semantics. 
+ + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst): + # We might be on a case insensitive filesystem, + # perform the rename anyway. + os.rename(src, dst) + return + + real_dst = os.path.join(dst, _basename(src)) + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) + copytree(src, real_dst, symlinks=True) + rmtree(src) + else: + copy2(src, real_dst) + os.unlink(src) + +def _destinsrc(src, dst): + src = abspath(src) + dst = abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _get_gid(name): + """Returns a gid, given a group name.""" + if getgrnam is None or name is None: + return None + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if getpwnam is None or name is None: + return None + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", or ".bz2"). + + Returns the output filename. 
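    Example (editor's sketch; the paths are hypothetical):

        _make_tarball('/tmp/out/backup', 'project', compress='bzip2')
        # -> '/tmp/out/backup.tar.bz2', provided the bz2 module is available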
+ """ + tar_compression = {'gzip': 'gz', None: ''} + compress_ext = {'gzip': '.gz'} + + if _BZ2_SUPPORTED: + tar_compression['bzip2'] = 'bz2' + compress_ext['bzip2'] = '.bz2' + + # flags for compression program, each element of list will be an argument + if compress is not None and compress not in compress_ext: + raise ValueError("bad value for 'compress', or compression format not " + "supported : {0}".format(compress)) + + archive_name = base_name + '.tar' + compress_ext.get(compress, '') + archive_dir = os.path.dirname(archive_name) + + if not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # creating the tarball + if logger is not None: + logger.info('Creating tar archive') + + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + + if not dry_run: + tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) + try: + tar.add(base_dir, filter=_set_uid_gid) + finally: + tar.close() + + return archive_name + +def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): + # XXX see if we want to keep an external call here + if verbose: + zipoptions = "-r" + else: + zipoptions = "-rq" + from distutils.errors import DistutilsExecError + from distutils.spawn import spawn + try: + spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) + except DistutilsExecError: + # XXX really should distinguish between "couldn't find + # external 'zip' command" and "zip failed". + raise ExecError("unable to create zip file '%s': " + "could neither import the 'zipfile' module nor " + "find a standalone zip utility") % zip_filename + +def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): + """Create a zip file from all the files under 'base_dir'. + + The output zip file will be named 'base_name' + ".zip". Uses either the + "zipfile" Python module (if available) or the InfoZIP "zip" utility + (if installed and found on the default search path). If neither tool is + available, raises ExecError. Returns the name of the output zip + file. + """ + zip_filename = base_name + ".zip" + archive_dir = os.path.dirname(base_name) + + if not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # If zipfile module is not available, try spawning an external 'zip' + # command. 
+ try: + import zipfile + except ImportError: + zipfile = None + + if zipfile is None: + _call_external_zip(base_dir, zip_filename, verbose, dry_run) + else: + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + zip = zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) + + for dirpath, dirnames, filenames in os.walk(base_dir): + for name in filenames: + path = os.path.normpath(os.path.join(dirpath, name)) + if os.path.isfile(path): + zip.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + zip.close() + + return zip_filename + +_ARCHIVE_FORMATS = { + 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), + 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), + 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), + 'zip': (_make_zipfile, [], "ZIP file"), + } + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + """ + if extra_args is None: + extra_args = [] + if not isinstance(function, collections.Callable): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "bztar" + or "gztar". + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. 
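    Example (editor's sketch; the paths are hypothetical):

        make_archive('/tmp/release', 'gztar', root_dir='/srv/app',
                     base_dir='src')
        # -> '/tmp/release.tar.gz', with member paths rooted at 'src/'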
+ """ + save_cwd = os.getcwd() + if root_dir is not None: + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + if base_dir is None: + base_dir = os.curdir + + kwargs = {'dry_run': dry_run, 'logger': logger} + + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if format != 'zip': + kwargs['owner'] = owner + kwargs['group'] = group + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if root_dir is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not isinstance(function, collections.Callable): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registery.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + try: + import zipfile + except ImportError: + raise ReadError('zlib not supported, cannot unpack this archive.') + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name: + continue + + target = os.path.join(extract_dir, *name.split('/')) + if not target: + continue + + _ensure_directory(target) + if not name.endswith('/'): + # file + data = zip.read(info.filename) + f = open(target, 'wb') + try: + f.write(data) + finally: + f.close() + del data + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir) + finally: + tarobj.close() + +_UNPACK_FORMATS = { + 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") + } + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", or "gztar". Or any + other registered format. If not provided, unpack_archive will use the + filename extension and see if an unpacker was registered for that + extension. + + In case none is found, a ValueError is raised. + """ + if extract_dir is None: + extract_dir = os.getcwd() + + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2])) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) + func(filename, extract_dir, **kwargs) diff --git a/awx/lib/site-packages/pip/vendor/distlib/_backport/sysconfig.py b/awx/lib/site-packages/pip/vendor/distlib/_backport/sysconfig.py new file mode 100644 index 0000000000..34e2ef1976 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/_backport/sysconfig.py @@ -0,0 +1,787 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. 
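# (Editor's note, illustrative only: this backport mirrors the stdlib
# sysconfig API for older Pythons, so typical calls look like
#
#     from pip.vendor.distlib._backport import sysconfig
#     sysconfig.get_python_version()        # e.g. '2.7'
#     sysconfig.get_config_var('prefix')    # e.g. '/usr'
#     sysconfig.get_paths()['purelib']      # site-packages dir for the scheme
# )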
+# +"""Access to Python's configuration information.""" + +import codecs +import os +import re +import sys +from os.path import pardir, realpath +try: + import configparser +except ImportError: + import ConfigParser as configparser + + +__all__ = [ + 'get_config_h_filename', + 'get_config_var', + 'get_config_vars', + 'get_makefile_filename', + 'get_path', + 'get_path_names', + 'get_paths', + 'get_platform', + 'get_python_version', + 'get_scheme_names', + 'parse_config_h', +] + + +def _safe_realpath(path): + try: + return realpath(path) + except OSError: + return path + + +if sys.executable: + _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) +else: + # sys.executable can be empty if argv[0] has been changed and Python is + # unable to retrieve the real program name + _PROJECT_BASE = _safe_realpath(os.getcwd()) + +if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) +# PC/VS7.1 +if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) +# PC/AMD64 +if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) + + +def is_python_build(): + for fn in ("Setup.dist", "Setup.local"): + if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): + return True + return False + +_PYTHON_BUILD = is_python_build() + +_cfg_read = False + +def _ensure_cfg_read(): + global _cfg_read + if not _cfg_read: + from distlib.resources import finder + _finder = finder('distlib._backport') + _cfgfile = _finder.find('sysconfig.cfg') + assert _cfgfile, 'sysconfig.cfg exists' + with _cfgfile.as_stream() as s: + _SCHEMES.readfp(s) + if _PYTHON_BUILD: + for scheme in ('posix_prefix', 'posix_home'): + _SCHEMES.set(scheme, 'include', '{srcdir}/Include') + _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') + + _cfg_read = True + + +_SCHEMES = configparser.RawConfigParser() +_VAR_REPL = re.compile(r'\{([^{]*?)\}') + +def _expand_globals(config): + _ensure_cfg_read() + if config.has_section('globals'): + globals = config.items('globals') + else: + globals = tuple() + + sections = config.sections() + for section in sections: + if section == 'globals': + continue + for option, value in globals: + if config.has_option(section, option): + continue + config.set(section, option, value) + config.remove_section('globals') + + # now expanding local variables defined in the cfg file + # + for section in config.sections(): + variables = dict(config.items(section)) + + def _replacer(matchobj): + name = matchobj.group(1) + if name in variables: + return variables[name] + return matchobj.group(0) + + for option, value in config.items(section): + config.set(section, option, _VAR_REPL.sub(_replacer, value)) + +#_expand_globals(_SCHEMES) + + # FIXME don't rely on sys.version here, its format is an implementation detail + # of CPython, use sys.version_info or sys.hexversion +_PY_VERSION = sys.version.split()[0] +_PY_VERSION_SHORT = sys.version[:3] +_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] +_PREFIX = os.path.normpath(sys.prefix) +_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) +_CONFIG_VARS = None +_USER_BASE = None + + +def _subst_vars(path, local_vars): + """In the string `path`, replace tokens like {some.thing} with the + corresponding value from the map `local_vars`. + + If there is no corresponding value, leave the token unchanged. 
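    Example (editor's sketch):

        _subst_vars('{base}/lib/python{py_version_short}', {'base': '/usr'})
        # -> '/usr/lib/python{py_version_short}' when 'py_version_short' is
        #    neither in local_vars nor in os.environ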
+ """ + def _replacer(matchobj): + name = matchobj.group(1) + if name in local_vars: + return local_vars[name] + elif name in os.environ: + return os.environ[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, path) + + +def _extend_dict(target_dict, other_dict): + target_keys = target_dict.keys() + for key, value in other_dict.items(): + if key in target_keys: + continue + target_dict[key] = value + + +def _expand_vars(scheme, vars): + res = {} + if vars is None: + vars = {} + _extend_dict(vars, get_config_vars()) + + for key, value in _SCHEMES.items(scheme): + if os.name in ('posix', 'nt'): + value = os.path.expanduser(value) + res[key] = os.path.normpath(_subst_vars(value, vars)) + return res + + +def format_value(value, vars): + def _replacer(matchobj): + name = matchobj.group(1) + if name in vars: + return vars[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, value) + + +def _get_default_scheme(): + if os.name == 'posix': + # the default scheme for posix is posix_prefix + return 'posix_prefix' + return os.name + + +def _getuserbase(): + env_base = os.environ.get("PYTHONUSERBASE", None) + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + # what about 'os2emx', 'riscos' ? + if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + if env_base: + return env_base + else: + return joinuser(base, "Python") + + if sys.platform == "darwin": + framework = get_config_var("PYTHONFRAMEWORK") + if framework: + if env_base: + return env_base + else: + return joinuser("~", "Library", framework, "%d.%d" % + sys.version_info[:2]) + + if env_base: + return env_base + else: + return joinuser("~", ".local") + + +def _parse_makefile(filename, vars=None): + """Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + """ + # Regexes needed for parsing Makefile (and similar syntaxes, + # like old-style Setup files). + _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") + _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") + _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") + + if vars is None: + vars = {} + done = {} + notdone = {} + + with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: + lines = f.readlines() + + for line in lines: + if line.startswith('#') or line.strip() == '': + continue + m = _variable_rx.match(line) + if m: + n, v = m.group(1, 2) + v = v.strip() + # `$$' is a literal `$' in make + tmpv = v.replace('$$', '') + + if "$" in tmpv: + notdone[n] = v + else: + try: + v = int(v) + except ValueError: + # insert literal `$' + done[n] = v.replace('$$', '$') + else: + done[n] = v + + # do variable interpolation here + variables = list(notdone.keys()) + + # Variables with a 'PY_' prefix in the makefile. These need to + # be made available without that prefix through sysconfig. + # Special care is needed to ensure that variable expansion works, even + # if the expansion uses the name without a prefix. 
+ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') + + while len(variables) > 0: + for name in tuple(variables): + value = notdone[name] + m = _findvar1_rx.search(value) or _findvar2_rx.search(value) + if m is not None: + n = m.group(1) + found = True + if n in done: + item = str(done[n]) + elif n in notdone: + # get it on a subsequent round + found = False + elif n in os.environ: + # do it like make: fall back to environment + item = os.environ[n] + + elif n in renamed_variables: + if (name.startswith('PY_') and + name[3:] in renamed_variables): + item = "" + + elif 'PY_' + n in notdone: + found = False + + else: + item = str(done['PY_' + n]) + + else: + done[n] = item = "" + + if found: + after = value[m.end():] + value = value[:m.start()] + item + after + if "$" in after: + notdone[name] = value + else: + try: + value = int(value) + except ValueError: + done[name] = value.strip() + else: + done[name] = value + variables.remove(name) + + if (name.startswith('PY_') and + name[3:] in renamed_variables): + + name = name[3:] + if name not in done: + done[name] = value + + else: + # bogus variable reference (e.g. "prefix=$/opt/python"); + # just drop it since we can't deal + done[name] = value + variables.remove(name) + + # strip spurious spaces + for k, v in done.items(): + if isinstance(v, str): + done[k] = v.strip() + + # save the results in the global dictionary + vars.update(done) + return vars + + +def get_makefile_filename(): + """Return the path of the Makefile.""" + if _PYTHON_BUILD: + return os.path.join(_PROJECT_BASE, "Makefile") + if hasattr(sys, 'abiflags'): + config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) + else: + config_dir_name = 'config' + return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') + + +def _init_posix(vars): + """Initialize the module as appropriate for POSIX systems.""" + # load the installed Makefile: + makefile = get_makefile_filename() + try: + _parse_makefile(makefile, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % makefile + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # load the installed pyconfig.h: + config_h = get_config_h_filename() + try: + with open(config_h) as f: + parse_config_h(f, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % config_h + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # On AIX, there are wrong paths to the linker scripts in the Makefile + # -- these paths are relative to the Python source, but when installed + # the scripts are in another directory. + if _PYTHON_BUILD: + vars['LDSHARED'] = vars['BLDSHARED'] + + +def _init_non_posix(vars): + """Initialize the module as appropriate for NT""" + # set basic install directories + vars['LIBDEST'] = get_path('stdlib') + vars['BINLIBDEST'] = get_path('platstdlib') + vars['INCLUDEPY'] = get_path('include') + vars['SO'] = '.pyd' + vars['EXE'] = '.exe' + vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT + vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) + +# +# public APIs +# + + +def parse_config_h(fp, vars=None): + """Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
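    Example (editor's sketch):

        with open(get_config_h_filename()) as f:
            config_vars = parse_config_h(f)
        config_vars.get('SIZEOF_LONG')   # e.g. 8 on common 64-bit Unix builds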
+ """ + if vars is None: + vars = {} + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: + v = int(v) + except ValueError: + pass + vars[n] = v + else: + m = undef_rx.match(line) + if m: + vars[m.group(1)] = 0 + return vars + + +def get_config_h_filename(): + """Return the path of pyconfig.h.""" + if _PYTHON_BUILD: + if os.name == "nt": + inc_dir = os.path.join(_PROJECT_BASE, "PC") + else: + inc_dir = _PROJECT_BASE + else: + inc_dir = get_path('platinclude') + return os.path.join(inc_dir, 'pyconfig.h') + + +def get_scheme_names(): + """Return a tuple containing the schemes names.""" + return tuple(sorted(_SCHEMES.sections())) + + +def get_path_names(): + """Return a tuple containing the paths names.""" + # xxx see if we want a static list + return _SCHEMES.options('posix_prefix') + + +def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): + """Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. + """ + _ensure_cfg_read() + if expand: + return _expand_vars(scheme, vars) + else: + return dict(_SCHEMES.items(scheme)) + + +def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): + """Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + """ + return get_paths(scheme, vars, expand)[name] + + +def get_config_vars(*args): + """With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows and Mac OS it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + """ + global _CONFIG_VARS + if _CONFIG_VARS is None: + _CONFIG_VARS = {} + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # distutils2 module. + _CONFIG_VARS['prefix'] = _PREFIX + _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX + _CONFIG_VARS['py_version'] = _PY_VERSION + _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT + _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] + _CONFIG_VARS['base'] = _PREFIX + _CONFIG_VARS['platbase'] = _EXEC_PREFIX + _CONFIG_VARS['projectbase'] = _PROJECT_BASE + try: + _CONFIG_VARS['abiflags'] = sys.abiflags + except AttributeError: + # sys.abiflags may not be defined on all platforms. + _CONFIG_VARS['abiflags'] = '' + + if os.name in ('nt', 'os2'): + _init_non_posix(_CONFIG_VARS) + if os.name == 'posix': + _init_posix(_CONFIG_VARS) + # Setting 'userbase' is done below the call to the + # init function to enable using 'get_config_var' in + # the init-function. + if sys.version >= '2.6': + _CONFIG_VARS['userbase'] = _getuserbase() + + if 'srcdir' not in _CONFIG_VARS: + _CONFIG_VARS['srcdir'] = _PROJECT_BASE + else: + _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) + + # Convert srcdir into an absolute path if it appears necessary. + # Normally it is relative to the build directory. However, during + # testing, for example, we might be running a non-installed python + # from a different directory. 
+ if _PYTHON_BUILD and os.name == "posix": + base = _PROJECT_BASE + try: + cwd = os.getcwd() + except OSError: + cwd = None + if (not os.path.isabs(_CONFIG_VARS['srcdir']) and + base != cwd): + # srcdir is relative and we are not in the same directory + # as the executable. Assume executable is in the build + # directory and make srcdir absolute. + srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) + _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) + + if sys.platform == 'darwin': + kernel_version = os.uname()[2] # Kernel version (8.4.3) + major_version = int(kernel_version.split('.')[0]) + + if major_version < 8: + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + flags = _CONFIG_VARS[key] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _CONFIG_VARS[key] = flags + else: + # Allow the user to override the architecture flags using + # an environment variable. + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _CONFIG_VARS[key] = flags + + # If we're on OSX 10.5 or later and the user tries to + # compiles an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. + # + # The major usecase for this is users using a Python.org + # binary installer on OSX 10.6: that installer uses + # the 10.4u SDK, but that SDK is not installed by default + # when you install Xcode. + # + CFLAGS = _CONFIG_VARS.get('CFLAGS', '') + m = re.search('-isysroot\s+(\S+)', CFLAGS) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags) + _CONFIG_VARS[key] = flags + + if args: + vals = [] + for name in args: + vals.append(_CONFIG_VARS.get(name)) + return vals + else: + return _CONFIG_VARS + + +def get_config_var(name): + """Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + """ + return get_config_vars().get(name) + + +def get_platform(): + """Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name + and version and the architecture (as supplied by 'os.uname()'), + although the exact information included depends on the OS; eg. for IRIX + the architecture isn't particularly important (IRIX only runs on SGI + hardware), but for Linux the kernel version isn't particularly + important. + + Examples of returned values: + linux-i586 + linux-alpha (?) 
+ solaris-2.6-sun4u + irix-5.3 + irix64-6.2 + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win-ia64 (64bit Windows on Itanium) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + """ + if os.name == 'nt': + # sniff sys.version for architecture. + prefix = " bit (" + i = sys.version.find(prefix) + if i == -1: + return sys.platform + j = sys.version.find(")", i) + look = sys.version[i+len(prefix):j].lower() + if look == 'amd64': + return 'win-amd64' + if look == 'itanium': + return 'win-ia64' + return sys.platform + + if os.name != "posix" or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha, + # Mac OS is M68k or PPC, etc. + return sys.platform + + # Try to distinguish various flavours of Unix + osname, host, release, version, machine = os.uname() + + # Convert the OS name to lowercase, remove '/' characters + # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_') + machine = machine.replace('/', '-') + + if osname[:5] == "linux": + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return "%s-%s" % (osname, machine) + elif osname[:5] == "sunos": + if release[0] >= "5": # SunOS 5 == Solaris 2 + osname = "solaris" + release = "%d.%s" % (int(release[0]) - 3, release[2:]) + # fall through to standard osname-release-machine representation + elif osname[:4] == "irix": # could be "irix64"! + return "%s-%s" % (osname, release) + elif osname[:3] == "aix": + return "%s-%s.%s" % (osname, version, release) + elif osname[:6] == "cygwin": + osname = "cygwin" + rel_re = re.compile(r'[\d.]+') + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == "darwin": + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. + cfgvars = get_config_vars() + macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') + + if True: + # Always calculate the release of the running machine, + # needed to determine if we can build fat binaries or not. + + macrelease = macver + # Get the system version. Reading this plist is a documented + # way to get the system version (see the documentation for + # the Gestalt Manager) + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'<key>ProductUserVisibleVersion</key>\s*' + r'<string>(.*?)</string>', f.read()) + finally: + f.close() + if m is not None: + macrelease = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + if not macver: + macver = macrelease + + if macver: + release = macver + osname = "macosx" + + if ((macrelease + '.') >= '10.4.' and + '-arch' in get_config_vars().get('CFLAGS', '').strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + # + # Try to detect 4-way universal builds, those have machine-type + # 'universal' instead of 'fat'. 
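                # (Editor's illustration:) CFLAGS containing
                # '-arch i386 -arch x86_64' yields archs == ('i386', 'x86_64')
                # below, which maps to the 'intel' machine tag.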
+ + machine = 'fat' + cflags = get_config_vars().get('CFLAGS') + + archs = re.findall('-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return "%s-%s-%s" % (osname, release, machine) + + +def get_python_version(): + return _PY_VERSION_SHORT + + +def _print_dict(title, data): + for index, (key, value) in enumerate(sorted(data.items())): + if index == 0: + print('%s: ' % (title)) + print('\t%s = "%s"' % (key, value)) + + +def _main(): + """Display all information sysconfig detains.""" + print('Platform: "%s"' % get_platform()) + print('Python version: "%s"' % get_python_version()) + print('Current installation scheme: "%s"' % _get_default_scheme()) + print() + _print_dict('Paths', get_paths()) + print() + _print_dict('Variables', get_config_vars()) + + +if __name__ == '__main__': + _main() diff --git a/awx/lib/site-packages/pip/vendor/distlib/_backport/tarfile.py b/awx/lib/site-packages/pip/vendor/distlib/_backport/tarfile.py new file mode 100644 index 0000000000..0580fb7953 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/_backport/tarfile.py @@ -0,0 +1,2607 @@ +#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de> +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +from __future__ import print_function + +"""Read from and write to tar format archives. 
+""" + +__version__ = "$Revision$" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" +__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." + +#--------- +# Imports +#--------- +import sys +import os +import stat +import errno +import time +import struct +import copy +import re + +try: + import grp, pwd +except ImportError: + grp = pwd = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +symlink_exception = (AttributeError, NotImplementedError) +try: + # WindowsError (1314) will be raised if the caller does not hold the + # SeCreateSymbolicLinkPrivilege privilege + symlink_exception += (WindowsError,) +except NameError: + pass + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] + +if sys.version_info[0] < 3: + import __builtin__ as builtins +else: + import builtins + +_open = builtins.open # Since 'open' is TarFile.open + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # POSIX.1-2001 (pax) format +DEFAULT_FORMAT = GNU_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. 
+PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# Bits used in the mode field, values in octal. +#--------------------------------------------------------- +S_IFLNK = 0o120000 # symbolic link +S_IFREG = 0o100000 # regular file +S_IFBLK = 0o060000 # block device +S_IFDIR = 0o040000 # directory +S_IFCHR = 0o020000 # character device +S_IFIFO = 0o010000 # fifo + +TSUID = 0o4000 # set UID on execution +TSGID = 0o2000 # set GID on execution +TSVTX = 0o1000 # reserved + +TUREAD = 0o400 # read by owner +TUWRITE = 0o200 # write by owner +TUEXEC = 0o100 # execute/search by owner +TGREAD = 0o040 # read by group +TGWRITE = 0o020 # write by group +TGEXEC = 0o010 # execute/search by group +TOREAD = 0o004 # read by other +TOWRITE = 0o002 # write by other +TOEXEC = 0o001 # execute/search by other + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name in ("nt", "ce"): + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] != chr(0o200): + try: + n = int(nts(s, "ascii", "strict") or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + else: + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += ord(s[i + 1]) + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 byte indicates this particular + # encoding, the following digits-1 bytes are a big-endian + # representation. This allows values up to (256**(digits-1))-1. + if 0 <= n < 8 ** (digits - 1): + s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL + else: + if format != GNU_FORMAT or n >= 256 ** (digits - 1): + raise ValueError("overflow in number field") + + if n < 0: + # XXX We mimic GNU tar's behaviour with negative numbers, + # this could raise OverflowError. + n = struct.unpack("L", struct.pack("l", n))[0] + + s = bytearray() + for i in range(digits - 1): + s.insert(0, n & 0o377) + n >>= 8 + s.insert(0, 0o200) + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. 
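+       Both values are returned so that frombuf() can accept headers
+       written with either convention.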
+    """
+    unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
+    signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
+    return unsigned_chksum, signed_chksum
+
+def copyfileobj(src, dst, length=None):
+    """Copy length bytes from fileobj src to fileobj dst.
+       If length is None, copy the entire content.
+    """
+    if length == 0:
+        return
+    if length is None:
+        while True:
+            buf = src.read(16*1024)
+            if not buf:
+                break
+            dst.write(buf)
+        return
+
+    BUFSIZE = 16 * 1024
+    blocks, remainder = divmod(length, BUFSIZE)
+    for b in range(blocks):
+        buf = src.read(BUFSIZE)
+        if len(buf) < BUFSIZE:
+            raise IOError("end of file reached")
+        dst.write(buf)
+
+    if remainder != 0:
+        buf = src.read(remainder)
+        if len(buf) < remainder:
+            raise IOError("end of file reached")
+        dst.write(buf)
+    return
+
+filemode_table = (
+    ((S_IFLNK,      "l"),
+     (S_IFREG,      "-"),
+     (S_IFBLK,      "b"),
+     (S_IFDIR,      "d"),
+     (S_IFCHR,      "c"),
+     (S_IFIFO,      "p")),
+
+    ((TUREAD,       "r"),),
+    ((TUWRITE,      "w"),),
+    ((TUEXEC|TSUID, "s"),
+     (TSUID,        "S"),
+     (TUEXEC,       "x")),
+
+    ((TGREAD,       "r"),),
+    ((TGWRITE,      "w"),),
+    ((TGEXEC|TSGID, "s"),
+     (TSGID,        "S"),
+     (TGEXEC,       "x")),
+
+    ((TOREAD,       "r"),),
+    ((TOWRITE,      "w"),),
+    ((TOEXEC|TSVTX, "t"),
+     (TSVTX,        "T"),
+     (TOEXEC,       "x"))
+)
+
+def filemode(mode):
+    """Convert a file's mode to a string of the form
+       -rwxrwxrwx.
+       Used by TarFile.list()
+    """
+    perm = []
+    for table in filemode_table:
+        for bit, char in table:
+            if mode & bit == bit:
+                perm.append(char)
+                break
+        else:
+            perm.append("-")
+    return "".join(perm)
+
+class TarError(Exception):
+    """Base exception."""
+    pass
+class ExtractError(TarError):
+    """General exception for extract errors."""
+    pass
+class ReadError(TarError):
+    """Exception for unreadable tar archives."""
+    pass
+class CompressionError(TarError):
+    """Exception for unavailable compression methods."""
+    pass
+class StreamError(TarError):
+    """Exception for unsupported operations on stream-like TarFiles."""
+    pass
+class HeaderError(TarError):
+    """Base exception for header errors."""
+    pass
+class EmptyHeaderError(HeaderError):
+    """Exception for empty headers."""
+    pass
+class TruncatedHeaderError(HeaderError):
+    """Exception for truncated headers."""
+    pass
+class EOFHeaderError(HeaderError):
+    """Exception for end of file headers."""
+    pass
+class InvalidHeaderError(HeaderError):
+    """Exception for invalid headers."""
+    pass
+class SubsequentHeaderError(HeaderError):
+    """Exception for missing and invalid extended headers."""
+    pass
+
+#---------------------------
+# internal stream interface
+#---------------------------
+class _LowLevelFile(object):
+    """Low-level file object. Supports reading and writing.
+       It is used instead of a regular file object for streaming
+       access.
+    """
+
+    def __init__(self, name, mode):
+        mode = {
+            "r": os.O_RDONLY,
+            "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
+        }[mode]
+        if hasattr(os, "O_BINARY"):
+            mode |= os.O_BINARY
+        self.fd = os.open(name, mode, 0o666)
+
+    def close(self):
+        os.close(self.fd)
+
+    def read(self, size):
+        return os.read(self.fd, size)
+
+    def write(self, s):
+        os.write(self.fd, s)
+
+class _Stream(object):
+    """Class that serves as an adapter between TarFile and
+       a stream-like object.  The stream-like object only
+       needs to have a read() or write() method and is accessed
+       blockwise.  Use of gzip or bzip2 compression is possible.
+       A stream-like object could be for example: sys.stdin,
+       sys.stdout, a socket, a tape device etc.
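+       Only forward seeking is supported; seek() raises StreamError
+       when asked to move backwards.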
+ + _Stream is intended to be used only internally. + """ + + def __init__(self, name, mode, comptype, fileobj, bufsize): + """Construct a _Stream object. + """ + self._extfileobj = True + if fileobj is None: + fileobj = _LowLevelFile(name, mode) + self._extfileobj = False + + if comptype == '*': + # Enable transparent compression detection for the + # stream interface + fileobj = _StreamProxy(fileobj) + comptype = fileobj.getcomptype() + + self.name = name or "" + self.mode = mode + self.comptype = comptype + self.fileobj = fileobj + self.bufsize = bufsize + self.buf = b"" + self.pos = 0 + self.closed = False + + try: + if comptype == "gz": + try: + import zlib + except ImportError: + raise CompressionError("zlib module is not available") + self.zlib = zlib + self.crc = zlib.crc32(b"") + if mode == "r": + self._init_read_gz() + else: + self._init_write_gz() + + if comptype == "bz2": + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + if mode == "r": + self.dbuf = b"" + self.cmp = bz2.BZ2Decompressor() + else: + self.cmp = bz2.BZ2Compressor() + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + def __del__(self): + if hasattr(self, "closed") and not self.closed: + self.close() + + def _init_write_gz(self): + """Initialize for writing with gzip compression. + """ + self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, + -self.zlib.MAX_WBITS, + self.zlib.DEF_MEM_LEVEL, + 0) + timestamp = struct.pack("<L", int(time.time())) + self.__write(b"\037\213\010\010" + timestamp + b"\002\377") + if self.name.endswith(".gz"): + self.name = self.name[:-3] + # RFC1952 says we must use ISO-8859-1 for the FNAME field. + self.__write(self.name.encode("iso-8859-1", "replace") + NUL) + + def write(self, s): + """Write string s to the stream. + """ + if self.comptype == "gz": + self.crc = self.zlib.crc32(s, self.crc) + self.pos += len(s) + if self.comptype != "tar": + s = self.cmp.compress(s) + self.__write(s) + + def __write(self, s): + """Write string s to the stream if a whole new block + is ready to be written. + """ + self.buf += s + while len(self.buf) > self.bufsize: + self.fileobj.write(self.buf[:self.bufsize]) + self.buf = self.buf[self.bufsize:] + + def close(self): + """Close the _Stream object. No operation should be + done on it afterwards. + """ + if self.closed: + return + + if self.mode == "w" and self.comptype != "tar": + self.buf += self.cmp.flush() + + if self.mode == "w" and self.buf: + self.fileobj.write(self.buf) + self.buf = b"" + if self.comptype == "gz": + # The native zlib crc is an unsigned 32-bit integer, but + # the Python wrapper implicitly casts that to a signed C + # long. So, on a 32-bit box self.crc may "look negative", + # while the same crc on a 64-bit box may "look positive". + # To avoid irksome warnings from the `struct` module, force + # it to look positive on all boxes. + self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff)) + self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) + + if not self._extfileobj: + self.fileobj.close() + + self.closed = True + + def _init_read_gz(self): + """Initialize for reading a gzip compressed fileobj. 
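+           The gzip container header is parsed by hand (adapted from
+           gzip.GzipFile) and the payload is handed to a raw zlib
+           decompressobj.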
+        """
+        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+        self.dbuf = b""
+
+        # taken from gzip.GzipFile with some alterations
+        if self.__read(2) != b"\037\213":
+            raise ReadError("not a gzip file")
+        if self.__read(1) != b"\010":
+            raise CompressionError("unsupported compression method")
+
+        flag = ord(self.__read(1))
+        self.__read(6)
+
+        if flag & 4:
+            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+            self.read(xlen)
+        if flag & 8:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 16:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 2:
+            self.__read(2)
+
+    def tell(self):
+        """Return the stream's file pointer position.
+        """
+        return self.pos
+
+    def seek(self, pos=0):
+        """Set the stream's file pointer to pos. Negative seeking
+           is forbidden.
+        """
+        if pos - self.pos >= 0:
+            blocks, remainder = divmod(pos - self.pos, self.bufsize)
+            for i in range(blocks):
+                self.read(self.bufsize)
+            self.read(remainder)
+        else:
+            raise StreamError("seeking backwards is not allowed")
+        return self.pos
+
+    def read(self, size=None):
+        """Return the next size number of bytes from the stream.
+           If size is not defined, return all bytes of the stream
+           up to EOF.
+        """
+        if size is None:
+            t = []
+            while True:
+                buf = self._read(self.bufsize)
+                if not buf:
+                    break
+                t.append(buf)
+            buf = b"".join(t)
+        else:
+            buf = self._read(size)
+        self.pos += len(buf)
+        return buf
+
+    def _read(self, size):
+        """Return size bytes from the stream.
+        """
+        if self.comptype == "tar":
+            return self.__read(size)
+
+        c = len(self.dbuf)
+        while c < size:
+            buf = self.__read(self.bufsize)
+            if not buf:
+                break
+            try:
+                buf = self.cmp.decompress(buf)
+            except IOError:
+                raise ReadError("invalid compressed data")
+            self.dbuf += buf
+            c += len(buf)
+        buf = self.dbuf[:size]
+        self.dbuf = self.dbuf[size:]
+        return buf
+
+    def __read(self, size):
+        """Return size bytes from stream. If internal buffer is empty,
+           read another block from the stream.
+        """
+        c = len(self.buf)
+        while c < size:
+            buf = self.fileobj.read(self.bufsize)
+            if not buf:
+                break
+            self.buf += buf
+            c += len(buf)
+        buf = self.buf[:size]
+        self.buf = self.buf[size:]
+        return buf
+# class _Stream
+
+class _StreamProxy(object):
+    """Small proxy class that enables transparent compression
+       detection for the Stream interface (mode 'r|*').
+    """
+
+    def __init__(self, fileobj):
+        self.fileobj = fileobj
+        self.buf = self.fileobj.read(BLOCKSIZE)
+
+    def read(self, size):
+        self.read = self.fileobj.read
+        return self.buf
+
+    def getcomptype(self):
+        if self.buf.startswith(b"\037\213\010"):
+            return "gz"
+        if self.buf.startswith(b"BZh91"):
+            return "bz2"
+        return "tar"
+
+    def close(self):
+        self.fileobj.close()
+# class StreamProxy
+
+class _BZ2Proxy(object):
+    """Small proxy class that enables external file object
+       support for "r:bz2" and "w:bz2" modes. This is actually
+       a workaround for a limitation in bz2 module's BZ2File
+       class which (unlike gzip.GzipFile) has no support for
+       a file object argument.
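+       Compression state is kept in incremental BZ2Compressor and
+       BZ2Decompressor objects instead.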
+ """ + + blocksize = 16 * 1024 + + def __init__(self, fileobj, mode): + self.fileobj = fileobj + self.mode = mode + self.name = getattr(self.fileobj, "name", None) + self.init() + + def init(self): + import bz2 + self.pos = 0 + if self.mode == "r": + self.bz2obj = bz2.BZ2Decompressor() + self.fileobj.seek(0) + self.buf = b"" + else: + self.bz2obj = bz2.BZ2Compressor() + + def read(self, size): + x = len(self.buf) + while x < size: + raw = self.fileobj.read(self.blocksize) + if not raw: + break + data = self.bz2obj.decompress(raw) + self.buf += data + x += len(data) + + buf = self.buf[:size] + self.buf = self.buf[size:] + self.pos += len(buf) + return buf + + def seek(self, pos): + if pos < self.pos: + self.init() + self.read(pos - self.pos) + + def tell(self): + return self.pos + + def write(self, data): + self.pos += len(data) + raw = self.bz2obj.compress(data) + self.fileobj.write(raw) + + def close(self): + if self.mode == "w": + raw = self.bz2obj.flush() + self.fileobj.write(raw) +# class _BZ2Proxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. + self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def seekable(self): + if not hasattr(self.fileobj, "seekable"): + # XXX gzip.GzipFile and bz2.BZ2File + return True + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. + """ + return self.position + + def seek(self, position): + """Seek to a position in the file. + """ + self.position = position + + def read(self, size=None): + """Read data from the file. + """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + buf += self.fileobj.read(length) + else: + buf += NUL * length + size -= length + self.position += length + return buf +#class _FileInFile + + +class ExFileObject(object): + """File-like object for reading an archive member. + Is returned by TarFile.extractfile(). + """ + blocksize = 1024 + + def __init__(self, tarfile, tarinfo): + self.fileobj = _FileInFile(tarfile.fileobj, + tarinfo.offset_data, + tarinfo.size, + tarinfo.sparse) + self.name = tarinfo.name + self.mode = "r" + self.closed = False + self.size = tarinfo.size + + self.position = 0 + self.buffer = b"" + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def read(self, size=None): + """Read at most size bytes from the file. 
If size is not + present or None, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + buf = b"" + if self.buffer: + if size is None: + buf = self.buffer + self.buffer = b"" + else: + buf = self.buffer[:size] + self.buffer = self.buffer[size:] + + if size is None: + buf += self.fileobj.read() + else: + buf += self.fileobj.read(size - len(buf)) + + self.position += len(buf) + return buf + + # XXX TextIOWrapper uses the read1() method. + read1 = read + + def readline(self, size=-1): + """Read one entire line from the file. If size is present + and non-negative, return a string with at most that + size, which may be an incomplete line. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + while True: + buf = self.fileobj.read(self.blocksize) + self.buffer += buf + if not buf or b"\n" in buf: + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + pos = len(self.buffer) + break + + if size != -1: + pos = min(size, pos) + + buf = self.buffer[:pos] + self.buffer = self.buffer[pos:] + self.position += len(buf) + return buf + + def readlines(self): + """Return a list with all remaining lines. + """ + result = [] + while True: + line = self.readline() + if not line: break + result.append(line) + return result + + def tell(self): + """Return the current file position. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + return self.position + + def seek(self, pos, whence=os.SEEK_SET): + """Seek to a position in the file. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + if whence == os.SEEK_SET: + self.position = min(max(pos, 0), self.size) + elif whence == os.SEEK_CUR: + if pos < 0: + self.position = max(self.position + pos, 0) + else: + self.position = min(self.position + pos, self.size) + elif whence == os.SEEK_END: + self.position = max(min(self.size + pos, self.size), 0) + else: + raise ValueError("Invalid argument") + + self.buffer = b"" + self.fileobj.seek(self.position) + + def close(self): + """Close the file object. + """ + self.closed = True + + def __iter__(self): + """Get an iterator over the file's lines. + """ + while True: + line = self.readline() + if not line: + break + yield line +#class ExFileObject + +#------------------ +# Exported Classes +#------------------ +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", + "chksum", "type", "linkname", "uname", "gname", + "devmajor", "devminor", + "offset", "offset_data", "pax_headers", "sparse", + "tarfile", "_sparse_structs", "_link_target") + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. 
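+           All other attributes are initialized to defaults that describe
+           an empty regular file.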
+ """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + # In pax headers the "name" and "linkname" field are called + # "path" and "linkpath". + def _getpath(self): + return self.name + def _setpath(self, name): + self.name = name + path = property(_getpath, _setpath) + + def _getlinkpath(self): + return self.linkname + def _setlinkpath(self, linkname): + self.linkname = linkname + linkpath = property(_getlinkpath, _setlinkpath) + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. + """ + info = { + "name": self.name, + "mode": self.mode & 0o7777, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. + """ + info = self.get_info() + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"]) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"]) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"]) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. + """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"]) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"]) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. 
+ for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + if name in pax_headers: + # The pax header has priority. Avoid overflow. + info[name] = 0 + continue + + val = info[name] + if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): + pax_headers[name] = str(val) + info[name] = 0 + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") + + def _posix_split_name(self, name): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + prefix = name[:LENGTH_PREFIX + 1] + while prefix and prefix[-1] != "/": + prefix = prefix[:-1] + + name = name[len(prefix):] + prefix = prefix[:-1] + + if not prefix or len(name) > LENGTH_NAME: + raise ValueError("name is too long") + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. + """ + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + info.get("type", REGTYPE), + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + itn(info.get("devmajor", 0), 8, format), + itn(info.get("devminor", 0), 8, format), + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. 
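+        # The overlong name travels as the data payload of a fake
+        # "././@LongLink" member, padded to a full 512 byte block.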
+        return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
+                cls._create_payload(name)
+
+    @classmethod
+    def _create_pax_generic_header(cls, pax_headers, type, encoding):
+        """Return a POSIX.1-2008 extended or global header sequence
+           that contains a list of keyword, value pairs. The values
+           must be strings.
+        """
+        # Check if one of the fields contains surrogate characters and thereby
+        # forces hdrcharset=BINARY, see _proc_pax() for more information.
+        binary = False
+        for keyword, value in pax_headers.items():
+            try:
+                value.encode("utf8", "strict")
+            except UnicodeEncodeError:
+                binary = True
+                break
+
+        records = b""
+        if binary:
+            # Put the hdrcharset field at the beginning of the header.
+            records += b"21 hdrcharset=BINARY\n"
+
+        for keyword, value in pax_headers.items():
+            keyword = keyword.encode("utf8")
+            if binary:
+                # Try to restore the original byte representation of `value'.
+                # Needless to say, that the encoding must match the string.
+                value = value.encode(encoding, "surrogateescape")
+            else:
+                value = value.encode("utf8")
+
+            l = len(keyword) + len(value) + 3   # ' ' + '=' + '\n'
+            n = p = 0
+            while True:
+                n = l + len(str(p))
+                if n == p:
+                    break
+                p = n
+            records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
+
+        # We use a hardcoded "././@PaxHeader" name like star does
+        # instead of the one that POSIX recommends.
+        info = {}
+        info["name"] = "././@PaxHeader"
+        info["type"] = type
+        info["size"] = len(records)
+        info["magic"] = POSIX_MAGIC
+
+        # Create pax header + record blocks.
+        return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
+                cls._create_payload(records)
+
+    @classmethod
+    def frombuf(cls, buf, encoding, errors):
+        """Construct a TarInfo object from a 512 byte bytes object.
+        """
+        if len(buf) == 0:
+            raise EmptyHeaderError("empty header")
+        if len(buf) != BLOCKSIZE:
+            raise TruncatedHeaderError("truncated header")
+        if buf.count(NUL) == BLOCKSIZE:
+            raise EOFHeaderError("end of file header")
+
+        chksum = nti(buf[148:156])
+        if chksum not in calc_chksums(buf):
+            raise InvalidHeaderError("bad checksum")
+
+        obj = cls()
+        obj.name = nts(buf[0:100], encoding, errors)
+        obj.mode = nti(buf[100:108])
+        obj.uid = nti(buf[108:116])
+        obj.gid = nti(buf[116:124])
+        obj.size = nti(buf[124:136])
+        obj.mtime = nti(buf[136:148])
+        obj.chksum = chksum
+        obj.type = buf[156:157]
+        obj.linkname = nts(buf[157:257], encoding, errors)
+        obj.uname = nts(buf[265:297], encoding, errors)
+        obj.gname = nts(buf[297:329], encoding, errors)
+        obj.devmajor = nti(buf[329:337])
+        obj.devminor = nti(buf[337:345])
+        prefix = nts(buf[345:500], encoding, errors)
+
+        # Old V7 tar format represents a directory as a regular
+        # file with a trailing slash.
+        if obj.type == AREGTYPE and obj.name.endswith("/"):
+            obj.type = DIRTYPE
+
+        # The old GNU sparse format occupies some of the unused
+        # space in the buffer for up to 4 sparse structures.
+        # Save them for later processing in _proc_sparse().
+        if obj.type == GNUTYPE_SPARSE:
+            pos = 386
+            structs = []
+            for i in range(4):
+                try:
+                    offset = nti(buf[pos:pos + 12])
+                    numbytes = nti(buf[pos + 12:pos + 24])
+                except ValueError:
+                    break
+                structs.append((offset, numbytes))
+                pos += 24
+            isextended = bool(buf[482])
+            origsize = nti(buf[483:495])
+            obj._sparse_structs = (structs, isextended, origsize)
+
+        # Remove redundant slashes from directories.
+        if obj.isdir():
+            obj.name = obj.name.rstrip("/")
+
+        # Reconstruct a ustar longname.
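+        # The prefix field holds the leading portion of a name that did
+        # not fit into the 100 byte name field.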
+ if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. + self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. 
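+        # The record data is read in one piece, rounded up to a full
+        # block boundary.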
+        buf = tarfile.fileobj.read(self._block(self.size))
+
+        # A pax header stores supplemental information for either
+        # the following file (extended) or all following files
+        # (global).
+        if self.type == XGLTYPE:
+            pax_headers = tarfile.pax_headers
+        else:
+            pax_headers = tarfile.pax_headers.copy()
+
+        # Check if the pax header contains a hdrcharset field. This tells us
+        # the encoding of the path, linkpath, uname and gname fields. Normally,
+        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
+        # implementations to store them as raw binary strings if the
+        # translation to UTF-8 fails.
+        match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
+        if match is not None:
+            pax_headers["hdrcharset"] = match.group(1).decode("utf8")
+
+        # For the time being, we don't care about anything other than "BINARY".
+        # The only other value that is currently allowed by the standard is
+        # "ISO-IR 10646 2000 UTF-8", in other words UTF-8.
+        hdrcharset = pax_headers.get("hdrcharset")
+        if hdrcharset == "BINARY":
+            encoding = tarfile.encoding
+        else:
+            encoding = "utf8"
+
+        # Parse pax header information. A record looks like this:
+        # "%d %s=%s\n" % (length, keyword, value). length is the size
+        # of the complete record including the length field itself and
+        # the newline. keyword and value are both UTF-8 encoded strings.
+        regex = re.compile(br"(\d+) ([^=]+)=")
+        pos = 0
+        while True:
+            match = regex.match(buf, pos)
+            if not match:
+                break
+
+            length, keyword = match.groups()
+            length = int(length)
+            value = buf[match.end(2) + 1:match.start(1) + length - 1]
+
+            # Normally, we could just use "utf8" as the encoding and "strict"
+            # as the error handler, but we better not take the risk. For
+            # example, GNU tar <= 1.23 is known to store filenames it cannot
+            # translate to UTF-8 as raw strings (unfortunately without a
+            # hdrcharset=BINARY header).
+            # We first try the strict standard encoding, and if that fails we
+            # fall back on the user's encoding and error handler.
+            keyword = self._decode_pax_field(keyword, "utf8", "utf8",
+                                             tarfile.errors)
+            if keyword in PAX_NAME_FIELDS:
+                value = self._decode_pax_field(value, encoding, tarfile.encoding,
+                                               tarfile.errors)
+            else:
+                value = self._decode_pax_field(value, "utf8", "utf8",
+                                               tarfile.errors)
+
+            pax_headers[keyword] = value
+            pos += length
+
+        # Fetch the next header.
+        try:
+            next = self.fromtarfile(tarfile)
+        except HeaderError:
+            raise SubsequentHeaderError("missing or bad subsequent header")
+
+        # Process GNU sparse information.
+        if "GNU.sparse.map" in pax_headers:
+            # GNU extended sparse format version 0.1.
+            self._proc_gnusparse_01(next, pax_headers)
+
+        elif "GNU.sparse.size" in pax_headers:
+            # GNU extended sparse format version 0.0.
+            self._proc_gnusparse_00(next, pax_headers, buf)
+
+        elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
+            # GNU extended sparse format version 1.0.
+            self._proc_gnusparse_10(next, pax_headers, tarfile)
+
+        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
+            # Patch the TarInfo object with the extended header info.
+            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
+            next.offset = self.offset
+
+            if "size" in pax_headers:
+                # If the extended header replaces the size field,
+                # we need to recalculate the offset where the next
+                # header starts.
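+                # (_apply_pax_info() above has already copied the pax
+                # "size" record into next.size.)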
+ offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, pax_headers, buf): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): + offsets.append(int(match.group(1))) + numbytes = [] + for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): + numbytes.append(int(match.group(1))) + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. + """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + return self.type in REGULAR_TYPES + def isfile(self): + return self.isreg() + def isdir(self): + return self.type == DIRTYPE + def issym(self): + return self.type == SYMTYPE + def islnk(self): + return self.type == LNKTYPE + def ischr(self): + return self.type == CHRTYPE + def isblk(self): + return self.type == BLKTYPE + def isfifo(self): + return self.type == FIFOTYPE + def issparse(self): + return self.sparse is not None + def isdev(self): + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). 
If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The default ExFileObject class to use. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): + """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + self.mode = mode + self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. + self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if name is None and hasattr(fileobj, "name"): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. + while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) + + if self.mode in "aw": + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. 
The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. + + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + for comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + if fileobj is not None: + fileobj.seek(saved_pos) + continue + raise ReadError("file could not be opened successfully") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. + if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + stream = _Stream(name, filemode, comptype, fileobj, bufsize) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in "aw": + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. 
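+           If fileobj is given, it is wrapped in a gzip.GzipFile.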
+ """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + try: + import gzip + gzip.GzipFile + except (ImportError, AttributeError): + raise CompressionError("gzip module is not available") + + extfileobj = fileobj is not None + try: + fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) + t = cls.taropen(name, mode, fileobj, **kwargs) + except IOError: + if not extfileobj and fileobj is not None: + fileobj.close() + if fileobj is None: + raise + raise ReadError("not a gzip file") + except: + if not extfileobj and fileobj is not None: + fileobj.close() + raise + t._extfileobj = extfileobj + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'.") + + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + + if fileobj is not None: + fileobj = _BZ2Proxy(fileobj, mode) + else: + fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (IOError, EOFError): + fileobj.close() + raise ReadError("not a bzip2 file") + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open" # bzip2 compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + if self.mode in "aw": + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + + if not self._extfileobj: + self.fileobj.close() + self.closed = True + + def getmember(self, name): + """Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object for either the file `name' or the file + object `fileobj' (using os.fstat on its file descriptor). You can + modify some of the TarInfo's attributes before you add it using + addfile(). If given, `arcname' specifies an alternative name for the + file in the archive. 
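+           Returns None for file types that cannot be stored in a tar
+           archive (e.g. sockets).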
+        """
+        self._check("aw")
+
+        # When fileobj is given, replace name by
+        # fileobj's real name.
+        if fileobj is not None:
+            name = fileobj.name
+
+        # Building the name of the member in the archive.
+        # Backward slashes are converted to forward slashes,
+        # Absolute paths are turned to relative paths.
+        if arcname is None:
+            arcname = name
+        drv, arcname = os.path.splitdrive(arcname)
+        arcname = arcname.replace(os.sep, "/")
+        arcname = arcname.lstrip("/")
+
+        # Now, fill the TarInfo object with
+        # information specific for the file.
+        tarinfo = self.tarinfo()
+        tarinfo.tarfile = self
+
+        # Use os.stat or os.lstat, depending on platform
+        # and if symlinks shall be resolved.
+        if fileobj is None:
+            if hasattr(os, "lstat") and not self.dereference:
+                statres = os.lstat(name)
+            else:
+                statres = os.stat(name)
+        else:
+            statres = os.fstat(fileobj.fileno())
+        linkname = ""
+
+        stmd = statres.st_mode
+        if stat.S_ISREG(stmd):
+            inode = (statres.st_ino, statres.st_dev)
+            if not self.dereference and statres.st_nlink > 1 and \
+                    inode in self.inodes and arcname != self.inodes[inode]:
+                # Is it a hardlink to an already
+                # archived file?
+                type = LNKTYPE
+                linkname = self.inodes[inode]
+            else:
+                # The inode is added only if it's valid.
+                # For win32 it is always 0.
+                type = REGTYPE
+                if inode[0]:
+                    self.inodes[inode] = arcname
+        elif stat.S_ISDIR(stmd):
+            type = DIRTYPE
+        elif stat.S_ISFIFO(stmd):
+            type = FIFOTYPE
+        elif stat.S_ISLNK(stmd):
+            type = SYMTYPE
+            linkname = os.readlink(name)
+        elif stat.S_ISCHR(stmd):
+            type = CHRTYPE
+        elif stat.S_ISBLK(stmd):
+            type = BLKTYPE
+        else:
+            return None
+
+        # Fill the TarInfo object with all
+        # information we can get.
+        tarinfo.name = arcname
+        tarinfo.mode = stmd
+        tarinfo.uid = statres.st_uid
+        tarinfo.gid = statres.st_gid
+        if type == REGTYPE:
+            tarinfo.size = statres.st_size
+        else:
+            tarinfo.size = 0
+        tarinfo.mtime = statres.st_mtime
+        tarinfo.type = type
+        tarinfo.linkname = linkname
+        if pwd:
+            try:
+                tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
+            except KeyError:
+                pass
+        if grp:
+            try:
+                tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
+            except KeyError:
+                pass
+
+        if type in (CHRTYPE, BLKTYPE):
+            if hasattr(os, "major") and hasattr(os, "minor"):
+                tarinfo.devmajor = os.major(statres.st_rdev)
+                tarinfo.devminor = os.minor(statres.st_rdev)
+        return tarinfo
+
+    def list(self, verbose=True):
+        """Print a table of contents to sys.stdout. If `verbose' is False, only
+           the names of the members are printed. If it is True, an `ls -l'-like
+           output is produced.
+        """
+        self._check()
+
+        for tarinfo in self:
+            if verbose:
+                print(filemode(tarinfo.mode), end=' ')
+                print("%s/%s" % (tarinfo.uname or tarinfo.uid,
+                                 tarinfo.gname or tarinfo.gid), end=' ')
+                if tarinfo.ischr() or tarinfo.isblk():
+                    print("%10s" % ("%d,%d" \
+                                    % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
+                else:
+                    print("%10d" % tarinfo.size, end=' ')
+                print("%d-%02d-%02d %02d:%02d:%02d" \
+                      % time.localtime(tarinfo.mtime)[:6], end=' ')
+
+            print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
+
+            if verbose:
+                if tarinfo.issym():
+                    print("->", tarinfo.linkname, end=' ')
+                if tarinfo.islnk():
+                    print("link to", tarinfo.linkname, end=' ')
+            print()
+
+    def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
+        """Add the file `name' to the archive. `name' may be any type of file
+           (directory, fifo, symbolic link, etc.). If given, `arcname'
+           specifies an alternative name for the file in the archive.
+           Directories are added recursively by default.
This can be avoided by + setting `recursive' to False. `exclude' is a function that should + return True for each filename to be excluded. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("aw") + + if arcname is None: + arcname = name + + # Exclude pathnames. + if exclude is not None: + import warnings + warnings.warn("use the filter argument instead", + DeprecationWarning, 2) + if exclude(name): + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Skip if somebody tries to archive the archive... + if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + f = bltn_open(name, "rb") + self.addfile(tarinfo, f) + f.close() + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in os.listdir(name): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, exclude, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects using gettarinfo(). + On Windows platforms, `fileobj' should always be opened with mode + 'rb' to avoid irritation about the file size. + """ + self._check("aw") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + + # If there's data to follow, append it. + if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). + """ + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 0o700 + # Do not set_attrs directories, as we will do that further down + self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name) + directories.reverse() + + # Set correct owner, mtime and filemode on directories. 
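+        # (Deepest paths come first, so a directory's attributes are only
+        # restored after everything inside it has already been handled.)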
+ for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extract(self, member, path="", set_attrs=True): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + # Prepare the link target for makelink(). + if tarinfo.islnk(): + tarinfo._link_target = os.path.join(path, tarinfo.linkname) + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs) + except EnvironmentError as e: + if self.errorlevel > 0: + raise + else: + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extractfile(self, member): + """Extract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file, a + file-like object is returned. If `member' is a link, a file-like + object is constructed from the link's target. If `member' is none of + the above, None is returned. + The file-like object is read-only and provides the following + methods: read(), readline(), readlines(), seek() and tell() + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg(): + return self.fileobject(self, tarinfo) + + elif tarinfo.type not in SUPPORTED_TYPES: + # If a member's type is unknown, it is treated as a + # regular file. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. + raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True): + """Extract the TarInfo object tarinfo to a physical + file called targetpath. + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. 
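+            # (Only members actually present in the archive get their stored
+            # mode and owner applied later; these implicit parent directories
+            # keep the process's default, umask-derived permissions.)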
+ os.makedirs(upperdirs) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink(tarinfo, targetpath) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. + + def makedir(self, tarinfo, targetpath): + """Make a directory called targetpath. + """ + try: + # Use a safe mode for the directory, the real mode is set + # later in _extract_member(). + os.mkdir(targetpath, 0o700) + except EnvironmentError as e: + if e.errno != errno.EEXIST: + raise + + def makefile(self, tarinfo, targetpath): + """Make a file called targetpath. + """ + source = self.fileobj + source.seek(tarinfo.offset_data) + target = bltn_open(targetpath, "wb") + if tarinfo.sparse is not None: + for offset, size in tarinfo.sparse: + target.seek(offset) + copyfileobj(source, target, size) + else: + copyfileobj(source, target, tarinfo.size) + target.seek(tarinfo.size) + target.truncate() + target.close() + + def makeunknown(self, tarinfo, targetpath): + """Make a file from a TarInfo object with an unknown type + at targetpath. + """ + self.makefile(tarinfo, targetpath) + self._dbg(1, "tarfile: Unknown file type %r, " \ + "extracted as regular file." % tarinfo.type) + + def makefifo(self, tarinfo, targetpath): + """Make a fifo called targetpath. + """ + if hasattr(os, "mkfifo"): + os.mkfifo(targetpath) + else: + raise ExtractError("fifo not supported by system") + + def makedev(self, tarinfo, targetpath): + """Make a character or block device called targetpath. + """ + if not hasattr(os, "mknod") or not hasattr(os, "makedev"): + raise ExtractError("special devices not supported by system") + + mode = tarinfo.mode + if tarinfo.isblk(): + mode |= stat.S_IFBLK + else: + mode |= stat.S_IFCHR + + os.mknod(targetpath, mode, + os.makedev(tarinfo.devmajor, tarinfo.devminor)) + + def makelink(self, tarinfo, targetpath): + """Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + """ + try: + # For systems that support symbolic and hard links. + if tarinfo.issym(): + os.symlink(tarinfo.linkname, targetpath) + else: + # See extract(). 
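+                # extract() stored the resolved on-disk path of the link's
+                # target in _link_target; hard-link to it if it already exists.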
+                if os.path.exists(tarinfo._link_target):
+                    os.link(tarinfo._link_target, targetpath)
+                else:
+                    self._extract_member(self._find_link_target(tarinfo),
+                                         targetpath)
+        except symlink_exception:
+            # The platform cannot create the (sym)link, so fall back to
+            # extracting a copy of the linked-to file from the archive.
+            try:
+                self._extract_member(self._find_link_target(tarinfo),
+                                     targetpath)
+            except KeyError:
+                raise ExtractError("unable to resolve link inside archive")
+
+    def chown(self, tarinfo, targetpath):
+        """Set owner of targetpath according to tarinfo.
+        """
+        if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
+            # We have to be root to do so.
+            try:
+                g = grp.getgrnam(tarinfo.gname)[2]
+            except KeyError:
+                g = tarinfo.gid
+            try:
+                u = pwd.getpwnam(tarinfo.uname)[2]
+            except KeyError:
+                u = tarinfo.uid
+            try:
+                if tarinfo.issym() and hasattr(os, "lchown"):
+                    os.lchown(targetpath, u, g)
+                else:
+                    if sys.platform != "os2emx":
+                        os.chown(targetpath, u, g)
+            except EnvironmentError as e:
+                raise ExtractError("could not change owner")
+
+    def chmod(self, tarinfo, targetpath):
+        """Set file permissions of targetpath according to tarinfo.
+        """
+        if hasattr(os, 'chmod'):
+            try:
+                os.chmod(targetpath, tarinfo.mode)
+            except EnvironmentError as e:
+                raise ExtractError("could not change mode")
+
+    def utime(self, tarinfo, targetpath):
+        """Set modification time of targetpath according to tarinfo.
+        """
+        if not hasattr(os, 'utime'):
+            return
+        try:
+            os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
+        except EnvironmentError as e:
+            raise ExtractError("could not change modification time")
+
+    #--------------------------------------------------------------------------
+    def next(self):
+        """Return the next member of the archive as a TarInfo object, when
+           TarFile is opened for reading. Return None if there are no more
+           members available.
+        """
+        self._check("ra")
+        if self.firstmember is not None:
+            m = self.firstmember
+            self.firstmember = None
+            return m
+
+        # Read the next block.
+        self.fileobj.seek(self.offset)
+        tarinfo = None
+        while True:
+            try:
+                tarinfo = self.tarinfo.fromtarfile(self)
+            except EOFHeaderError as e:
+                if self.ignore_zeros:
+                    self._dbg(2, "0x%X: %s" % (self.offset, e))
+                    self.offset += BLOCKSIZE
+                    continue
+            except InvalidHeaderError as e:
+                if self.ignore_zeros:
+                    self._dbg(2, "0x%X: %s" % (self.offset, e))
+                    self.offset += BLOCKSIZE
+                    continue
+                elif self.offset == 0:
+                    raise ReadError(str(e))
+            except EmptyHeaderError:
+                if self.offset == 0:
+                    raise ReadError("empty file")
+            except TruncatedHeaderError as e:
+                if self.offset == 0:
+                    raise ReadError(str(e))
+            except SubsequentHeaderError as e:
+                raise ReadError(str(e))
+            break
+
+        if tarinfo is not None:
+            self.members.append(tarinfo)
+        else:
+            self._loaded = True
+
+        return tarinfo
+
+    #--------------------------------------------------------------------------
+    # Little helper methods:
+
+    def _getmember(self, name, tarinfo=None, normalize=False):
+        """Find an archive member by name from bottom to top.
+           If tarinfo is given, it is used as the starting point.
+        """
+        # Ensure that all members have been loaded.
+        members = self.getmembers()
+
+        # Limit the member search list up to tarinfo.
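+        # (A hard link can only refer to a member stored before it in the
+        # archive, so the search never needs to look past the link itself.)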
+ if tarinfo is not None: + members = members[:members.index(tarinfo)] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + def _load(self): + """Read through the entire archive file and look for readable + members. + """ + while True: + tarinfo = self.next() + if tarinfo is None: + break + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise IOError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise IOError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + return iter(self.members) + else: + return TarIter(self) + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. + """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True +# class TarFile + +class TarIter(object): + """Iterator Class. + + for tarinfo in TarFile(...): + suite... + """ + + def __init__(self, tarfile): + """Construct a TarIter object. + """ + self.tarfile = tarfile + self.index = 0 + def __iter__(self): + """Return iterator object. + """ + return self + + def __next__(self): + """Return the next item using TarFile's next() method. + When all members have been read, set TarFile as _loaded. + """ + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will cause TarIter to stop prematurely. + if not self.tarfile._loaded: + tarinfo = self.tarfile.next() + if not tarinfo: + self.tarfile._loaded = True + raise StopIteration + else: + try: + tarinfo = self.tarfile.members[self.index] + except IndexError: + raise StopIteration + self.index += 1 + return tarinfo + + next = __next__ # for Python 2.x + +#-------------------- +# exported functions +#-------------------- +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. 
+ """ + try: + t = open(name) + t.close() + return True + except TarError: + return False + +bltn_open = open +open = TarFile.open diff --git a/awx/lib/site-packages/pip/vendor/distlib/compat.py b/awx/lib/site-packages/pip/vendor/distlib/compat.py new file mode 100644 index 0000000000..fd2c1cb8e2 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/compat.py @@ -0,0 +1,754 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import absolute_import + +import os +import re +import sys + +if sys.version_info[0] < 3: + from StringIO import StringIO + string_types = basestring, + text_type = unicode + from types import FileType as file_type + import __builtin__ as builtins + import ConfigParser as configparser + from ._backport import shutil + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit + from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, + pathname2url, ContentTooShortError, splittype) + + def quote(s): + if isinstance(s, unicode): + s = s.encode('utf-8') + return _quote(s) + + import urllib2 + from urllib2 import (Request, urlopen, URLError, HTTPError, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPSHandler, HTTPHandler, HTTPRedirectHandler, + build_opener) + import httplib + import xmlrpclib + import Queue as queue + from HTMLParser import HTMLParser + import htmlentitydefs + raw_input = raw_input + from itertools import ifilter as filter + from itertools import ifilterfalse as filterfalse + + _userprog = None + def splituser(host): + """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + global _userprog + if _userprog is None: + import re + _userprog = re.compile('^(.*)@(.*)$') + + match = _userprog.match(host) + if match: return match.group(1, 2) + return None, host + + class CertificateError(ValueError): + pass + + + def _dnsname_to_pat(dn): + pats = [] + for frag in dn.split(r'.'): + if frag == '*': + # When '*' is a fragment by itself, it matches a non-empty + # dotless fragment. + pats.append('[^.]+') + else: + # Otherwise, '*' matches any dotless fragment. + frag = re.escape(frag) + pats.append(frag.replace(r'\*', '[^.]*')) + return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules + are mostly followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_to_pat(value).match(hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. 
+ if key == 'commonName': + if _dnsname_to_pat(value).match(hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + +else: + from io import StringIO + string_types = str, + text_type = str + from io import TextIOWrapper as file_type + import builtins + import configparser + import shutil + from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, + unquote, urlsplit, urlunsplit, splittype) + from urllib.request import (urlopen, urlretrieve, Request, url2pathname, + pathname2url, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPSHandler, HTTPHandler, HTTPRedirectHandler, + build_opener) + from urllib.error import HTTPError, URLError, ContentTooShortError + import http.client as httplib + import urllib.request as urllib2 + import xmlrpc.client as xmlrpclib + import queue + from html.parser import HTMLParser + import html.entities as htmlentitydefs + raw_input = input + from itertools import filterfalse + filter = filter + + from ssl import match_hostname, CertificateError + +# ZipFile is a context manager in 2.7, but not in 2.6 + +from zipfile import ZipFile as BaseZipFile + +if hasattr(BaseZipFile, '__enter__'): + ZipFile = BaseZipFile +else: + from zipfile import ZipExtFile as BaseZipExtFile + + class ZipExtFile(BaseZipExtFile): + def __init__(self, base): + self.__dict__.update(base.__dict__) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + class ZipFile(BaseZipFile): + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + def open(self, *args, **kwargs): + base = BaseZipFile.open(self, *args, **kwargs) + return ZipExtFile(base) + +try: + from platform import python_implementation +except ImportError: # pragma: no cover + def python_implementation(): + """Return a string identifying the Python implementation.""" + if 'PyPy' in sys.version: + return 'PyPy' + if os.name == 'java': + return 'Jython' + if sys.version.startswith('IronPython'): + return 'IronPython' + return 'CPython' + +try: + import sysconfig +except ImportError: # pragma: no cover + from ._backport import sysconfig + +try: + callable = callable +except NameError: # pragma: no cover + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode + fsdecode = os.fsdecode +except AttributeError: # pragma: no cover + _fsencoding = sys.getfilesystemencoding() + if _fsencoding == 'mbcs': + _fserrors = 'strict' + else: + _fserrors = 'surrogateescape' + + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, text_type): + return filename.encode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + + def fsdecode(filename): + if isinstance(filename, text_type): + return filename + elif isinstance(filename, bytes): + return filename.decode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + +try: + from tokenize import detect_encoding +except 
ImportError: # pragma: no cover
+    from codecs import BOM_UTF8, lookup
+    import re
+
+    cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
+
+    def _get_normal_name(orig_enc):
+        """Imitates get_normal_name in tokenizer.c."""
+        # Only care about the first 12 characters.
+        enc = orig_enc[:12].lower().replace("_", "-")
+        if enc == "utf-8" or enc.startswith("utf-8-"):
+            return "utf-8"
+        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+            return "iso-8859-1"
+        return orig_enc
+
+    def detect_encoding(readline):
+        """
+        The detect_encoding() function is used to detect the encoding that should
+        be used to decode a Python source file. It requires one argument, readline,
+        in the same way as the tokenize() generator.
+
+        It will call readline a maximum of twice, and return the encoding used
+        (as a string) and a list of any lines (left as bytes) it has read in.
+
+        It detects the encoding from the presence of a utf-8 bom or an encoding
+        cookie as specified in pep-0263. If both a bom and a cookie are present,
+        but disagree, a SyntaxError will be raised. If the encoding cookie is an
+        invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+        'utf-8-sig' is returned.
+
+        If no encoding is specified, then the default of 'utf-8' will be returned.
+        """
+        try:
+            filename = readline.__self__.name
+        except AttributeError:
+            filename = None
+        bom_found = False
+        encoding = None
+        default = 'utf-8'
+        def read_or_stop():
+            try:
+                return readline()
+            except StopIteration:
+                return b''
+
+        def find_cookie(line):
+            try:
+                # Decode as UTF-8. Either the line is an encoding declaration,
+                # in which case it should be pure ASCII, or it must be UTF-8
+                # per default encoding.
+                line_string = line.decode('utf-8')
+            except UnicodeDecodeError:
+                msg = "invalid or missing encoding declaration"
+                if filename is not None:
+                    msg = '{} for {!r}'.format(msg, filename)
+                raise SyntaxError(msg)
+
+            matches = cookie_re.findall(line_string)
+            if not matches:
+                return None
+            encoding = _get_normal_name(matches[0])
+            try:
+                codec = lookup(encoding)
+            except LookupError:
+                # This behaviour mimics the Python interpreter
+                if filename is None:
+                    msg = "unknown encoding: " + encoding
+                else:
+                    msg = "unknown encoding for {!r}: {}".format(filename,
+                                                                 encoding)
+                raise SyntaxError(msg)
+
+            if bom_found:
+                if codec.name != 'utf-8':
+                    # This behaviour mimics the Python interpreter
+                    if filename is None:
+                        msg = 'encoding problem: utf-8'
+                    else:
+                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
+                    raise SyntaxError(msg)
+                encoding += '-sig'
+            return encoding
+
+        first = read_or_stop()
+        if first.startswith(BOM_UTF8):
+            bom_found = True
+            first = first[3:]
+            default = 'utf-8-sig'
+        if not first:
+            return default, []
+
+        encoding = find_cookie(first)
+        if encoding:
+            return encoding, [first]
+
+        second = read_or_stop()
+        if not second:
+            return default, [first]
+
+        encoding = find_cookie(second)
+        if encoding:
+            return encoding, [first, second]
+
+        return default, [first, second]
+
+# For converting & <-> &amp; etc.
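+# (cgi.escape, the 2.x fallback below, has no inverse, so unescape is always
+# taken from HTMLParser.)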
+try:
+    from html import escape
+except ImportError:
+    from cgi import escape
+unescape = HTMLParser().unescape
+
+try:
+    from collections import ChainMap
+except ImportError: # pragma: no cover
+    from collections import MutableMapping
+
+    try:
+        from reprlib import recursive_repr as _recursive_repr
+    except ImportError:
+        # get_ident is needed by wrapper() below; on the reprlib path it is
+        # already taken care of.
+        try:
+            from thread import get_ident
+        except ImportError:
+            from dummy_thread import get_ident
+
+        def _recursive_repr(fillvalue='...'):
+            '''
+            Decorator to make a repr function return fillvalue for a recursive
+            call
+            '''
+
+            def decorating_function(user_function):
+                repr_running = set()
+
+                def wrapper(self):
+                    key = id(self), get_ident()
+                    if key in repr_running:
+                        return fillvalue
+                    repr_running.add(key)
+                    try:
+                        result = user_function(self)
+                    finally:
+                        repr_running.discard(key)
+                    return result
+
+                # Can't use functools.wraps() here because of bootstrap issues
+                wrapper.__module__ = getattr(user_function, '__module__')
+                wrapper.__doc__ = getattr(user_function, '__doc__')
+                wrapper.__name__ = getattr(user_function, '__name__')
+                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+                return wrapper
+
+            return decorating_function
+
+    class ChainMap(MutableMapping):
+        ''' A ChainMap groups multiple dicts (or other mappings) together
+        to create a single, updateable view.
+
+        The underlying mappings are stored in a list. That list is public and
+        can be accessed or updated using the *maps* attribute. There is no
+        other state.
+
+        Lookups search the underlying mappings successively until a key is found.
+        In contrast, writes, updates, and deletions only operate on the first
+        mapping.
+
+        '''
+
+        def __init__(self, *maps):
+            '''Initialize a ChainMap by setting *maps* to the given mappings.
+            If no mappings are provided, a single empty dictionary is used.
+
+            '''
+            self.maps = list(maps) or [{}]          # always at least one map
+
+        def __missing__(self, key):
+            raise KeyError(key)
+
+        def __getitem__(self, key):
+            for mapping in self.maps:
+                try:
+                    return mapping[key]             # can't use 'key in mapping' with defaultdict
+                except KeyError:
+                    pass
+            return self.__missing__(key)            # support subclasses that define __missing__
+
+        def get(self, key, default=None):
+            return self[key] if key in self else default
+
+        def __len__(self):
+            return len(set().union(*self.maps))     # reuses stored hash values if possible
+
+        def __iter__(self):
+            return iter(set().union(*self.maps))
+
+        def __contains__(self, key):
+            return any(key in m for m in self.maps)
+
+        def __bool__(self):
+            return any(self.maps)
+
+        @_recursive_repr()
+        def __repr__(self):
+            return '{0.__class__.__name__}({1})'.format(
+                self, ', '.join(map(repr, self.maps)))
+
+        @classmethod
+        def fromkeys(cls, iterable, *args):
+            'Create a ChainMap with a single dict created from the iterable.'
+            return cls(dict.fromkeys(iterable, *args))
+
+        def copy(self):
+            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
+            return self.__class__(self.maps[0].copy(), *self.maps[1:])
+
+        __copy__ = copy
+
+        def new_child(self):                        # like Django's Context.push()
+            'New ChainMap with a new dict followed by all previous maps.'
+            return self.__class__({}, *self.maps)
+
+        @property
+        def parents(self):                          # like Django's Context.pop()
+            'New ChainMap from maps[1:].'
+            return self.__class__(*self.maps[1:])
+
+        def __setitem__(self, key, value):
+            self.maps[0][key] = value
+
+        def __delitem__(self, key):
+            try:
+                del self.maps[0][key]
+            except KeyError:
+                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+        def popitem(self):
+            'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
+            try:
+                return self.maps[0].popitem()
+            except KeyError:
+                raise KeyError('No keys found in the first mapping.')
+
+        def pop(self, key, *args):
+            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
+            try:
+                return self.maps[0].pop(key, *args)
+            except KeyError:
+                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+        def clear(self):
+            'Clear maps[0], leaving maps[1:] intact.'
+            self.maps[0].clear()
+
+try:
+    from imp import cache_from_source
+except ImportError: # pragma: no cover
+    def cache_from_source(path, debug_override=None):
+        assert path.endswith('.py')
+        if debug_override is None:
+            debug_override = __debug__
+        if debug_override:
+            suffix = 'c'
+        else:
+            suffix = 'o'
+        return path + suffix
+
+try:
+    from collections import OrderedDict
+except ImportError: # pragma: no cover
+## {{{ http://code.activestate.com/recipes/576693/ (r9)
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+    try:
+        from thread import get_ident as _get_ident
+    except ImportError:
+        from dummy_thread import get_ident as _get_ident
+
+    try:
+        from _abcoll import KeysView, ValuesView, ItemsView
+    except ImportError:
+        pass
+
+
+    class OrderedDict(dict):
+        'Dictionary that remembers insertion order'
+        # An inherited dict maps keys to values.
+        # The inherited dict provides __getitem__, __len__, __contains__, and get.
+        # The remaining methods are order-aware.
+        # Big-O running times for all methods are the same as for regular dictionaries.
+
+        # The internal self.__map dictionary maps keys to links in a doubly linked list.
+        # The circular doubly linked list starts and ends with a sentinel element.
+        # The sentinel element never gets deleted (this simplifies the algorithm).
+        # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
+
+        def __init__(self, *args, **kwds):
+            '''Initialize an ordered dictionary.  Signature is the same as for
+            regular dictionaries, but keyword arguments are not recommended
+            because their insertion order is arbitrary.
+
+            '''
+            if len(args) > 1:
+                raise TypeError('expected at most 1 argument, got %d' % len(args))
+            try:
+                self.__root
+            except AttributeError:
+                self.__root = root = []                     # sentinel node
+                root[:] = [root, root, None]
+                self.__map = {}
+            self.__update(*args, **kwds)
+
+        def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+            'od.__setitem__(i, y) <==> od[i]=y'
+            # Setting a new item creates a new link which goes at the end of the linked
+            # list, and the inherited dictionary is updated with the new key/value pair.
+            if key not in self:
+                root = self.__root
+                last = root[0]
+                last[1] = root[0] = self.__map[key] = [last, root, key]
+            dict_setitem(self, key, value)
+
+        def __delitem__(self, key, dict_delitem=dict.__delitem__):
+            'od.__delitem__(y) <==> del od[y]'
+            # Deleting an existing item uses self.__map to find the link which is
+            # then removed by updating the links in the predecessor and successor nodes.
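+            # (The sentinel root guarantees both neighbours always exist, so
+            # no end-of-list special cases are needed.)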
+ dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. + + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args),)) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. 
+ + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running=None): + 'od.__repr__() <==> repr(od)' + if not _repr_running: _repr_running = {} + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) diff --git a/awx/lib/site-packages/pip/vendor/distlib/database.py b/awx/lib/site-packages/pip/vendor/distlib/database.py new file mode 100644 index 0000000000..eb6444a5ee --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/database.py @@ -0,0 +1,1301 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""PEP 376 implementation.""" + +from __future__ import unicode_literals + +import base64 +import codecs +import hashlib +import logging +import os +import sys +import zipimport + +from . import DistlibException +from .compat import StringIO, configparser, string_types +from .version import get_scheme, UnsupportedVersionError +from .markers import interpret +from .metadata import Metadata +from .util import (parse_requirement, cached_property, get_export_entry, + CSVReader, CSVWriter) + + +__all__ = ['Distribution', 'BaseInstalledDistribution', + 'InstalledDistribution', 'EggInfoDistribution', + 'DistributionPath'] + + +logger = logging.getLogger(__name__) + +DIST_FILES = ('INSTALLER', 'METADATA', 'RECORD', 'REQUESTED', 'RESOURCES', + 'EXPORTS', 'SHARED') + +DISTINFO_EXT = '.dist-info' + + +class _Cache(object): + """ + A simple cache mapping names and .dist-info paths to distributions + """ + def __init__(self): + """ + Initialise an instance. 
There is normally one for each DistributionPath. + """ + self.name = {} + self.path = {} + self.generated = False + + def clear(self): + """ + Clear the cache, setting it to its initial state. + """ + self.name.clear() + self.path.clear() + self.generated = False + + def add(self, dist): + """ + Add a distribution to the cache. + :param dist: The distribution to add. + """ + if dist.path not in self.path: + self.path[dist.path] = dist + self.name.setdefault(dist.key, []).append(dist) + +class DistributionPath(object): + """ + Represents a set of distributions installed on a path (typically sys.path). + """ + def __init__(self, path=None, include_egg=False): + """ + Create an instance from a path, optionally including legacy (distutils/ + setuptools/distribute) distributions. + :param path: The path to use, as a list of directories. If not specified, + sys.path is used. + :param include_egg: If True, this instance will look for and return legacy + distributions as well as those based on PEP 376. + """ + if path is None: + path = sys.path + self.path = path + self._include_dist = True + self._include_egg = include_egg + + self._cache = _Cache() + self._cache_egg = _Cache() + self._cache_enabled = True + self._scheme = get_scheme('default') + + def _get_cache_enabled(self): + return self._cache_enabled + + def _set_cache_enabled(self, value): + self._cache_enabled = value + + cache_enabled = property(_get_cache_enabled, _set_cache_enabled) + + def clear_cache(self): + """ + Clears the internal cache. + """ + self._cache.clear() + self._cache_egg.clear() + + + def _yield_distributions(self): + """ + Yield .dist-info and/or .egg(-info) distributions. + """ + for path in self.path: + realpath = os.path.realpath(path) + if not os.path.isdir(realpath): + continue + for dir in os.listdir(realpath): + dist_path = os.path.join(realpath, dir) + if self._include_dist and dir.endswith(DISTINFO_EXT): + yield new_dist_class(dist_path, env=self) + elif self._include_egg and dir.endswith(('.egg-info', + '.egg')): + yield old_dist_class(dist_path, self) + + def _generate_cache(self): + """ + Scan the path for distributions and populate the cache with + those that are found. + """ + gen_dist = not self._cache.generated + gen_egg = self._include_egg and not self._cache_egg.generated + if gen_dist or gen_egg: + for dist in self._yield_distributions(): + if isinstance(dist, InstalledDistribution): + self._cache.add(dist) + else: + self._cache_egg.add(dist) + + if gen_dist: + self._cache.generated = True + if gen_egg: + self._cache_egg.generated = True + + @classmethod + def distinfo_dirname(cls, name, version): + """ + The *name* and *version* parameters are converted into their + filename-escaped form, i.e. any ``'-'`` characters are replaced + with ``'_'`` other than the one in ``'dist-info'`` and the one + separating the name from the version number. + + :parameter name: is converted to a standard distribution name by replacing + any runs of non- alphanumeric characters with a single + ``'-'``. + :type name: string + :parameter version: is converted to a standard version string. Spaces + become dots, and all other non-alphanumeric characters + (except dots) become dashes, with runs of multiple + dashes condensed to a single dash. 
+        :type version: string
+        :returns: directory name
+        :rtype: string"""
+        name = name.replace('-', '_')
+        return '-'.join([name, version]) + DISTINFO_EXT
+
+    def get_distributions(self):
+        """
+        Provides an iterator that looks for distributions and returns
+        :class:`InstalledDistribution` or
+        :class:`EggInfoDistribution` instances for each one of them.
+
+        :rtype: iterator of :class:`InstalledDistribution` and
+                :class:`EggInfoDistribution` instances
+        """
+        if not self._cache_enabled:
+            for dist in self._yield_distributions():
+                yield dist
+        else:
+            self._generate_cache()
+
+            for dist in self._cache.path.values():
+                yield dist
+
+            if self._include_egg:
+                for dist in self._cache_egg.path.values():
+                    yield dist
+
+    def get_distribution(self, name):
+        """
+        Looks for a named distribution on the path.
+
+        This function only returns the first result found, as no more than one
+        value is expected. If nothing is found, ``None`` is returned.
+
+        :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
+                or ``None``
+        """
+        result = None
+        name = name.lower()
+        if not self._cache_enabled:
+            for dist in self._yield_distributions():
+                if dist.key == name:
+                    result = dist
+                    break
+        else:
+            self._generate_cache()
+
+            if name in self._cache.name:
+                result = self._cache.name[name][0]
+            elif self._include_egg and name in self._cache_egg.name:
+                result = self._cache_egg.name[name][0]
+        return result
+
+    def provides_distribution(self, name, version=None):
+        """
+        Iterates over all distributions to find which distributions provide
+        *name*. If a *version* is provided, it will be used to filter the
+        results. Every matching distribution is yielded, since more than one
+        distribution may provide the same name.
+
+        :parameter version: a version specifier that indicates the version
+                            required, conforming to the format in ``PEP-345``
+
+        :type name: string
+        :type version: string
+        """
+        matcher = None
+        if version is not None:
+            try:
+                matcher = self._scheme.matcher('%s (%s)' % (name, version))
+            except ValueError:
+                raise DistlibException('invalid name or version: %r, %r' %
+                                       (name, version))
+
+        for dist in self.get_distributions():
+            provided = dist.provides
+
+            for p in provided:
+                p_components = p.rsplit(' ', 1)
+                if len(p_components) == 1 or matcher is None:
+                    if name == p_components[0]:
+                        yield dist
+                        break
+                else:
+                    p_name, p_ver = p_components
+                    if len(p_ver) < 2 or p_ver[0] != '(' or p_ver[-1] != ')':
+                        raise DistlibException(
+                            'distribution %r has invalid Provides field: %r' %
+                            (dist.name, p))
+                    p_ver = p_ver[1:-1]  # trim off the parentheses
+                    if p_name == name and matcher.match(p_ver):
+                        yield dist
+                        break
+
+    def get_file_path(self, name, relative_path):
+        """
+        Return the path to a resource file.
+        """
+        dist = self.get_distribution(name)
+        if dist is None:
+            raise LookupError('no distribution named %r found' % name)
+        return dist.get_resource_path(relative_path)
+
+    def get_exported_entries(self, category, name=None):
+        """
+        Return all of the exported entries in a particular category.
+
+        :param category: The category to search for entries.
+        :param name: If specified, only entries with that name are returned.
+        """
+        for dist in self.get_distributions():
+            r = dist.exports
+            if category in r:
+                d = r[category]
+                if name is not None:
+                    if name in d:
+                        yield d[name]
+                else:
+                    for v in d.values():
+                        yield v
+
+class Distribution(object):
+    """
+    A base class for distributions, whether installed or from indexes.
+ Either way, it must have some metadata, so that's all that's needed + for construction. + """ + + build_time_dependency = False + """ + Set to True if it's known to be only a build-time dependency (i.e. + not needed after installation). + """ + + requested = False + """A boolean that indicates whether the ``REQUESTED`` metadata file is + present (in other words, whether the package was installed by user + request or it was installed as a dependency).""" + + def __init__(self, metadata): + """ + Initialise an instance. + :param metadata: The instance of :class:`Metadata` describing this + distribution. + """ + self.metadata = metadata + self.name = metadata.name + self.key = self.name.lower() # for case-insensitive comparisons + self.version = metadata.version + self.locator = None + self.md5_digest = None + self.extras = None # additional features requested during installation + + @property + def download_url(self): + """ + The download URL for this distribution. + """ + return self.metadata.download_url + + @property + def name_and_version(self): + """ + A utility property which displays the name and version in parentheses. + """ + return '%s (%s)' % (self.name, self.version) + + @property + def provides(self): + """ + A set of distribution names and versions provided by this distribution. + :return: A set of "name (version)" strings. + """ + plist = self.metadata['Provides-Dist'] + s = '%s (%s)' % (self.name, self.version) + if s not in plist: + plist.append(s) + return self.filter_requirements(plist) + + @property + def requires(self): + rlist = self.metadata['Requires-Dist'] + return self.filter_requirements(rlist) + + @property + def setup_requires(self): + rlist = self.metadata['Setup-Requires-Dist'] + return self.filter_requirements(rlist) + + @property + def test_requires(self): + rlist = self.metadata['Requires-Dist'] + return self.filter_requirements(rlist, extras=['test']) + + @property + def doc_requires(self): + rlist = self.metadata['Requires-Dist'] + return self.filter_requirements(rlist, extras=['doc']) + + def filter_requirements(self, rlist, context=None, extras=None): + result = set() + marked = [] + for req in rlist: + if ';' not in req: + result.add(req) + else: + marked.append(req.split(';', 1)) + if marked: + if context is None: + context = {} + if extras is None: + extras = self.extras + if not extras: + extras = [None] + else: + extras = list(extras) # leave original alone + extras.append(None) + for extra in extras: + context['extra'] = extra + for r, marker in marked: + if interpret(marker, context): + result.add(r.strip()) + return result + + def matches_requirement(self, req): + """ + Say if this instance matches (fulfills) a requirement. + :param req: The requirement to match. + :rtype req: str + :return: True if it matches, else False. 
+ """ + scheme = get_scheme(self.metadata.scheme) + try: + matcher = scheme.matcher(req) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + result = False + # Note this is similar to code in make_graph - to be refactored + for p in self.provides: + vm = scheme.matcher(p) + if vm.key != name: + continue + version = vm.exact_version + assert version + try: + result = matcher.match(version) + break + except UnsupportedVersionError: + pass + return result + + def __repr__(self): + """ + Return a textual representation of this instance, + """ + if self.download_url: + suffix = ' [%s]' % self.download_url + else: + suffix = '' + return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix) + + def __eq__(self, other): + """ + See if this distribution is the same as another. + :param other: The distribution to compare with. To be equal to one + another. distributions must have the same type, name, + version and download_url. + :return: True if it is the same, else False. + """ + if type(other) is not type(self): + result = False + else: + result = (self.name == other.name and + self.version == other.version and + self.download_url == other.download_url) + return result + + def __hash__(self): + """ + Compute hash in a way which matches the equality test. + """ + return hash(self.name) + hash(self.version) + hash(self.download_url) + + +class BaseInstalledDistribution(Distribution): + """ + This is the base class for installed distributions (whether PEP 376 or + legacy). + """ + + hasher = None + + def __init__(self, metadata, path, env=None): + """ + Initialise an instance. + :param metadata: An instance of :class:`Metadata` which describes the + distribution. This will normally have been initialised + from a metadata file in the ``path``. + :param path: The path of the ``.dist-info`` or ``.egg-info`` + directory for the distribution. + :param env: This is normally the :class:`DistributionPath` + instance where this distribution was found. + """ + super(BaseInstalledDistribution, self).__init__(metadata) + self.path = path + self.dist_path = env + + def get_hash(self, data, hasher=None): + """ + Get the hash of some data, using a particular hash algorithm, if + specified. + + :param data: The data to be hashed. + :type data: bytes + :param hasher: The name of a hash implementation, supported by hashlib, + or ``None``. Examples of valid values are ``'sha1'``, + ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and + ``'sha512'``. If no hasher is specified, the ``hasher`` + attribute of the :class:`InstalledDistribution` instance + is used. If the hasher is determined to be ``None``, MD5 + is used as the hashing algorithm. + :returns: The hash of the data. If a hasher was explicitly specified, + the returned hash will be prefixed with the specified hasher + followed by '='. + :rtype: str + """ + if hasher is None: + hasher = self.hasher + if hasher is None: + hasher = hashlib.md5 + prefix = '' + else: + hasher = getattr(hashlib, hasher) + prefix = '%s=' % self.hasher + digest = hasher(data).digest() + digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') + return '%s%s' % (prefix, digest) + +class InstalledDistribution(BaseInstalledDistribution): + """Created with the *path* of the ``.dist-info`` directory provided to the + constructor. 
+    It reads the metadata contained in ``METADATA`` when it is instantiated,
+    or uses a passed-in Metadata instance (useful when dry-run mode is being
+    used)."""
+
+    hasher = 'sha256'
+
+    def __init__(self, path, metadata=None, env=None):
+        if env and env._cache_enabled and path in env._cache.path:
+            metadata = env._cache.path[path].metadata
+        elif metadata is None:
+            metadata_path = os.path.join(path, 'METADATA')
+            metadata = Metadata(path=metadata_path, scheme='legacy')
+
+        super(InstalledDistribution, self).__init__(metadata, path, env)
+
+        if env and env._cache_enabled:
+            env._cache.add(self)
+
+        path = self.get_distinfo_file('REQUESTED')
+        self.requested = os.path.exists(path)
+
+    def __repr__(self):
+        return '<InstalledDistribution %r %s at %r>' % (
+            self.name, self.version, self.path)
+
+    def __str__(self):
+        return "%s %s" % (self.name, self.version)
+
+    def _get_records(self):
+        """
+        Get the list of installed files for the distribution.
+        :return: A list of tuples of path, hash and size. Note that hash and
+                 size might be ``None`` for some entries. The path is exactly
+                 as stored in the file (which is as in PEP 376).
+        """
+        results = []
+        path = self.get_distinfo_file('RECORD')
+        with CSVReader(path) as record_reader:
+            # Base location is parent dir of .dist-info dir
+            #base_location = os.path.dirname(self.path)
+            #base_location = os.path.abspath(base_location)
+            for row in record_reader:
+                missing = [None for i in range(len(row), 3)]
+                path, checksum, size = row + missing
+                #if not os.path.isabs(path):
+                #    path = path.replace('/', os.sep)
+                #    path = os.path.join(base_location, path)
+                results.append((path, checksum, size))
+        return results
+
+    @cached_property
+    def exports(self):
+        """
+        Return the information exported by this distribution.
+        :return: A dictionary of exports, mapping an export category to a list
+                 of :class:`ExportEntry` instances describing the individual
+                 export entries.
+        """
+        result = {}
+        rf = self.get_distinfo_file('EXPORTS')
+        if os.path.exists(rf):
+            result = self.read_exports(rf)
+        return result
+
+    def read_exports(self, filename=None):
+        """
+        Read exports data from a file in .ini format.
+        :param filename: An absolute pathname of the file to read. If not
+                         specified, the EXPORTS file in the .dist-info
+                         directory of the distribution is read.
+        :return: A dictionary of exports, mapping an export category to a list
+                 of :class:`ExportEntry` instances describing the individual
+                 export entries.
+        """
+        result = {}
+        rf = filename or self.get_distinfo_file('EXPORTS')
+        if os.path.exists(rf):
+            cp = configparser.ConfigParser()
+            cp.read(rf)
+            for key in cp.sections():
+                result[key] = entries = {}
+                for name, value in cp.items(key):
+                    s = '%s = %s' % (name, value)
+                    entry = get_export_entry(s)
+                    assert entry is not None
+                    entry.dist = self
+                    entries[name] = entry
+        return result
+
+    def write_exports(self, exports, filename=None):
+        """
+        Write a dictionary of exports to a file in .ini format.
+        :param exports: A dictionary of exports, mapping an export category to
+                        a list of :class:`ExportEntry` instances describing the
+                        individual export entries.
+        :param filename: The absolute pathname of the file to write to. If not
+                         specified, the EXPORTS file in the .dist-info
+                         directory is written to.
+ """ + rf = filename or self.get_distinfo_file('EXPORTS') + cp = configparser.ConfigParser() + for k, v in exports.items(): + # TODO check k, v for valid values + cp.add_section(k) + for entry in v.values(): + if entry.suffix is None: + s = entry.prefix + else: + s = '%s:%s' % (entry.prefix, entry.suffix) + if entry.flags: + s = '%s [%s]' % (s, ', '.join(entry.flags)) + cp.set(k, entry.name, s) + with open(rf, 'w') as f: + cp.write(f) + + def get_resource_path(self, relative_path): + """ + NOTE: This API may change in the future. + + Return the absolute path to a resource file with the given relative + path. + + :param relative_path: The path, relative to .dist-info, of the resource + of interest. + :return: The absolute path where the resource is to be found. + """ + path = self.get_distinfo_file('RESOURCES') + with CSVReader(path) as resources_reader: + for relative, destination in resources_reader: + if relative == relative_path: + return destination + raise KeyError('no resource file with relative path %r ' + 'is installed' % relative_path) + + def list_installed_files(self): + """ + Iterates over the ``RECORD`` entries and returns a tuple + ``(path, hash, size)`` for each line. + + :returns: iterator of (path, hash, size) + """ + for result in self._get_records(): + yield result + + def write_installed_files(self, paths, prefix, dry_run=False): + """ + Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any + existing ``RECORD`` file is silently overwritten. + + prefix is used to determine when to write absolute paths. + """ + prefix = os.path.join(prefix, '') + base = os.path.dirname(self.path) + base_under_prefix = base.startswith(prefix) + base = os.path.join(base, '') + record_path = os.path.join(self.path, 'RECORD') + logger.info('creating %s', record_path) + if dry_run: + return + with CSVWriter(record_path) as writer: + for path in paths: + if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): + # do not put size and hash, as in PEP-376 + hash_value = size = '' + else: + size = '%d' % os.path.getsize(path) + with open(path, 'rb') as fp: + hash_value = self.get_hash(fp.read()) + if path.startswith(base) or (base_under_prefix and + path.startswith(prefix)): + path = os.path.relpath(path, base) + writer.writerow((path, hash_value, size)) + + # add the RECORD file itself + if record_path.startswith(base): + record_path = os.path.relpath(record_path, base) + writer.writerow((record_path, '', '')) + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. 
+ """ + mismatches = [] + base = os.path.dirname(self.path) + record_path = os.path.join(self.path, 'RECORD') + for path, hash_value, size in self.list_installed_files(): + if not os.path.isabs(path): + path = os.path.join(base, path) + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + elif os.path.isfile(path): + actual_size = str(os.path.getsize(path)) + if size and actual_size != size: + mismatches.append((path, 'size', size, actual_size)) + elif hash_value: + if '=' in hash_value: + hasher = hash_value.split('=', 1)[0] + else: + hasher = None + + with open(path, 'rb') as f: + actual_hash = self.get_hash(f.read(), hasher) + if actual_hash != hash_value: + mismatches.append((path, 'hash', hash_value, actual_hash)) + return mismatches + + @cached_property + def shared_locations(self): + """ + A dictionary of shared locations whose keys are in the set 'prefix', + 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. + The corresponding value is the absolute path of that category for + this distribution, and takes into account any paths selected by the + user at installation time (e.g. via command-line arguments). In the + case of the 'namespace' key, this would be a list of absolute paths + for the roots of namespace packages in this distribution. + + The first time this property is accessed, the relevant information is + read from the SHARED file in the .dist-info directory. + """ + result = {} + shared_path = os.path.join(self.path, 'SHARED') + if os.path.isfile(shared_path): + with codecs.open(shared_path, 'r', encoding='utf-8') as f: + lines = f.read().splitlines() + for line in lines: + key, value = line.split('=', 1) + if key == 'namespace': + result.setdefault(key, []).append(value) + else: + result[key] = value + return result + + def write_shared_locations(self, paths, dry_run=False): + """ + Write shared location information to the SHARED file in .dist-info. + :param paths: A dictionary as described in the documentation for + :meth:`shared_locations`. + :param dry_run: If True, the action is logged but no file is actually + written. + :return: The path of the file written to. + """ + shared_path = os.path.join(self.path, 'SHARED') + logger.info('creating %s', shared_path) + if dry_run: + return + lines = [] + for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): + path = paths[key] + if os.path.isdir(paths[key]): + lines.append('%s=%s' % (key, path)) + for ns in paths.get('namespace', ()): + lines.append('namespace=%s' % ns) + + with codecs.open(shared_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + return shared_path + + def get_distinfo_file(self, path): + """ + Returns a path located under the ``.dist-info`` directory. Returns a + string representing the path. + + :parameter path: a ``'/'``-separated path relative to the + ``.dist-info`` directory or an absolute path; + If *path* is an absolute path and doesn't start + with the ``.dist-info`` directory path, + a :class:`DistlibException` is raised + :type path: string + :rtype: str + """ + # Check if it is an absolute path # XXX use relpath, add tests + if path.find(os.sep) >= 0: + # it's an absolute path? 
+ distinfo_dirname, path = path.split(os.sep)[-2:] + if distinfo_dirname != self.path.split(os.sep)[-1]: + raise DistlibException( + 'dist-info file %r does not belong to the %r %s ' + 'distribution' % (path, self.name, self.version)) + + # The file must be relative + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: %r' % + path) + + return os.path.join(self.path, path) + + def list_distinfo_files(self): + """ + Iterates over the ``RECORD`` entries and returns paths for each line if + the path is pointing to a file located in the ``.dist-info`` directory + or one of its subdirectories. + + :returns: iterator of paths + """ + base = os.path.dirname(self.path) + for path, checksum, size in self._get_records(): + # XXX add separator or use real relpath algo + if not os.path.isabs(path): + path = os.path.join(base, path) + if path.startswith(self.path): + yield path + + def __eq__(self, other): + return (isinstance(other, InstalledDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + + +class EggInfoDistribution(BaseInstalledDistribution): + """Created with the *path* of the ``.egg-info`` directory or file provided + to the constructor. It reads the metadata contained in the file itself, or + if the given path happens to be a directory, the metadata is read from the + file ``PKG-INFO`` under that directory.""" + + requested = True # as we have no way of knowing, assume it was + shared_locations = {} + + def __init__(self, path, env=None): + def set_name_and_version(s, n, v): + s.name = n + s.key = n.lower() # for case-insensitive comparisons + s.version = v + + self.path = path + self.dist_path = env + if env and env._cache_enabled and path in env._cache_egg.path: + metadata = env._cache_egg.path[path].metadata + set_name_and_version(self, metadata['Name'], metadata['Version']) + else: + metadata = self._get_metadata(path) + + # Need to be set before caching + set_name_and_version(self, metadata['Name'], metadata['Version']) + + if env and env._cache_enabled: + env._cache_egg.add(self) + super(EggInfoDistribution, self).__init__(metadata, path, env) + + def _get_metadata(self, path): + requires = None + + def parse_requires(req_path): + """Create a list of dependencies from a requires.txt file. + + *req_path* must be the path to a setuptools-produced requires.txt file. 
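+
+            For example (contents illustrative), a requires.txt of::
+
+                six
+                requests>=1.2
+                [ssl]
+                pyOpenSSL
+
+            would yield roughly ``['six', 'requests (>=1.2)']``; scanning
+            stops at the first ``[section]`` header.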
+ """ + + reqs = [] + try: + with open(req_path, 'r') as fp: + lines = fp.read().splitlines() + except IOError: + return reqs + + for line in lines: + line = line.strip() + if line.startswith('['): + logger.warning('Unexpected line: quitting requirement scan: %r', + line) + break + r = parse_requirement(line) + if not r: + logger.warning('Not recognised as a requirement: %r', line) + continue + if r.extras: + logger.warning('extra requirements in requires.txt are ' + 'not supported') + if not r.constraints: + reqs.append(r.name) + else: + cons = ', '.join('%s%s' % c for c in r.constraints) + reqs.append('%s (%s)' % (r.name, cons)) + return reqs + + if path.endswith('.egg'): + if os.path.isdir(path): + meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO') + metadata = Metadata(path=meta_path, scheme='legacy') + req_path = os.path.join(path, 'EGG-INFO', 'requires.txt') + requires = parse_requires(req_path) + else: + # FIXME handle the case where zipfile is not available + zipf = zipimport.zipimporter(path) + fileobj = StringIO( + zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) + metadata = Metadata(fileobj=fileobj, scheme='legacy') + try: + requires = zipf.get_data('EGG-INFO/requires.txt') + except IOError: + requires = None + elif path.endswith('.egg-info'): + if os.path.isdir(path): + path = os.path.join(path, 'PKG-INFO') + req_path = os.path.join(path, 'requires.txt') + requires = parse_requires(req_path) + metadata = Metadata(path=path, scheme='legacy') + else: + raise DistlibException('path must end with .egg-info or .egg, ' + 'got %r' % path) + + if requires: + if metadata['Metadata-Version'] == '1.1': + # we can't have 1.1 metadata *and* Setuptools requires + for field in ('Obsoletes', 'Requires', 'Provides'): + if field in metadata: + del metadata[field] + metadata['Requires-Dist'] += requires + return metadata + + def __repr__(self): + return '<EggInfoDistribution %r %s at %r>' % ( + self.name, self.version, self.path) + + def __str__(self): + return "%s %s" % (self.name, self.version) + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. + """ + mismatches = [] + record_path = os.path.join(self.path, 'installed-files.txt') + if os.path.exists(record_path): + for path, hash, size in self.list_installed_files(): + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + return mismatches + + def list_installed_files(self, local=False): + """ + Iterates over the ``installed-files.txt`` entries and returns a tuple + ``(path, hash, size)`` for each line. 
+ + :returns: a list of (path, hash, size) + """ + + def _md5(path): + f = open(path, 'rb') + try: + content = f.read() + finally: + f.close() + return hashlib.md5(content).hexdigest() + + def _size(path): + return os.stat(path).st_size + + record_path = os.path.join(self.path, 'installed-files.txt') + result = [] + if os.path.exists(record_path): + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + p = os.path.normpath(os.path.join(self.path, line)) + # "./" is present as a marker between installed files + # and installation metadata files + if not os.path.exists(p): + logger.warning('Non-existent file: %s', p) + if p.endswith(('.pyc', '.pyo')): + continue + #otherwise fall through and fail + if not os.path.isdir(p): + result.append((p, _md5(p), _size(p))) + result.append((record_path, None, None)) + return result + + def list_distinfo_files(self, local=False): + """ + Iterates over the ``installed-files.txt`` entries and returns paths for + each line if the path is pointing to a file located in the + ``.egg-info`` directory or one of its subdirectories. + + :parameter local: If *local* is ``True``, each returned path is + transformed into a local absolute path. Otherwise the + raw value from ``installed-files.txt`` is returned. + :type local: boolean + :returns: iterator of paths + """ + record_path = os.path.join(self.path, 'installed-files.txt') + skip = True + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + if line == './': + skip = False + continue + if not skip: + p = os.path.normpath(os.path.join(self.path, line)) + if p.startswith(self.path): + if local: + yield p + else: + yield line + + def __eq__(self, other): + return (isinstance(other, EggInfoDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + +new_dist_class = InstalledDistribution +old_dist_class = EggInfoDistribution + + +class DependencyGraph(object): + """ + Represents a dependency graph between distributions. + + The dependency relationships are stored in an ``adjacency_list`` that maps + distributions to a list of ``(other, label)`` tuples where ``other`` + is a distribution and the edge is labeled with ``label`` (i.e. the version + specifier, if such was provided). Also, for more efficient traversal, for + every distribution ``x``, a list of predecessors is kept in + ``reverse_list[x]``. An edge from distribution ``a`` to + distribution ``b`` means that ``a`` depends on ``b``. If any missing + dependencies are found, they are stored in ``missing``, which is a + dictionary that maps distributions to a list of requirements that were not + provided by any other distributions. + """ + + def __init__(self): + self.adjacency_list = {} + self.reverse_list = {} + self.missing = {} + + def add_distribution(self, distribution): + """Add the *distribution* to the graph. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + """ + self.adjacency_list[distribution] = [] + self.reverse_list[distribution] = [] + #self.missing[distribution] = [] + + def add_edge(self, x, y, label=None): + """Add an edge from distribution *x* to distribution *y* with the given + *label*. 
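+
+        A small sketch (the distribution objects are illustrative)::
+
+            graph = DependencyGraph()
+            graph.add_distribution(a)
+            graph.add_distribution(b)
+            graph.add_edge(a, b, 'b (>= 1.0)')   # a depends on b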
+ + :type x: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type y: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type label: ``str`` or ``None`` + """ + self.adjacency_list[x].append((y, label)) + # multiple edges are allowed, so be careful + if x not in self.reverse_list[y]: + self.reverse_list[y].append(x) + + def add_missing(self, distribution, requirement): + """ + Add a missing *requirement* for the given *distribution*. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + :type requirement: ``str`` + """ + logger.debug('%s missing %r', distribution, requirement) + self.missing.setdefault(distribution, []).append(requirement) + + def _repr_dist(self, dist): + return '%s %s' % (dist.name, dist.version) + + def repr_node(self, dist, level=1): + """Prints only a subgraph""" + output = [self._repr_dist(dist)] + for other, label in self.adjacency_list[dist]: + dist = self._repr_dist(other) + if label is not None: + dist = '%s [%s]' % (dist, label) + output.append(' ' * level + str(dist)) + suboutput = self.repr_node(other, level + 1) + subs = suboutput.split('\n') + output.extend(subs[1:]) + return '\n'.join(output) + + def to_dot(self, f, skip_disconnected=True): + """Writes a DOT output for the graph to the provided file *f*. + + If *skip_disconnected* is set to ``True``, then all distributions + that are not dependent on any other distribution are skipped. + + :type f: has to support ``file``-like operations + :type skip_disconnected: ``bool`` + """ + disconnected = [] + + f.write("digraph dependencies {\n") + for dist, adjs in self.adjacency_list.items(): + if len(adjs) == 0 and not skip_disconnected: + disconnected.append(dist) + for other, label in adjs: + if not label is None: + f.write('"%s" -> "%s" [label="%s"]\n' % + (dist.name, other.name, label)) + else: + f.write('"%s" -> "%s"\n' % (dist.name, other.name)) + if not skip_disconnected and len(disconnected) > 0: + f.write('subgraph disconnected {\n') + f.write('label = "Disconnected"\n') + f.write('bgcolor = red\n') + + for dist in disconnected: + f.write('"%s"' % dist.name) + f.write('\n') + f.write('}\n') + f.write('}\n') + + def topological_sort(self): + """ + Perform a topological sort of the graph. + :return: A tuple, the first element of which is a topologically sorted + list of distributions, and the second element of which is a + list of distributions that cannot be sorted because they have + circular dependencies and so form a cycle. + """ + result = [] + # Make a shallow copy of the adjacency list + alist = {} + for k, v in self.adjacency_list.items(): + alist[k] = v[:] + while True: + # See what we can remove in this run + to_remove = [] + for k, v in list(alist.items())[:]: + if not v: + to_remove.append(k) + del alist[k] + if not to_remove: + # What's left in alist (if anything) is a cycle. 
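+                # (no node could be removed this pass: every remaining node
+                # still has an outgoing edge, so the leftovers form one or
+                # more cycles and are returned as the second element)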
+ break + # Remove from the adjacency list of others + for k, v in alist.items(): + alist[k] = [(d, r) for d, r in v if d not in to_remove] + logger.debug('Moving to result: %s', + ['%s (%s)' % (d.name, d.version) for d in to_remove]) + result.extend(to_remove) + return result, list(alist.keys()) + + def __repr__(self): + """Representation of the graph""" + output = [] + for dist, adjs in self.adjacency_list.items(): + output.append(self.repr_node(dist)) + return '\n'.join(output) + + +def make_graph(dists, scheme='default'): + """Makes a dependency graph from the given distributions. + + :parameter dists: a list of distributions + :type dists: list of :class:`distutils2.database.InstalledDistribution` and + :class:`distutils2.database.EggInfoDistribution` instances + :rtype: a :class:`DependencyGraph` instance + """ + scheme = get_scheme(scheme) + graph = DependencyGraph() + provided = {} # maps names to lists of (version, dist) tuples + + # first, build the graph and find out what's provided + for dist in dists: + graph.add_distribution(dist) + + for p in dist.provides: + comps = p.strip().rsplit(" ", 1) + name = comps[0] + version = None + if len(comps) == 2: + version = comps[1] + if len(version) < 3 or version[0] != '(' or version[-1] != ')': + logger.warning('distribution %r has ill-formed ' + 'provides field: %r', dist.name, p) + continue + # don't raise an exception. Legacy installed distributions + # could have all manner of metadata + #raise DistlibException('distribution %r has ill-formed ' + # 'provides field: %r' % (dist.name, p)) + version = version[1:-1] # trim off parenthesis + # Add name in lower case for case-insensitivity + name = name.lower() + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + provided.setdefault(name, []).append((version, dist)) + + # now make the edges + for dist in dists: + requires = (dist.requires | dist.setup_requires) + for req in requires: + try: + matcher = scheme.matcher(req) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + matched = False + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + graph.add_edge(dist, provider, req) + matched = True + break + if not matched: + graph.add_missing(dist, req) + return graph + + +def get_dependent_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + dependent on *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + dep = [dist] # dependent distributions + todo = graph.reverse_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop() + dep.append(d) + for succ in graph.reverse_list[d]: + if succ not in dep: + todo.append(succ) + + dep.pop(0) # remove dist from dep, was there to prevent infinite loops + return dep + +def get_required_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + required by *dist*. 
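+
+    A hedged usage sketch (assumes the distributions have already been
+    collected, e.g. via :class:`DistributionPath`)::
+
+        dists = list(DistributionPath().get_distributions())
+        needed = get_required_dists(dists, dists[0])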
+
+    :param dists: a list of distributions
+    :param dist: a distribution, member of *dists* for which we are interested
+    """
+    if dist not in dists:
+        raise DistlibException('given distribution %r is not a member '
+                               'of the list' % dist.name)
+    graph = make_graph(dists)
+
+    req = []  # required distributions
+    todo = graph.adjacency_list[dist]  # list of nodes we should inspect
+
+    while todo:
+        d = todo.pop()[0]
+        req.append(d)
+        for pred in graph.adjacency_list[d]:
+            if pred not in req:
+                todo.append(pred)
+
+    return req
+
+def make_dist(name, version, **kwargs):
+    """
+    A convenience method for making a dist given just a name and version.
+    """
+    md = Metadata(**kwargs)
+    md['Name'] = name
+    md['Version'] = version
+    return Distribution(md)
diff --git a/awx/lib/site-packages/pip/vendor/distlib/index.py b/awx/lib/site-packages/pip/vendor/distlib/index.py
new file mode 100644
index 0000000000..282f6b7455
--- /dev/null
+++ b/awx/lib/site-packages/pip/vendor/distlib/index.py
@@ -0,0 +1,515 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import hashlib
+import itertools
+import logging
+import os
+import shutil
+import socket
+from string import ascii_lowercase
+import subprocess
+import tempfile
+from threading import Thread
+
+from distlib import DistlibException
+from distlib.compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
+                            urlparse, build_opener)
+from distlib.util import cached_property, zip_dir
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_MIRROR_HOST = 'last.pypi.python.org'
+DEFAULT_INDEX = 'http://pypi.python.org/pypi'
+DEFAULT_REALM = 'pypi'
+
+class PackageIndex(object):
+    """
+    This class represents a package index compatible with PyPI, the Python
+    Package Index.
+    """
+
+    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
+
+    def __init__(self, url=None, mirror_host=None):
+        """
+        Initialise an instance.
+
+        :param url: The URL of the index. If not specified, the URL for PyPI is
+                    used.
+        :param mirror_host: If not specified, ``last.pypi.python.org`` is used.
+                            This is expected to have a canonical name which
+                            allows all mirror hostnames to be divined (e.g. if
+                            the canonical hostname for ``last.pypi.python.org``
+                            is ``g.pypi.python.org``, then the mirrors that are
+                            available would be assumed to be
+                            ``a.pypi.python.org``, ``b.pypi.python.org``, ...
+                            up to and including ``g.pypi.python.org``).
+        """
+        self.url = url or DEFAULT_INDEX
+        self.mirror_host = mirror_host or DEFAULT_MIRROR_HOST
+        self.read_configuration()
+        scheme, netloc, path, params, query, frag = urlparse(self.url)
+        if params or query or frag or scheme not in ('http', 'https'):
+            raise DistlibException('invalid repository: %s' % self.url)
+        self.password_handler = None
+        self.ssl_verifier = None
+        self.gpg = None
+        self.gpg_home = None
+        with open(os.devnull, 'w') as sink:
+            for s in ('gpg2', 'gpg'):
+                try:
+                    rc = subprocess.check_call([s, '--version'], stdout=sink,
+                                               stderr=sink)
+                    if rc == 0:
+                        self.gpg = s
+                        break
+                except OSError:
+                    pass
+
+    def _get_pypirc_command(self):
+        """
+        Get the distutils command for interacting with PyPI configurations.
+        :return: the command.
+ """ + from distutils.core import Distribution + from distutils.config import PyPIRCCommand + d = Distribution() + return PyPIRCCommand(d) + + def read_configuration(self): + """ + Read the PyPI access configuration as supported by distutils, getting + PyPI to do the acutal work. This populates ``username``, ``password``, + ``realm`` and ``url`` attributes from the configuration. + """ + # get distutils to do the work + c = self._get_pypirc_command() + c.repository = self.url + cfg = c._read_pypirc() + self.username = cfg.get('username') + self.password = cfg.get('password') + self.realm = cfg.get('realm', 'pypi') + self.url = cfg.get('repository', self.url) + + def save_configuration(self): + """ + Save the PyPI access configuration. You must have set ``username`` and + ``password`` attributes before calling this method. + + Again, distutils is used to do the actual work. + """ + self.check_credentials() + # get distutils to do the work + c = self._get_pypirc_command() + c._store_pypirc(self.username, self.password) + + def check_credentials(self): + """ + Check that ``username`` and ``password`` have been set, and raise an + exception if not. + """ + if self.username is None or self.password is None: + raise DistlibException('username and password must be set') + pm = HTTPPasswordMgr() + _, netloc, _, _, _, _ = urlparse(self.url) + pm.add_password(self.realm, netloc, self.username, self.password) + self.password_handler = HTTPBasicAuthHandler(pm) + + def register(self, metadata): + """ + Register a distribution on PyPI, using the provided metadata. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the distribution to be + registered. + :return: The HTTP response received from PyPI upon submission of the + request. + """ + self.check_credentials() + missing, warnings = metadata.check(True) # strict check + logger.debug('result of check: missing: %s, warnings: %s', + missing, warnings) + d = metadata.todict(True) + d[':action'] = 'verify' + request = self.encode_request(d.items(), []) + response = self.send_request(request) + d[':action'] = 'submit' + request = self.encode_request(d.items(), []) + return self.send_request(request) + + def _reader(self, name, stream, outbuf): + """ + Thread runner for reading lines of from a subprocess into a buffer. + + :param name: The logical name of the stream (used for logging only). + :param stream: The stream to read from. This will typically a pipe + connected to the output stream of a subprocess. + :param outbuf: The list to append the read lines to. + """ + while True: + s = stream.readline() + if not s: + break + s = s.decode('utf-8').rstrip() + outbuf.append(s) + logger.debug('%s: %s' % (name, s)) + stream.close() + + def get_sign_command(self, filename, signer, sign_password): + """ + Return a suitable command for signing a file. + + :param filename: The pathname to the file to be signed. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :return: The signing command as a list suitable to be + passed to :class:`subprocess.Popen`. 
+ """ + cmd = [self.gpg, '--status-fd', '2', '--no-tty'] + if self.gpg_home: + cmd.extend(['--homedir', self.gpg_home]) + if sign_password is not None: + cmd.extend(['--batch', '--passphrase-fd', '0']) + td = tempfile.mkdtemp() + sf = os.path.join(td, os.path.basename(filename) + '.asc') + cmd.extend(['--detach-sign', '--armor', '--local-user', + signer, '--output', sf, filename]) + logger.debug('invoking: %s', ' '.join(cmd)) + return cmd, sf + + def run_command(self, cmd, input_data=None): + """ + Run a command in a child process , passing it any input data specified. + + :param cmd: The command to run. + :param input_data: If specified, this must be a byte string containing + data to be sent to the child process. + :return: A tuple consisting of the subprocess' exit code, a list of + lines read from the subprocess' ``stdout``, and a list of + lines read from the subprocess' ``stderr``. + """ + kwargs = { + 'stdout': subprocess.PIPE, + 'stderr': subprocess.PIPE, + } + if input_data is not None: + kwargs['stdin'] = subprocess.PIPE + stdout = [] + stderr = [] + p = subprocess.Popen(cmd, **kwargs) + # We don't use communicate() here because we may need to + # get clever with interacting with the command + t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) + t1.start() + t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) + t2.start() + if input_data is not None: + p.stdin.write(input_data) + p.stdin.close() + + p.wait() + t1.join() + t2.join() + return p.returncode, stdout, stderr + + def sign_file(self, filename, signer, sign_password): + """ + Sign a file. + + :param filename: The pathname to the file to be signed. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :return: The absolute pathname of the file where the signature is + stored. + """ + cmd, sig_file = self.get_sign_command(filename, signer, sign_password) + rc, stdout, stderr = self.run_command(cmd, + sign_password.encode('utf-8')) + if rc != 0: + raise DistlibException('sign command failed with error ' + 'code %s' % rc) + return sig_file + + def upload_file(self, metadata, filename, signer=None, sign_password=None, + filetype='sdist', pyversion='source'): + """ + Upload a release file to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the file to be uploaded. + :param filename: The pathname of the file to be uploaded. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param filetype: The type of the file being uploaded. This is the + distutils command which produced that file, e.g. + ``sdist`` or ``bdist_wheel``. + :param pyversion: The version of Python which the release relates + to. For code compatible with any Python, this would + be ``source``, otherwise it would be e.g. ``3.2``. + :return: The HTTP response received from PyPI upon submission of the + request. 
+ """ + self.check_credentials() + if not os.path.exists(filename): + raise DistlibException('not found: %s' % filename) + missing, warnings = metadata.check(True) # strict check + logger.debug('result of check: missing: %s, warnings: %s', + missing, warnings) + d = metadata.todict(True) + sig_file = None + if signer: + if not self.gpg: + logger.warning('no signing program available - not signed') + else: + sig_file = self.sign_file(filename, signer, sign_password) + with open(filename, 'rb') as f: + file_data = f.read() + digest = hashlib.md5(file_data).hexdigest() + d.update({ + ':action': 'file_upload', + 'protcol_version': '1', + 'filetype': filetype, + 'pyversion': pyversion, + 'md5_digest': digest, + }) + files = [('content', os.path.basename(filename), file_data)] + if sig_file: + with open(sig_file, 'rb') as f: + sig_data = f.read() + files.append(('gpg_signature', os.path.basename(sig_file), + sig_data)) + shutil.rmtree(os.path.dirname(sig_file)) + logger.debug('files: %s', files) + request = self.encode_request(d.items(), files) + return self.send_request(request) + + def upload_documentation(self, metadata, doc_dir): + """ + Upload documentation to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the documentation to be + uploaded. + :param doc_dir: The pathname of the directory which contains the + documentation. This should be the directory that + contains the ``index.html`` for the documentation. + :return: The HTTP response received from PyPI upon submission of the + request. + """ + self.check_credentials() + if not os.path.isdir(doc_dir): + raise DistlibException('not a directory: %r' % doc_dir) + fn = os.path.join(doc_dir, 'index.html') + if not os.path.exists(fn): + raise DistlibException('not found: %r' % fn) + missing, warnings = metadata.check(True) # strict check + logger.debug('result of check: missing: %s, warnings: %s', + missing, warnings) + name, version = metadata.name, metadata.version + zip_data = zip_dir(doc_dir).getvalue() + fields = [(':action', 'doc_upload'), + ('name', name), ('version', version)] + files = [('content', name, zip_data)] + request = self.encode_request(fields, files) + return self.send_request(request) + + def get_verify_command(self, signature_filename, data_filename): + """ + Return a suitable command for verifying a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :return: The verifying command as a list suitable to be + passed to :class:`subprocess.Popen`. + """ + cmd = [self.gpg, '--status-fd', '2', '--no-tty'] + if self.gpg_home: + cmd.extend(['--homedir', self.gpg_home]) + cmd.extend(['--verify', signature_filename, data_filename]) + logger.debug('invoking: %s', ' '.join(cmd)) + return cmd + + def verify_signature(self, signature_filename, data_filename): + """ + Verify a signature for a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :return: True if the signature was verified, else False. 
+ """ + if not self.gpg: + raise DistlibException('verification unavailable because gpg ' + 'unavailable') + cmd = self.get_verify_command(signature_filename, data_filename) + rc, stdout, stderr = self.run_command(cmd) + if rc not in (0, 1): + raise DistlibException('verify command failed with error ' + 'code %s' % rc) + return rc == 0 + + def download_file(self, url, destfile, digest=None, reporthook=None): + """ + This is a convenience method for downloading a file from an URL. + Normally, this will be a file from the index, though currently + no check is made for this (i.e. a file can be downloaded from + anywhere). + + The method is just like the :func:`urlretrieve` function in the + standard library, except that it allows digest computation to be + done during download and checking that the downloaded data + matched any expected value. + + :param url: The URL of the file to be downloaded (assumed to be + available via an HTTP GET request). + :param destfile: The pathname where the downloaded file is to be + saved. + :param digest: If specified, this must be a (hasher, value) + tuple, where hasher is the algorithm used (e.g. + ``'md5'``) and ``value`` is the expected value. + :param reporthook: The same as for :func:`urlretrieve` in the + standard library. + """ + if digest is None: + digester = None + else: + if isinstance(digest, (list, tuple)): + hasher, digest = digest + else: + hasher = 'md5' + digester = getattr(hashlib, hasher)() + # The following code is equivalent to urlretrieve. + # We need to do it this way so that we can compute the + # digest of the file as we go. + with open(destfile, 'wb') as dfp: + # addinfourl is not a context manager on 2.x + # so we have to use try/finally + sfp = self.send_request(Request(url)) + try: + headers = sfp.info() + blocksize = 8192 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + if reporthook: + reporthook(blocknum, blocksize, size) + while True: + block = sfp.read(blocksize) + if not block: + break + read += len(block) + dfp.write(block) + if digester: + digester.update(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, blocksize, size) + finally: + sfp.close() + + # check that we got the whole file, if we can + if size >= 0 and read < size: + raise DistlibException( + 'retrieval incomplete: got only %d out of %d bytes' + % (read, size)) + # if we have a digest, it must match. + if digester: + actual = digester.hexdigest() + if digest != actual: + raise DistlibException('MD5 digest mismatch for %s: expected ' + '%s, got %s' % (destfile, digest, + actual)) + + def send_request(self, req): + """ + Send a standard library :class:`Request` to PyPI and return its + response. + + :param req: The request to send. + :return: The HTTP response from PyPI (a standard library HTTPResponse). + """ + handlers = [] + if self.password_handler: + handlers.append(self.password_handler) + if self.ssl_verifier: + handlers.append(self.ssl_verifier) + opener = build_opener(*handlers) + return opener.open(req) + + def encode_request(self, fields, files): + """ + Encode fields and files for posting to an HTTP server. + + :param fields: The fields to send as a list of (fieldname, value) + tuples. + :param files: The files to send as a list of (fieldname, filename, + file_bytes) tuple. 
+ """ + # Adapted from packaging, which in turn was adapted from + # http://code.activestate.com/recipes/146306 + + parts = [] + boundary = self.boundary + for k, values in fields: + if not isinstance(values, (list, tuple)): + values = [values] + + for v in values: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"' % + k).encode('utf-8'), + b'', + v.encode('utf-8'))) + for key, filename, value in files: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"; filename="%s"' % + (key, filename)).encode('utf-8'), + b'', + value)) + + parts.extend((b'--' + boundary + b'--', b'')) + + body = b'\r\n'.join(parts) + ct = b'multipart/form-data; boundary=' + boundary + headers = { + 'Content-type': ct, + 'Content-length': str(len(body)) + } + return Request(self.url, body, headers) + + @cached_property + def mirrors(self): + """ + Return the list of hostnames which are mirrors for this index. + :return: A (possibly empty) list of hostnames of mirrors. + """ + result = [] + try: + host = socket.gethostbyname_ex(self.mirror_host)[0] + except socket.gaierror: # pragma: no cover + host = None + if host: + last, rest = host.split('.', 1) + n = len(last) + host_list = (''.join(w) for w in itertools.chain.from_iterable( + itertools.product(ascii_lowercase, repeat=i) + for i in range(1, n + 1))) + for s in host_list: + result.append('.'.join((s, rest))) + if s == last: + break + return result diff --git a/awx/lib/site-packages/pip/vendor/distlib/locators.py b/awx/lib/site-packages/pip/vendor/distlib/locators.py new file mode 100644 index 0000000000..3dab8a2fcb --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/locators.py @@ -0,0 +1,1131 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# + +import gzip +from io import BytesIO +import json +import logging +import os +import posixpath +import re +import threading +import zlib + +from . import DistlibException +from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, + queue, quote, unescape, string_types, build_opener, + HTTPRedirectHandler as BaseRedirectHandler, + Request, HTTPError, URLError) +from .database import Distribution, DistributionPath, make_dist +from .metadata import Metadata +from .util import (cached_property, parse_credentials, ensure_slash, + split_filename, get_project_data, parse_requirement, + ServerProxy) +from .version import get_scheme, UnsupportedVersionError +from .wheel import Wheel, is_compatible + +logger = logging.getLogger(__name__) + +MD5_HASH = re.compile('^md5=([a-f0-9]+)$') +CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) +HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') +DEFAULT_INDEX = 'http://python.org/pypi' + +def get_all_distribution_names(url=None): + """ + Return all distribution names known by an index. + :param url: The URL of the index. + :return: A list of all known distribution names. + """ + if url is None: + url = DEFAULT_INDEX + client = ServerProxy(url, timeout=3.0) + return client.list_packages() + +class RedirectHandler(BaseRedirectHandler): + """ + A class to work around a bug in some Python 3.2.x releases. + """ + # There's a bug in the base version for some 3.2.x + # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header + # returns e.g. 
/abc, it bails because it says the scheme '' + # is bogus, when actually it should use the request's + # URL for the scheme. See Python issue #13696. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. + newurl = None + for key in ('location', 'uri'): + if key in headers: + newurl = headers[key] + break + if newurl is None: + return + urlparts = urlparse(newurl) + if urlparts.scheme == '': + newurl = urljoin(req.get_full_url(), newurl) + if hasattr(headers, 'replace_header'): + headers.replace_header(key, newurl) + else: + headers[key] = newurl + return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, + headers) + + http_error_301 = http_error_303 = http_error_307 = http_error_302 + +class Locator(object): + """ + A base class for locators - things that locate distributions. + """ + source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') + binary_extensions = ('.egg', '.exe', '.whl') + excluded_extensions = ('.pdf',) + + # A list of tags indicating which wheels you want to match. The default + # value of None matches against the tags compatible with the running + # Python. If you want to match other values, set wheel_tags on a locator + # instance to a list of tuples (pyver, abi, arch) which you want to match. + wheel_tags = None + + downloadable_extensions = source_extensions + ('.whl',) + + def __init__(self, scheme='default'): + """ + Initialise an instance. + :param scheme: Because locators look for most recent versions, they + need to know the version scheme to use. This specifies + the current PEP-recommended scheme - use ``'legacy'`` + if you need to support existing distributions on PyPI. + """ + self._cache = {} + self.scheme = scheme + # Because of bugs in some of the handlers on some of the platforms, + # we use our own opener rather than just using urlopen. + self.opener = build_opener(RedirectHandler()) + + def clear_cache(self): + self._cache.clear() + + def _get_scheme(self): + return self._scheme + + def _set_scheme(self, value): + self._scheme = value + + scheme = property(_get_scheme, _set_scheme) + + def _get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This should be implemented in subclasses. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This calls _get_project to do all the work, and just implements a caching layer on top. + """ + if self._cache is None: + result = self._get_project(name) + elif name in self._cache: + result = self._cache[name] + else: + result = self._get_project(name) + self._cache[name] = result + return result + + def score_url(self, url): + """ + Give an url a score which can be used to choose preferred URLs + for a given project release. + """ + t = urlparse(url) + return (t.scheme != 'https', 'pypi.python.org' in t.netloc, + posixpath.basename(t.path)) + + def prefer_url(self, url1, url2): + """ + Choose one of two URLs where both are candidates for distribution + archives for the same version of a distribution (for example, + .tar.gz vs. zip). 
+
+        The current implementation favours http:// URLs over https://, archives
+        from PyPI over those from other locations and then the archive name.
+        """
+        if url1 == 'UNKNOWN':
+            result = url2
+        else:
+            result = url2
+            s1 = self.score_url(url1)
+            s2 = self.score_url(url2)
+            if s1 > s2:
+                result = url1
+            if result != url2:
+                logger.debug('Not replacing %r with %r', url1, url2)
+            else:
+                logger.debug('Replacing %r with %r', url1, url2)
+        return result
+
+    def split_filename(self, filename, project_name):
+        """
+        Attempt to split a filename into project name, version and Python
+        version.
+        """
+        return split_filename(filename, project_name)
+
+    def convert_url_to_download_info(self, url, project_name):
+        """
+        See if a URL is a candidate for a download URL for a project (the URL
+        has typically been scraped from an HTML page).
+
+        If it is, a dictionary is returned with keys "name", "version",
+        "filename" and "url"; otherwise, None is returned.
+        """
+        def same_project(name1, name2):
+            name1, name2 = name1.lower(), name2.lower()
+            if name1 == name2:
+                result = True
+            else:
+                # distribute replaces '-' by '_' in project names, so it
+                # can tell where the version starts in a filename.
+                result = name1.replace('_', '-') == name2.replace('_', '-')
+            return result
+
+        result = None
+        scheme, netloc, path, params, query, frag = urlparse(url)
+        if frag.lower().startswith('egg='):
+            logger.debug('%s: version hint in fragment: %r',
+                         project_name, frag)
+        origpath = path
+        if path and path[-1] == '/':
+            path = path[:-1]
+        if path.endswith('.whl'):
+            try:
+                wheel = Wheel(path)
+                if is_compatible(wheel, self.wheel_tags):
+                    if project_name is None:
+                        include = True
+                    else:
+                        include = same_project(wheel.name, project_name)
+                    if include:
+                        result = {
+                            'name': wheel.name,
+                            'version': wheel.version,
+                            'filename': wheel.filename,
+                            'url': urlunparse((scheme, netloc, origpath,
+                                               params, query, '')),
+                            'python-version': ', '.join(
+                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
+                        }
+                        m = MD5_HASH.match(frag)
+                        if m:
+                            result['md5_digest'] = m.group(1)
+            except Exception as e:
+                logger.warning('invalid path for wheel: %s', path)
+        elif path.endswith(self.downloadable_extensions):
+            path = filename = posixpath.basename(path)
+            for ext in self.downloadable_extensions:
+                if path.endswith(ext):
+                    path = path[:-len(ext)]
+                    t = self.split_filename(path, project_name)
+                    if not t:
+                        logger.debug('No match for project/version: %s', path)
+                    else:
+                        name, version, pyver = t
+                        if not project_name or same_project(project_name, name):
+                            result = {
+                                'name': name,
+                                'version': version,
+                                'filename': filename,
+                                'url': urlunparse((scheme, netloc, origpath,
+                                                   params, query, '')),
+                                #'packagetype': 'sdist',
+                            }
+                            if pyver:
+                                result['python-version'] = pyver
+                            m = MD5_HASH.match(frag)
+                            if m:
+                                result['md5_digest'] = m.group(1)
+                    break
+        return result
+
+    def _update_version_data(self, result, info):
+        """
+        Update a result dictionary (the final result from _get_project) with a
+        dictionary for a specific version, which typically holds information
+        gleaned from a filename or URL for an archive for the distribution.
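+
+        The ``info`` argument is shaped like the dictionaries produced by
+        :meth:`convert_url_to_download_info`, e.g. (values illustrative)::
+
+            {'name': 'foo', 'version': '1.0',
+             'filename': 'foo-1.0.tar.gz',
+             'url': 'https://pypi.python.org/packages/source/f/foo/foo-1.0.tar.gz'}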
+ """ + name = info.pop('name') + version = info.pop('version') + if version in result: + dist = result[version] + md = dist.metadata + else: + dist = make_dist(name, version, scheme=self.scheme) + md = dist.metadata + dist.md5_digest = info.get('md5_digest') + if 'python-version' in info: + md['Requires-Python'] = info['python-version'] + if md['Download-URL'] != info['url']: + md['Download-URL'] = self.prefer_url(md['Download-URL'], + info['url']) + dist.locator = self + result[version] = dist + + def locate(self, requirement, prereleases=False): + """ + Find the most recent distribution which matches the given + requirement. + + :param requirement: A requirement of the form 'foo (1.0)' or perhaps + 'foo (>= 1.0, < 2.0, != 1.3)' + :param prereleases: If ``True``, allow pre-release versions + to be located. Otherwise, pre-release versions + are not returned. + :return: A :class:`Distribution` instance, or ``None`` if no such + distribution could be located. + """ + result = None + scheme = get_scheme(self.scheme) + r = parse_requirement(requirement) + if r is None: + raise DistlibException('Not a valid requirement: %r' % requirement) + if r.extras: + # lose the extras part of the requirement + requirement = r.requirement + matcher = scheme.matcher(requirement) + vcls = matcher.version_class + logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) + versions = self.get_project(matcher.name) + if versions: + # sometimes, versions are invalid + slist = [] + for k in versions: + try: + if not matcher.match(k): + logger.debug('%s did not match %r', matcher, k) + else: + if prereleases or not vcls(k).is_prerelease: + slist.append(k) + else: + logger.debug('skipping pre-release version %s', k) + except Exception: + logger.warning('error matching %s with %r', matcher, k) + pass # slist.append(k) + if len(slist) > 1: + slist = sorted(slist, key=scheme.key) + if slist: + logger.debug('sorted list: %s', slist) + result = versions[slist[-1]] + if result and r.extras: + result.extras = r.extras + return result + + +class PyPIRPCLocator(Locator): + """ + This locator uses XML-RPC to locate distributions. It therefore cannot be + used with simple mirrors (that only mirror file content). + """ + def __init__(self, url, **kwargs): + """ + Initialise an instance. + + :param url: The URL to use for XML-RPC. + :param kwargs: Passed to the superclass constructor. + """ + super(PyPIRPCLocator, self).__init__(**kwargs) + self.base_url = url + self.client = ServerProxy(url, timeout=3.0) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + return set(self.client.list_packages()) + + def _get_project(self, name): + result = {} + versions = self.client.package_releases(name, True) + for v in versions: + urls = self.client.release_urls(name, v) + data = self.client.release_data(name, v) + metadata = Metadata(scheme=self.scheme) + metadata.update(data) + dist = Distribution(metadata) + if urls: + info = urls[0] + metadata['Download-URL'] = info['url'] + dist.md5_digest = info.get('md5_digest') + dist.locator = self + result[v] = dist + return result + +class PyPIJSONLocator(Locator): + """ + This locator uses PyPI's JSON interface. It's very limited in functionality + nad probably not worth using. + """ + def __init__(self, url, **kwargs): + super(PyPIJSONLocator, self).__init__(**kwargs) + self.base_url = ensure_slash(url) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
+ """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {} + url = urljoin(self.base_url, '%s/json' % quote(name)) + try: + resp = self.opener.open(url) + data = resp.read().decode() # for now + d = json.loads(data) + md = Metadata(scheme=self.scheme) + md.update(d['info']) + dist = Distribution(md) + urls = d['urls'] + if urls: + info = urls[0] + md['Download-URL'] = info['url'] + dist.md5_digest = info.get('md5_digest') + dist.locator = self + result[md.version] = dist + except Exception as e: + logger.exception('JSON fetch failed: %s', e) + return result + + +class Page(object): + """ + This class represents a scraped HTML page. + """ + # The following slightly hairy-looking regex just looks for the contents of + # an anchor link, which has an attribute "href" either immediately preceded + # or immediately followed by a "rel" attribute. The attribute values can be + # declared with double quotes, single quotes or no quotes - which leads to + # the length of the expression. + _href = re.compile(""" +(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)? +href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*)) +(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))? +""", re.I | re.S | re.X) + _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S) + + def __init__(self, data, url): + """ + Initialise an instance with the Unicode page contents and the URL they + came from. + """ + self.data = data + self.base_url = self.url = url + m = self._base.search(self.data) + if m: + self.base_url = m.group(1) + + _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) + + @cached_property + def links(self): + """ + Return the URLs of all the links on a page together with information + about their "rel" attribute, for determining which ones to treat as + downloads and which ones to queue for further scraping. + """ + def clean(url): + "Tidy up an URL." + scheme, netloc, path, params, query, frag = urlparse(url) + return urlunparse((scheme, netloc, quote(path), + params, query, frag)) + + result = set() + for match in self._href.finditer(self.data): + d = match.groupdict('') + rel = (d['rel1'] or d['rel2'] or d['rel3'] or + d['rel4'] or d['rel5'] or d['rel6']) + url = d['url1'] or d['url2'] or d['url3'] + url = urljoin(self.base_url, url) + url = unescape(url) + url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) + result.add((url, rel)) + # We sort the result, hoping to bring the most recent versions + # to the front + result = sorted(result, key=lambda t: t[0], reverse=True) + return result + + +class SimpleScrapingLocator(Locator): + """ + A locator which scrapes HTML pages to locate downloads for a distribution. + This runs multiple threads to do the I/O; performance is at least as good + as pip's PackageFinder, which works in an analogous fashion. + """ + + # These are used to deal with various Content-Encoding schemes. + decoders = { + 'deflate': zlib.decompress, + 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), + 'none': lambda b: b, + } + + def __init__(self, url, timeout=None, num_workers=10, **kwargs): + """ + Initialise an instance. + :param url: The root URL to use for scraping. + :param timeout: The timeout, in seconds, to be applied to requests. + This defaults to ``None`` (no timeout specified). + :param num_workers: The number of worker threads you want to do I/O, + This defaults to 10. 
+ :param kwargs: Passed to the superclass. + """ + super(SimpleScrapingLocator, self).__init__(**kwargs) + self.base_url = ensure_slash(url) + self.timeout = timeout + self._page_cache = {} + self._seen = set() + self._to_fetch = queue.Queue() + self._bad_hosts = set() + self.skip_externals = False + self.num_workers = num_workers + self._lock = threading.RLock() + + def _prepare_threads(self): + """ + Threads are created only when get_project is called, and terminate + before it returns. They are there primarily to parallelise I/O (i.e. + fetching web pages). + """ + self._threads = [] + for i in range(self.num_workers): + t = threading.Thread(target=self._fetch) + t.setDaemon(True) + t.start() + self._threads.append(t) + + def _wait_threads(self): + """ + Tell all the threads to terminate (by sending a sentinel value) and + wait for them to do so. + """ + # Note that you need two loops, since you can't say which + # thread will get each sentinel + for t in self._threads: + self._to_fetch.put(None) # sentinel + for t in self._threads: + t.join() + self._threads = [] + + def _get_project(self, name): + self.result = result = {} + self.project_name = name + url = urljoin(self.base_url, '%s/' % quote(name)) + self._seen.clear() + self._page_cache.clear() + self._prepare_threads() + try: + logger.debug('Queueing %s', url) + self._to_fetch.put(url) + self._to_fetch.join() + finally: + self._wait_threads() + del self.result + return result + + platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|' + r'win(32|-amd64)|macosx-?\d+)\b', re.I) + + def _is_platform_dependent(self, url): + """ + Does an URL refer to a platform-specific download? + """ + return self.platform_dependent.search(url) + + def _process_download(self, url): + """ + See if an URL is a suitable download for a project. + + If it is, register information in the result dictionary (for + _get_project) about the specific version it's for. + + Note that the return value isn't actually used other than as a boolean + value. + """ + if self._is_platform_dependent(url): + info = None + else: + info = self.convert_url_to_download_info(url, self.project_name) + logger.debug('process_download: %s -> %s', url, info) + if info: + with self._lock: # needed because self.result is shared + self._update_version_data(self.result, info) + return info + + def _should_queue(self, link, referrer, rel): + """ + Determine whether a link URL from a referring page and with a + particular "rel" attribute should be queued for scraping. + """ + scheme, netloc, path, _, _, _ = urlparse(link) + if path.endswith(self.source_extensions + self.binary_extensions + + self.excluded_extensions): + result = False + elif self.skip_externals and not link.startswith(self.base_url): + result = False + elif not referrer.startswith(self.base_url): + result = False + elif rel not in ('homepage', 'download'): + result = False + elif scheme not in ('http', 'https', 'ftp'): + result = False + elif self._is_platform_dependent(link): + result = False + else: + host = netloc.split(':', 1)[0] + if host.lower() == 'localhost': + result = False + else: + result = True + logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, + referrer, result) + return result + + def _fetch(self): + """ + Get a URL to fetch from the work queue, get the HTML page, examine its + links for download candidates and candidates for further scraping. + + This is a handy method to run in a thread. 
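+
+        A ``None`` URL is used as a sentinel: :meth:`_wait_threads` queues one
+        per worker thread, and the loop below exits once it sees it.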
+ """ + while True: + url = self._to_fetch.get() + try: + if url: + page = self.get_page(url) + if page is None: # e.g. after an error + continue + for link, rel in page.links: + if link not in self._seen: + self._seen.add(link) + if (not self._process_download(link) and + self._should_queue(link, url, rel)): + logger.debug('Queueing %s from %s', link, url) + self._to_fetch.put(link) + finally: + # always do this, to avoid hangs :-) + self._to_fetch.task_done() + if not url: + #logger.debug('Sentinel seen, quitting.') + break + + def get_page(self, url): + """ + Get the HTML for an URL, possibly from an in-memory cache. + + XXX TODO Note: this cache is never actually cleared. It's assumed that + the data won't get stale over the lifetime of a locator instance (not + necessarily true for the default_locator). + """ + # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api + scheme, netloc, path, _, _, _ = urlparse(url) + if scheme == 'file' and os.path.isdir(url2pathname(path)): + url = urljoin(ensure_slash(url), 'index.html') + + if url in self._page_cache: + result = self._page_cache[url] + logger.debug('Returning %s from cache: %s', url, result) + else: + host = netloc.split(':', 1)[0] + result = None + if host in self._bad_hosts: + logger.debug('Skipping %s due to bad host %s', url, host) + else: + req = Request(url, headers={'Accept-encoding': 'identity'}) + try: + logger.debug('Fetching %s', url) + resp = self.opener.open(req, timeout=self.timeout) + logger.debug('Fetched %s', url) + headers = resp.info() + content_type = headers.get('Content-Type', '') + if HTML_CONTENT_TYPE.match(content_type): + final_url = resp.geturl() + data = resp.read() + encoding = headers.get('Content-Encoding') + if encoding: + decoder = self.decoders[encoding] # fail if not found + data = decoder(data) + encoding = 'utf-8' + m = CHARSET.search(content_type) + if m: + encoding = m.group(1) + try: + data = data.decode(encoding) + except UnicodeError: + data = data.decode('latin-1') # fallback + result = Page(data, final_url) + self._page_cache[final_url] = result + except HTTPError as e: + if e.code != 404: + logger.exception('Fetch failed: %s: %s', url, e) + except URLError as e: + logger.exception('Fetch failed: %s: %s', url, e) + with self._lock: + self._bad_hosts.add(host) + except Exception as e: + logger.exception('Fetch failed: %s: %s', url, e) + finally: + self._page_cache[url] = result # even if None (failure) + return result + + _distname_re = re.compile('<a href=[^>]*>([^<]+)<') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + page = self.get_page(self.base_url) + if not page: + raise DistlibException('Unable to get %s' % self.base_url) + for match in self._distname_re.finditer(page.data): + result.add(match.group(1)) + return result + +class DirectoryLocator(Locator): + """ + This class locates distributions in a directory tree. + """ + + def __init__(self, path, **kwargs): + """ + Initialise an instance. + :param path: The root of the directory tree to search. + :param kwargs: Passed to the superclass constructor, + except for: + * recursive - if True (the default), subdirectories are + recursed into. 
If False, only the top-level directory + is searched, + """ + self.recursive = kwargs.pop('recursive', True) + super(DirectoryLocator, self).__init__(**kwargs) + path = os.path.abspath(path) + if not os.path.isdir(path): + raise DistlibException('Not a directory: %r' % path) + self.base_dir = path + + def should_include(self, filename, parent): + """ + Should a filename be considered as a candidate for a distribution + archive? As well as the filename, the directory which contains it + is provided, though not used by the current implementation. + """ + return filename.endswith(self.downloadable_extensions) + + def _get_project(self, name): + result = {} + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, name) + if info: + self._update_version_data(result, info) + if not self.recursive: + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, None) + if info: + result.add(info['name']) + if not self.recursive: + break + return result + +class JSONLocator(Locator): + """ + This locator uses special extended metadata (not available on PyPI) and is + the basis of performant dependency resolution in distlib. Other locators + require archive downloads before dependencies can be determined! As you + might imagine, that can be slow. + """ + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {} + data = get_project_data(name) + if data: + for info in data.get('files', []): + if info['ptype'] != 'sdist' or info['pyversion'] != 'source': + continue + dist = make_dist(data['name'], info['version'], + scheme=self.scheme) + md = dist.metadata + md['Download-URL'] = info['url'] + dist.md5_digest = info.get('digest') + md.dependencies = info.get('requirements', {}) + dist.exports = info.get('exports', {}) + result[dist.version] = dist + return result + +class DistPathLocator(Locator): + """ + This locator finds installed distributions in a path. It can be useful for + adding to an :class:`AggregatingLocator`. + """ + def __init__(self, distpath, **kwargs): + """ + Initialise an instance. + + :param distpath: A :class:`DistributionPath` instance to search. + """ + super(DistPathLocator, self).__init__(**kwargs) + assert isinstance(distpath, DistributionPath) + self.distpath = distpath + + def _get_project(self, name): + dist = self.distpath.get_distribution(name) + if dist is None: + result = {} + else: + result = { dist.version: dist } + return result + + +class AggregatingLocator(Locator): + """ + This class allows you to chain and/or merge a list of locators. + """ + def __init__(self, *locators, **kwargs): + """ + Initialise an instance. + + :param locators: The list of locators to search. + :param kwargs: Passed to the superclass constructor, + except for: + * merge - if False (the default), the first successful + search from any of the locators is returned. 
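
Before the `AggregatingLocator` options continue below, a quick usage sketch for the directory-based locator just defined. The path and project name are hypothetical; `locate()` is inherited from the `Locator` base class.

```python
from distlib.locators import DirectoryLocator

# recursive=False limits the os.walk() to the top level, per the kwarg above
locator = DirectoryLocator('/srv/packages', recursive=False)
print(locator.get_distribution_names())

dist = locator.locate('mypackage (>= 1.0)')
if dist is not None:
    print(dist.name_and_version)
```
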
If True, + the results from all locators are merged (this can be + slow). + """ + self.merge = kwargs.pop('merge', False) + self.locators = locators + super(AggregatingLocator, self).__init__(**kwargs) + + def clear_cache(self): + super(AggregatingLocator, self).clear_cache() + for locator in self.locators: + locator.clear_cache() + + def _set_scheme(self, value): + self._scheme = value + for locator in self.locators: + locator.scheme = value + + scheme = property(Locator.scheme.fget, _set_scheme) + + def _get_project(self, name): + result = {} + for locator in self.locators: + r = locator.get_project(name) + if r: + if self.merge: + result.update(r) + else: + result = r + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for locator in self.locators: + try: + result |= locator.get_distribution_names() + except NotImplementedError: + pass + return result + + +default_locator = AggregatingLocator( + JSONLocator(), + SimpleScrapingLocator('https://pypi.python.org/simple/', + timeout=3.0)) + +locate = default_locator.locate + +class DependencyFinder(object): + """ + Locate dependencies for distributions. + """ + + def __init__(self, locator=None): + """ + Initialise an instance, using the specified locator + to locate distributions. + """ + self.locator = locator or default_locator + self.scheme = get_scheme(self.locator.scheme) + + def _get_name_and_version(self, p): + """ + A utility method used to get name and version from e.g. a Provides-Dist + value. + + :param p: A value in a form foo (1.0) + :return: The name and version as a tuple. + """ + comps = p.strip().rsplit(' ', 1) + name = comps[0] + version = None + if len(comps) == 2: + version = comps[1] + if len(version) < 3 or version[0] != '(' or version[-1] != ')': + raise DistlibException('Ill-formed provides field: %r' % p) + version = version[1:-1] # trim off parentheses + # Name in lower case for case-insensitivity + return name.lower(), version + + def add_distribution(self, dist): + """ + Add a distribution to the finder. This will update internal information + about who provides what. + :param dist: The distribution to add. + """ + logger.debug('adding distribution %s', dist) + name = dist.key + self.dists_by_name[name] = dist + self.dists[(name, dist.version)] = dist + for p in dist.provides: + name, version = self._get_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + self.provided.setdefault(name, set()).add((version, dist)) + + def remove_distribution(self, dist): + """ + Remove a distribution from the finder. This will update internal + information about who provides what. + :param dist: The distribution to remove. + """ + logger.debug('removing distribution %s', dist) + name = dist.key + del self.dists_by_name[name] + del self.dists[(name, dist.version)] + for p in dist.provides: + name, version = self._get_name_and_version(p) + logger.debug('Remove from provided: %s, %s, %s', name, version, dist) + s = self.provided[name] + s.remove((version, dist)) + if not s: + del self.provided[name] + + def get_matcher(self, reqt): + """ + Get a version matcher for a requirement. + :param reqt: The requirement + :type reqt: str + :return: A version matcher (an instance of + :class:`distlib.version.Matcher`). 
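
A sketch of chaining locators as above; with ``merge=False`` the first locator whose `_get_project` returns results wins. The local mirror path is hypothetical.

```python
from distlib.locators import (AggregatingLocator, DirectoryLocator,
                              JSONLocator)

locator = AggregatingLocator(
    DirectoryLocator('/srv/packages'),   # consulted first
    JSONLocator(),                       # fallback
    merge=False)
dist = locator.locate('requests (>= 1.2)')
print(dist)
```
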
+ """ + try: + matcher = self.scheme.matcher(reqt) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + name = reqt.split()[0] + matcher = self.scheme.matcher(name) + return matcher + + def find_providers(self, reqt): + """ + Find the distributions which can fulfill a requirement. + + :param reqt: The requirement. + :type reqt: str + :return: A set of distribution which can fulfill the requirement. + """ + matcher = self.get_matcher(reqt) + name = matcher.key # case-insensitive + result = set() + provided = self.provided + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + result.add(provider) + break + return result + + def try_to_replace(self, provider, other, problems): + """ + Attempt to replace one provider with another. This is typically used + when resolving dependencies from multiple sources, e.g. A requires + (B >= 1.0) while C requires (B >= 1.1). + + For successful replacement, ``provider`` must meet all the requirements + which ``other`` fulfills. + + :param provider: The provider we are trying to replace with. + :param other: The provider we're trying to replace. + :param problems: If False is returned, this will contain what + problems prevented replacement. This is currently + a tuple of the literal string 'cantreplace', + ``provider``, ``other`` and the set of requirements + that ``provider`` couldn't fulfill. + :return: True if we can replace ``other`` with ``provider``, else + False. + """ + rlist = self.reqts[other] + unmatched = set() + for s in rlist: + matcher = self.get_matcher(s) + if not matcher.match(provider.version): + unmatched.add(s) + if unmatched: + # can't replace other with provider + problems.add(('cantreplace', provider, other, unmatched)) + result = False + else: + # can replace other with provider + self.remove_distribution(other) + del self.reqts[other] + for s in rlist: + self.reqts.setdefault(provider, set()).add(s) + self.add_distribution(provider) + result = True + return result + + def find(self, requirement, tests=False, prereleases=False): + """ + Find a distribution matching requirement and all distributions + it depends on. Use the ``tests`` argument to determine whether + distributions used only for testing should be included in the + results. Allow ``requirement`` to be either a :class:`Distribution` + instance or a string expressing a requirement. If ``prereleases`` + is True, allow pre-release versions to be returned - otherwise, + don't. + + Return a set of :class:`Distribution` instances and a set of + problems. + + The distributions returned should be such that they have the + :attr:`required` attribute set to ``True`` if they were + from the ``requirement`` passed to ``find()``, and they have the + :attr:`build_time_dependency` attribute set to ``True`` unless they + are post-installation dependencies of the ``requirement``. + + The problems should be a tuple consisting of the string + ``'unsatisfied'`` and the requirement which couldn't be satisfied + by any distribution known to the locator. 
+ """ + + self.provided = {} + self.dists = {} + self.dists_by_name = {} + self.reqts = {} + + if isinstance(requirement, Distribution): + dist = odist = requirement + logger.debug('passed %s as requirement', odist) + else: + dist = odist = self.locator.locate(requirement, + prereleases=prereleases) + if dist is None: + raise DistlibException('Unable to locate %r' % requirement) + logger.debug('located %s', odist) + dist.requested = True + problems = set() + todo = set([dist]) + install_dists = set([odist]) + while todo: + dist = todo.pop() + name = dist.key # case-insensitive + if name not in self.dists_by_name: + self.add_distribution(dist) + else: + #import pdb; pdb.set_trace() + other = self.dists_by_name[name] + if other != dist: + self.try_to_replace(dist, other, problems) + + ireqts = dist.requires + sreqts = dist.setup_requires + ereqts = set() + if not tests or dist not in install_dists: + treqts = set() + else: + treqts = dist.test_requires + all_reqts = ireqts | sreqts | treqts | ereqts + for r in all_reqts: + providers = self.find_providers(r) + if not providers: + logger.debug('No providers found for %r', r) + provider = self.locator.locate(r, prereleases=prereleases) + if provider is None: + logger.debug('Cannot satisfy %r', r) + problems.add(('unsatisfied', r)) + else: + n, v = provider.key, provider.version + if (n, v) not in self.dists: + todo.add(provider) + providers.add(provider) + if r in ireqts and dist in install_dists: + install_dists.add(provider) + logger.debug('Adding %s to install_dists', + provider.name_and_version) + for p in providers: + name = p.key + if name not in self.dists_by_name: + self.reqts.setdefault(p, set()).add(r) + else: + other = self.dists_by_name[name] + if other != p: + # see if other can be replaced by p + self.try_to_replace(p, other, problems) + + dists = set(self.dists.values()) + for dist in dists: + dist.build_time_dependency = dist not in install_dists + if dist.build_time_dependency: + logger.debug('%s is a build-time dependency only.', + dist.name_and_version) + logger.debug('find done for %s', odist) + return dists, problems diff --git a/awx/lib/site-packages/pip/vendor/distlib/manifest.py b/awx/lib/site-packages/pip/vendor/distlib/manifest.py new file mode 100644 index 0000000000..ae2c3ea607 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/manifest.py @@ -0,0 +1,361 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Class representing the list of files in a distribution. + +Equivalent to distutils.filelist, but fixes some problems. +""" +import fnmatch +import logging +import os +import re + +from . import DistlibException +from .compat import fsdecode +from .util import convert_path + + +__all__ = ['Manifest'] + +logger = logging.getLogger(__name__) + +# a \ followed by some spaces + EOL +_COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M) +_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) + + +class Manifest(object): + """A list of files built by on exploring the filesystem and filtered by + applying various patterns to what we find there. + """ + + def __init__(self, base=None): + """ + Initialise an instance. + + :param base: The base directory to explore under. 
+ """ + self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) + self.prefix = self.base + os.sep + self.allfiles = None + self.files = set() + + # + # Public API + # + + def findall(self): + """Find all files under the base and set ``allfiles`` to the absolute + pathnames of files found. + """ + from stat import S_ISREG, S_ISDIR, S_ISLNK + + self.allfiles = allfiles = [] + root = self.base + stack = [root] + pop = stack.pop + push = stack.append + + while stack: + root = pop() + names = os.listdir(root) + + for name in names: + fullname = os.path.join(root, name) + + # Avoid excess stat calls -- just one will do, thank you! + stat = os.stat(fullname) + mode = stat.st_mode + if S_ISREG(mode): + allfiles.append(fsdecode(fullname)) + elif S_ISDIR(mode) and not S_ISLNK(mode): + push(fullname) + + def add(self, item): + """ + Add a file to the manifest. + + :param item: The pathname to add. This can be relative to the base. + """ + if not item.startswith(self.prefix): + item = os.path.join(self.base, item) + self.files.add(os.path.normpath(item)) + + def add_many(self, items): + """ + Add a list of files to the manifest. + + :param items: The pathnames to add. These can be relative to the base. + """ + for item in items: + self.add(item) + + def sorted(self, wantdirs=False): + """ + Return sorted files in directory order + """ + def add_dir(dirs, d): + dirs.add(d) + logger.debug('add_dir added %s', d) + if d != self.base: + parent, _ = os.path.split(d) + assert parent not in ('', '/') + add_dir(dirs, parent) + + result = set(self.files) # make a copy! + if wantdirs: + dirs = set() + for f in result: + add_dir(dirs, os.path.dirname(f)) + result |= dirs + return [os.path.join(*path_tuple) for path_tuple in + sorted(os.path.split(path) for path in result)] + + def clear(self): + """Clear all collected files.""" + self.files = set() + self.allfiles = [] + + def process_directive(self, directive): + """ + Process a directive which either adds some files from ``allfiles`` to + ``files``, or removes some files from ``files``. + + :param directive: The directive to process. This should be in a format + compatible with distutils ``MANIFEST.in`` files: + + http://docs.python.org/distutils/sourcedist.html#commands + """ + # Parse the line: split it up, make sure the right number of words + # is there, and return the relevant words. 'action' is always + # defined: it's the first word of the line. Which of the other + # three are defined depends on the action; it'll be either + # patterns, (dir and patterns), or (dirpattern). + action, patterns, thedir, dirpattern = self._parse_directive(directive) + + # OK, now we know that the action is valid and we have the + # right number of words on the line for that action -- so we + # can proceed with minimal error-checking. 
+ if action == 'include': + for pattern in patterns: + if not self._include_pattern(pattern, anchor=True): + logger.warning('no files found matching %r', pattern) + + elif action == 'exclude': + for pattern in patterns: + if not self._exclude_pattern(pattern, anchor=True): + logger.warning('no previously-included files ' + 'found matching %r', pattern) + + elif action == 'global-include': + for pattern in patterns: + if not self._include_pattern(pattern, anchor=False): + logger.warning('no files found matching %r ' + 'anywhere in distribution', pattern) + + elif action == 'global-exclude': + for pattern in patterns: + if not self._exclude_pattern(pattern, anchor=False): + logger.warning('no previously-included files ' + 'matching %r found anywhere in ' + 'distribution', pattern) + + elif action == 'recursive-include': + for pattern in patterns: + if not self._include_pattern(pattern, prefix=thedir): + logger.warning('no files found matching %r ' + 'under directory %r', pattern, thedir) + + elif action == 'recursive-exclude': + for pattern in patterns: + if not self._exclude_pattern(pattern, prefix=thedir): + logger.warning('no previously-included files ' + 'matching %r found under directory %r', + pattern, thedir) + + elif action == 'graft': + if not self._include_pattern(None, prefix=dirpattern): + logger.warning('no directories found matching %r', + dirpattern) + + elif action == 'prune': + if not self._exclude_pattern(None, prefix=dirpattern): + logger.warning('no previously-included directories found ' + 'matching %r', dirpattern) + else: #pragma: no cover + # This should never happen, as it should be caught in + # _parse_template_line + raise DistlibException( + 'invalid action %r' % action) + + # + # Private API + # + + def _parse_directive(self, directive): + """ + Validate a directive. + :param directive: The directive to validate. + :return: A tuple of action, patterns, thedir, dir_patterns + """ + words = directive.split() + if len(words) == 1 and words[0] not in ( + 'include', 'exclude', 'global-include', 'global-exclude', + 'recursive-include', 'recursive-exclude', 'graft', 'prune'): + # no action given, let's use the default 'include' + words.insert(0, 'include') + + action = words[0] + patterns = thedir = dir_pattern = None + + if action in ('include', 'exclude', + 'global-include', 'global-exclude'): + if len(words) < 2: + raise DistlibException( + '%r expects <pattern1> <pattern2> ...' % action) + + patterns = [convert_path(word) for word in words[1:]] + + elif action in ('recursive-include', 'recursive-exclude'): + if len(words) < 3: + raise DistlibException( + '%r expects <dir> <pattern1> <pattern2> ...' % action) + + thedir = convert_path(words[1]) + patterns = [convert_path(word) for word in words[2:]] + + elif action in ('graft', 'prune'): + if len(words) != 2: + raise DistlibException( + '%r expects a single <dir_pattern>' % action) + + dir_pattern = convert_path(words[1]) + + else: + raise DistlibException('unknown action %r' % action) + + return action, patterns, thedir, dir_pattern + + def _include_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Select strings (presumably filenames) from 'self.files' that + match 'pattern', a Unix-style wildcard (glob) pattern. + + Patterns are not quite the same as implemented by the 'fnmatch' + module: '*' and '?' match non-special characters, where "special" + is platform-dependent: slash on Unix; colon, slash, and backslash on + DOS/Windows; and colon on Mac OS. 
+ + If 'anchor' is true (the default), then the pattern match is more + stringent: "*.py" will match "foo.py" but not "foo/bar.py". If + 'anchor' is false, both of these will match. + + If 'prefix' is supplied, then only filenames starting with 'prefix' + (itself a pattern) and ending with 'pattern', with anything in between + them, will match. 'anchor' is ignored in this case. + + If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and + 'pattern' is assumed to be either a string containing a regex or a + regex object -- no translation is done, the regex is just compiled + and used as-is. + + Selected strings will be added to self.files. + + Return True if files are found. + """ + # XXX docstring lying about what the special chars are? + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + + # delayed loading of allfiles list + if self.allfiles is None: + self.findall() + + for name in self.allfiles: + if pattern_re.search(name): + self.files.add(name) + found = True + return found + + def _exclude_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Remove strings (presumably filenames) from 'files' that match + 'pattern'. + + Other parameters are the same as for 'include_pattern()', above. + The list 'self.files' is modified in place. Return True if files are + found. + + This API is public to allow e.g. exclusion of SCM subdirs, e.g. when + packaging source distributions + """ + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + for f in list(self.files): + if pattern_re.search(f): + self.files.remove(f) + found = True + return found + + + def _translate_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Translate a shell-like wildcard pattern to a compiled regular + expression. + + Return the compiled regex. If 'is_regex' true, + then 'pattern' is directly compiled to a regex (if it's a string) + or just returned as-is (assumes it's a regex object). + """ + if is_regex: + if isinstance(pattern, str): + return re.compile(pattern) + else: + return pattern + + if pattern: + pattern_re = self._glob_to_re(pattern) + else: + pattern_re = '' + + base = re.escape(os.path.join(self.base, '')) + if prefix is not None: + # ditch end of pattern character + empty_pattern = self._glob_to_re('') + prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] + sep = os.sep + if os.sep == '\\': + sep = r'\\' + pattern_re = '^' + base + sep.join((prefix_re, + '.*' + pattern_re)) + else: # no prefix -- respect anchor flag + if anchor: + pattern_re = '^' + base + pattern_re + + return re.compile(pattern_re) + + def _glob_to_re(self, pattern): + """Translate a shell-like glob pattern to a regular expression. + + Return a string containing the regex. Differs from + 'fnmatch.translate()' in that '*' does not match "special characters" + (which are platform-specific). + """ + pattern_re = fnmatch.translate(pattern) + + # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which + # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, + # and by extension they shouldn't match such "special characters" under + # any OS. So change all non-escaped dots in the RE to match any + # character except the special characters (currently: just os.sep). 
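
The substitution that the comment in `_glob_to_re` below performs can be checked in isolation; the exact `fnmatch.translate()` output varies across Python versions, but the effect is the same:

```python
import fnmatch
import os
import re

pattern_re = fnmatch.translate('*.py')
sep = r'\\\\' if os.sep == '\\' else os.sep
escaped = r'\1[^%s]' % sep
fixed = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)

print(pattern_re)                              # '.' matches anything, even '/'
print(fixed)                                   # '.' now stops at os.sep
print(bool(re.match(fixed, 'pkg.py')))         # True
print(bool(re.match(fixed, 'pkg/sub.py')))     # False: '*' no longer crosses '/'
```
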
+ sep = os.sep + if os.sep == '\\': + # we're using a regex to manipulate a regex, so we need + # to escape the backslash twice + sep = r'\\\\' + escaped = r'\1[^%s]' % sep + pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re) + return pattern_re diff --git a/awx/lib/site-packages/pip/vendor/distlib/markers.py b/awx/lib/site-packages/pip/vendor/distlib/markers.py new file mode 100644 index 0000000000..182474008a --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/markers.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Parser for the environment markers micro-language defined in PEP 345.""" + +import ast +import os +import sys +import platform + +from .compat import python_implementation, string_types +from .util import in_venv + +__all__ = ['interpret'] + +class Evaluator(object): + """ + A limited evaluator for Python expressions. + """ + + operators = { + 'eq': lambda x, y: x == y, + 'gt': lambda x, y: x > y, + 'gte': lambda x, y: x >= y, + 'in': lambda x, y: x in y, + 'lt': lambda x, y: x < y, + 'lte': lambda x, y: x <= y, + 'not': lambda x: not x, + 'noteq': lambda x, y: x != y, + 'notin': lambda x, y: x not in y, + } + + allowed_values = { + 'sys.platform': sys.platform, + 'python_version': '%s.%s' % sys.version_info[:2], + # parsing sys.platform is not reliable, but there is no other + # way to get e.g. 2.7.2+, and the PEP is defined with sys.version + 'python_full_version': sys.version.split(' ', 1)[0], + 'os.name': os.name, + 'platform.in_venv': str(in_venv()), + 'platform.version': platform.version(), + 'platform.machine': platform.machine(), + 'platform.python_implementation': platform.python_implementation(), + } + + def __init__(self, context=None): + """ + Initialise an instance. + + :param context: If specified, names are looked up in this mapping. + """ + self.context = context or {} + self.source = None + + def get_fragment(self, offset): + """ + Get the part of the source which is causing a problem. + """ + fragment_len = 10 + s = '%r' % (self.source[offset:offset + fragment_len]) + if offset + fragment_len < len(self.source): + s += '...' + return s + + def get_handler(self, node_type): + """ + Get a handler for the specified AST node type. + """ + return getattr(self, 'do_%s' % node_type, None) + + def evaluate(self, node, filename=None): + """ + Evaluate a source string or node, using ``filename`` when + displaying errors. 
+ """ + if isinstance(node, string_types): + self.source = node + kwargs = {'mode': 'eval'} + if filename: + kwargs['filename'] = filename + try: + node = ast.parse(node, **kwargs) + except SyntaxError as e: + s = self.get_fragment(e.offset) + raise SyntaxError('syntax error %s' % s) + node_type = node.__class__.__name__.lower() + handler = self.get_handler(node_type) + if handler is None: + if self.source is None: + s = '(source not available)' + else: + s = self.get_fragment(node.col_offset) + raise SyntaxError("don't know how to evaluate %r %s" % ( + node_type, s)) + return handler(node) + + def get_attr_key(self, node): + assert isinstance(node, ast.Attribute), 'attribute node expected' + return '%s.%s' % (node.value.id, node.attr) + + def do_attribute(self, node): + valid = True + if not isinstance(node.value, ast.Name): + valid = False + else: + key = self.get_attr_key(node) + valid = key in self.context or key in self.allowed_values + if not valid: + raise SyntaxError('invalid expression: %s' % key) + if key in self.context: + result = self.context[key] + else: + result = self.allowed_values[key] + return result + + def do_boolop(self, node): + result = self.evaluate(node.values[0]) + is_or = node.op.__class__ is ast.Or + is_and = node.op.__class__ is ast.And + assert is_or or is_and + if (is_and and result) or (is_or and not result): + for n in node.values[1:]: + result = self.evaluate(n) + if (is_or and result) or (is_and and not result): + break + return result + + def do_compare(self, node): + def sanity_check(lhsnode, rhsnode): + valid = True + if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str): + valid = False + elif (isinstance(lhsnode, ast.Attribute) + and isinstance(rhsnode, ast.Attribute)): + klhs = self.get_attr_key(lhsnode) + krhs = self.get_attr_key(rhsnode) + valid = klhs != krhs + if not valid: + s = self.get_fragment(node.col_offset) + raise SyntaxError('Invalid comparison: %s' % s) + + lhsnode = node.left + lhs = self.evaluate(lhsnode) + result = True + for op, rhsnode in zip(node.ops, node.comparators): + sanity_check(lhsnode, rhsnode) + op = op.__class__.__name__.lower() + if op not in self.operators: + raise SyntaxError('unsupported operation: %r' % op) + rhs = self.evaluate(rhsnode) + result = self.operators[op](lhs, rhs) + if not result: + break + lhs = rhs + lhsnode = rhsnode + return result + + def do_expression(self, node): + return self.evaluate(node.body) + + def do_name(self, node): + valid = False + if node.id in self.context: + valid = True + result = self.context[node.id] + elif node.id in self.allowed_values: + valid = True + result = self.allowed_values[node.id] + if not valid: + raise SyntaxError('invalid expression: %s' % node.id) + return result + + def do_str(self, node): + return node.s + + +def interpret(marker, execution_context=None): + """ + Interpret a marker and return a result depending on environment. + + :param marker: The marker to interpret. + :type marker: str + :param execution_context: The context used for name lookup. + :type execution_context: mapping + """ + return Evaluator(execution_context).evaluate(marker.strip()) diff --git a/awx/lib/site-packages/pip/vendor/distlib/metadata.py b/awx/lib/site-packages/pip/vendor/distlib/metadata.py new file mode 100644 index 0000000000..51dbe853f6 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/metadata.py @@ -0,0 +1,708 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +"""Implementation of the Metadata for Python packages PEPs. + +Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). +""" +from __future__ import unicode_literals + +import codecs +from email import message_from_file +import logging +import re + + +from . import DistlibException +from .compat import StringIO, string_types +from .markers import interpret +from .version import get_scheme + +logger = logging.getLogger(__name__) + +class MetadataMissingError(DistlibException): + """A required metadata is missing""" + + +class MetadataConflictError(DistlibException): + """Attempt to read or write metadata fields that are conflictual.""" + + +class MetadataUnrecognizedVersionError(DistlibException): + """Unknown metadata version number.""" + + +try: + # docutils is installed + from docutils.utils import Reporter + from docutils.parsers.rst import Parser + from docutils import frontend + from docutils import nodes + + class SilentReporter(Reporter, object): + + def __init__(self, source, report_level, halt_level, stream=None, + debug=0, encoding='ascii', error_handler='replace'): + self.messages = [] + super(SilentReporter, self).__init__( + source, report_level, halt_level, stream, + debug, encoding, error_handler) + + def system_message(self, level, message, *children, **kwargs): + self.messages.append((level, message, children, kwargs)) + return nodes.system_message(message, level=level, type=self. + levels[level], *children, **kwargs) + + _HAS_DOCUTILS = True +except ImportError: + # docutils is not installed + _HAS_DOCUTILS = False + +# public API of this module +__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] + +# Encoding used for the PKG-INFO files +PKG_INFO_ENCODING = 'utf-8' + +# preferred version. Hopefully will be changed +# to 1.2 once PEP 345 is supported everywhere +PKG_INFO_PREFERRED_VERSION = '1.1' + +_LINE_PREFIX = re.compile('\n \|') +_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License') + +_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License', 'Classifier', 'Download-URL', 'Obsoletes', + 'Provides', 'Requires') + +_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', + 'Download-URL') + +_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External') + +_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', + 'Obsoletes-Dist', 'Requires-External', 'Maintainer', + 'Maintainer-email', 'Project-URL') + +_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External', 'Private-Version', + 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', + 'Provides-Extra') + +_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', + 'Setup-Requires-Dist', 'Extension') + +_ALL_FIELDS = set() 
+_ALL_FIELDS.update(_241_FIELDS) +_ALL_FIELDS.update(_314_FIELDS) +_ALL_FIELDS.update(_345_FIELDS) +_ALL_FIELDS.update(_426_FIELDS) + +EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') + +def _version2fieldlist(version): + if version == '1.0': + return _241_FIELDS + elif version == '1.1': + return _314_FIELDS + elif version == '1.2': + return _345_FIELDS + elif version == '2.0': + return _426_FIELDS + raise MetadataUnrecognizedVersionError(version) + + +def _best_version(fields): + """Detect the best version depending on the fields used.""" + def _has_marker(keys, markers): + for marker in markers: + if marker in keys: + return True + return False + + keys = [] + for key, value in fields.items(): + if value in ([], 'UNKNOWN', None): + continue + keys.append(key) + + possible_versions = ['1.0', '1.1', '1.2', '2.0'] + + # first let's try to see if a field is not part of one of the version + for key in keys: + if key not in _241_FIELDS and '1.0' in possible_versions: + possible_versions.remove('1.0') + if key not in _314_FIELDS and '1.1' in possible_versions: + possible_versions.remove('1.1') + if key not in _345_FIELDS and '1.2' in possible_versions: + possible_versions.remove('1.2') + if key not in _426_FIELDS and '2.0' in possible_versions: + possible_versions.remove('2.0') + + # possible_version contains qualified versions + if len(possible_versions) == 1: + return possible_versions[0] # found ! + elif len(possible_versions) == 0: + raise MetadataConflictError('Unknown metadata set') + + # let's see if one unique marker is found + is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) + is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) + is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) + if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1: + raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields') + + # we have the choice, 1.0, or 1.2, or 2.0 + # - 1.0 has a broken Summary field but works with all tools + # - 1.1 is to avoid + # - 1.2 fixes Summary but has little adoption + # - 2.0 adds more features and is very new + if not is_1_1 and not is_1_2 and not is_2_0: + # we couldn't find any specific marker + if PKG_INFO_PREFERRED_VERSION in possible_versions: + return PKG_INFO_PREFERRED_VERSION + if is_1_1: + return '1.1' + if is_1_2: + return '1.2' + + return '2.0' + +_ATTR2FIELD = { + 'metadata_version': 'Metadata-Version', + 'name': 'Name', + 'version': 'Version', + 'platform': 'Platform', + 'supported_platform': 'Supported-Platform', + 'summary': 'Summary', + 'description': 'Description', + 'keywords': 'Keywords', + 'home_page': 'Home-page', + 'author': 'Author', + 'author_email': 'Author-email', + 'maintainer': 'Maintainer', + 'maintainer_email': 'Maintainer-email', + 'license': 'License', + 'classifier': 'Classifier', + 'download_url': 'Download-URL', + 'obsoletes_dist': 'Obsoletes-Dist', + 'provides_dist': 'Provides-Dist', + 'requires_dist': 'Requires-Dist', + 'setup_requires_dist': 'Setup-Requires-Dist', + 'requires_python': 'Requires-Python', + 'requires_external': 'Requires-External', + 'requires': 'Requires', + 'provides': 'Provides', + 'obsoletes': 'Obsoletes', + 'project_url': 'Project-URL', + 'private_version': 'Private-Version', + 'obsoleted_by': 'Obsoleted-By', + 'extension': 'Extension', + 'provides_extra': 'Provides-Extra', +} + +_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') +_VERSIONS_FIELDS = ('Requires-Python',) +_VERSION_FIELDS = ('Version',) +_LISTFIELDS = 
('Platform', 'Classifier', 'Obsoletes', + 'Requires', 'Provides', 'Obsoletes-Dist', + 'Provides-Dist', 'Requires-Dist', 'Requires-External', + 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', + 'Provides-Extra', 'Extension') +_LISTTUPLEFIELDS = ('Project-URL',) + +_ELEMENTSFIELD = ('Keywords',) + +_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') + +_MISSING = object() + +_FILESAFE = re.compile('[^A-Za-z0-9.]+') + + +class Metadata(object): + """The metadata of a release. + + Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can + instantiate the class with one of these arguments (or none): + - *path*, the path to a METADATA file + - *fileobj* give a file-like object with METADATA as content + - *mapping* is a dict-like object + - *scheme* is a version scheme name + """ + # TODO document that execution_context and platform_dependent are used + # to filter on query, not when setting a key + # also document the mapping API and UNKNOWN default key + + def __init__(self, path=None, platform_dependent=False, + execution_context=None, fileobj=None, mapping=None, + scheme='default'): + self._fields = {} + self.requires_files = [] + self.docutils_support = _HAS_DOCUTILS + self.platform_dependent = platform_dependent + self.execution_context = execution_context + self._dependencies = None + self.scheme = scheme + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + if path is not None: + self.read(path) + elif fileobj is not None: + self.read_file(fileobj) + elif mapping is not None: + self.update(mapping) + self.set_metadata_version() + + def set_metadata_version(self): + self._fields['Metadata-Version'] = _best_version(self._fields) + + def _write_field(self, file, name, value): + file.write('%s: %s\n' % (name, value)) + + def __getitem__(self, name): + return self.get(name) + + def __setitem__(self, name, value): + return self.set(name, value) + + def __delitem__(self, name): + field_name = self._convert_name(name) + try: + del self._fields[field_name] + except KeyError: + raise KeyError(name) + + def __contains__(self, name): + return (name in self._fields or + self._convert_name(name) in self._fields) + + def _convert_name(self, name): + if name in _ALL_FIELDS: + return name + name = name.replace('-', '_').lower() + return _ATTR2FIELD.get(name, name) + + def _default_value(self, name): + if name in _LISTFIELDS or name in _ELEMENTSFIELD: + return [] + return 'UNKNOWN' + + def _check_rst_data(self, data): + """Return warnings when the provided data has syntax errors.""" + source_path = StringIO() + parser = Parser() + settings = frontend.OptionParser().get_default_values() + settings.tab_width = 4 + settings.pep_references = None + settings.rfc_references = None + reporter = SilentReporter(source_path, + settings.report_level, + settings.halt_level, + stream=settings.warning_stream, + debug=settings.debug, + encoding=settings.error_encoding, + error_handler=settings.error_encoding_error_handler) + + document = nodes.document(settings, reporter, source=source_path) + document.note_source(source_path, -1) + try: + parser.parse(data, document) + except AttributeError: + reporter.messages.append((-1, 'Could not finish the parsing.', + '', {})) + + return reporter.messages + + def _platform(self, value): + if not self.platform_dependent or ';' not in value: + return True, value + value, marker = value.split(';') + return interpret(marker, self.execution_context), value + + def _remove_line_prefix(self, value): + 
return _LINE_PREFIX.sub('\n', value) + + def __getattr__(self, name): + if name in _ATTR2FIELD: + return self[name] + raise AttributeError(name) + + def _get_dependencies(self): + def handle_req(req, rlist, extras): + if ';' not in req: + rlist.append(req) + else: + r, marker = req.split(';') + m = EXTRA_RE.search(marker) + if m: + extra = m.groups()[0][1:-1] + extras.setdefault(extra, []).append(r) + + result = self._dependencies + if result is None: + self._dependencies = result = {} + extras = {} + setup_reqs = self['Setup-Requires-Dist'] + if setup_reqs: + result['setup'] = setup_reqs + install_reqs = [] + for req in self['Requires-Dist']: + handle_req(req, install_reqs, extras) + if install_reqs: + result['install'] = install_reqs + if extras: + result['extras'] = extras + return result + + def _set_dependencies(self, value): + if 'test' in value: + value = dict(value) # don't change value passed in + value.setdefault('extras', {})['test'] = value.pop('test') + self._dependencies = value + setup_reqs = value.get('setup', []) + install_reqs = value.get('install', []) + klist = [] + for k, rlist in value.get('extras', {}).items(): + klist.append(k) + for r in rlist: + install_reqs.append('%s; extra == "%s"' % (r, k)) + if setup_reqs: + self['Setup-Requires-Dist'] = setup_reqs + if install_reqs: + self['Requires-Dist'] = install_reqs + if klist: + self['Provides-Extra'] = klist + # + # Public API + # + + dependencies = property(_get_dependencies, _set_dependencies) + + def get_fullname(self, filesafe=False): + """Return the distribution name with version. + + If filesafe is true, return a filename-escaped form.""" + name, version = self['Name'], self['Version'] + if filesafe: + # For both name and version any runs of non-alphanumeric or '.' + # characters are replaced with a single '-'. Additionally any + # spaces in the version string become '.' 
+ name = _FILESAFE.sub('-', name) + version = _FILESAFE.sub('-', version.replace(' ', '.')) + return '%s-%s' % (name, version) + + def is_field(self, name): + """return True if name is a valid metadata key""" + name = self._convert_name(name) + return name in _ALL_FIELDS + + def is_multi_field(self, name): + name = self._convert_name(name) + return name in _LISTFIELDS + + def read(self, filepath): + """Read the metadata values from a file path.""" + fp = codecs.open(filepath, 'r', encoding='utf-8') + try: + self.read_file(fp) + finally: + fp.close() + + def read_file(self, fileob): + """Read the metadata values from a file object.""" + msg = message_from_file(fileob) + self._fields['Metadata-Version'] = msg['metadata-version'] + + for field in _version2fieldlist(self['Metadata-Version']): + if field in _LISTFIELDS: + # we can have multiple lines + values = msg.get_all(field) + if field in _LISTTUPLEFIELDS and values is not None: + values = [tuple(value.split(',')) for value in values] + self.set(field, values) + else: + # single line + value = msg[field] + if value is not None and value != 'UNKNOWN': + self.set(field, value) + self.set_metadata_version() + + def write(self, filepath, skip_unknown=False): + """Write the metadata fields to filepath.""" + fp = codecs.open(filepath, 'w', encoding='utf-8') + try: + self.write_file(fp, skip_unknown) + finally: + fp.close() + + def write_file(self, fileobject, skip_unknown=False): + """Write the PKG-INFO format data to a file object.""" + self.set_metadata_version() + + for field in _version2fieldlist(self['Metadata-Version']): + values = self.get(field) + if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): + continue + if field in _ELEMENTSFIELD: + self._write_field(fileobject, field, ','.join(values)) + continue + if field not in _LISTFIELDS: + if field == 'Description': + values = values.replace('\n', '\n |') + values = [values] + + if field in _LISTTUPLEFIELDS: + values = [','.join(value) for value in values] + + for value in values: + self._write_field(fileobject, field, value) + + def update(self, other=None, **kwargs): + """Set metadata values from the given iterable `other` and kwargs. + + Behavior is like `dict.update`: If `other` has a ``keys`` method, + they are looped over and ``self[key]`` is assigned ``other[key]``. + Else, ``other`` is an iterable of ``(key, value)`` iterables. + + Keys that don't match a metadata field or that have an empty value are + dropped. 
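
A sketch of the mapping-style API described above: field names are normalised through `_convert_name`, and the metadata version is recomputed by `check()` and `write_file()`. The exact `check()` output is indicative only.

```python
import sys
from distlib.metadata import Metadata

md = Metadata(mapping={'name': 'demo', 'version': '0.1',
                       'summary': 'A demo package'})
md['Requires-Dist'] = ['requests (>=1.2)']
missing, warnings = md.check()       # also recomputes Metadata-Version
print(md['Metadata-Version'])        # '1.2': Requires-Dist is a PEP 345 field
print(missing, warnings)             # e.g. ['Home-page', 'Author'], []
md.write_file(sys.stdout)            # PKG-INFO style output
```
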
+ """ + def _set(key, value): + if key in _ATTR2FIELD and value: + self.set(self._convert_name(key), value) + + if not other: + # other is None or empty container + pass + elif hasattr(other, 'keys'): + for k in other.keys(): + _set(k, other[k]) + else: + for k, v in other: + _set(k, v) + + if kwargs: + for k, v in kwargs.items(): + _set(k, v) + + def set(self, name, value): + """Control then set a metadata field.""" + name = self._convert_name(name) + + if ((name in _ELEMENTSFIELD or name == 'Platform') and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [v.strip() for v in value.split(',')] + else: + value = [] + elif (name in _LISTFIELDS and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [value] + else: + value = [] + + if logger.isEnabledFor(logging.WARNING): + project_name = self['Name'] + + scheme = get_scheme(self.scheme) + if name in _PREDICATE_FIELDS and value is not None: + for v in value: + # check that the values are valid + if not scheme.is_valid_matcher(v.split(';')[0]): + logger.warning( + '%r: %r is not valid (field %r)', + project_name, v, name) + # FIXME this rejects UNKNOWN, is that right? + elif name in _VERSIONS_FIELDS and value is not None: + if not scheme.is_valid_constraint_list(value): + logger.warning('%r: %r is not a valid version (field %r)', + project_name, value, name) + elif name in _VERSION_FIELDS and value is not None: + if not scheme.is_valid_version(value): + logger.warning('%r: %r is not a valid version (field %r)', + project_name, value, name) + + if name in _UNICODEFIELDS: + if name == 'Description': + value = self._remove_line_prefix(value) + + self._fields[name] = value + + def get(self, name, default=_MISSING): + """Get a metadata field.""" + name = self._convert_name(name) + if name not in self._fields: + if default is _MISSING: + default = self._default_value(name) + return default + if name in _UNICODEFIELDS: + value = self._fields[name] + return value + elif name in _LISTFIELDS: + value = self._fields[name] + if value is None: + return [] + res = [] + for val in value: + valid, val = self._platform(val) + if not valid: + continue + if name not in _LISTTUPLEFIELDS: + res.append(val) + else: + # That's for Project-URL + res.append((val[0], val[1])) + return res + + elif name in _ELEMENTSFIELD: + valid, value = self._platform(self._fields[name]) + if not valid: + return [] + if isinstance(value, string_types): + return value.split(',') + valid, value = self._platform(self._fields[name]) + if not valid: + return None + return value + + def check(self, strict=False, restructuredtext=False): + """Check if the metadata is compliant. 
If strict is True then raise if
+        no Name or Version are provided"""
+        self.set_metadata_version()
+
+        # XXX should check the versions (if the file was loaded)
+        missing, warnings = [], []
+
+        for attr in ('Name', 'Version'):  # required by PEP 345
+            if attr not in self:
+                missing.append(attr)
+
+        if strict and missing != []:
+            msg = 'missing required metadata: %s' % ', '.join(missing)
+            raise MetadataMissingError(msg)
+
+        for attr in ('Home-page', 'Author'):
+            if attr not in self:
+                missing.append(attr)
+
+        if _HAS_DOCUTILS and restructuredtext:
+            warnings.extend(self._check_rst_data(self['Description']))
+
+        # checking metadata 1.2 (XXX needs to check 1.1, 1.0)
+        if self['Metadata-Version'] != '1.2':
+            return missing, warnings
+
+        scheme = get_scheme(self.scheme)
+
+        def are_valid_constraints(value):
+            for v in value:
+                if not scheme.is_valid_matcher(v.split(';')[0]):
+                    return False
+            return True
+
+        for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
+                                   (_VERSIONS_FIELDS,
+                                    scheme.is_valid_constraint_list),
+                                   (_VERSION_FIELDS,
+                                    scheme.is_valid_version)):
+            for field in fields:
+                value = self.get(field, None)
+                if value is not None and not controller(value):
+                    warnings.append('Wrong value for %r: %s' % (field, value))
+
+        return missing, warnings
+
+    def todict(self, skip_missing=False):
+        """Return fields as a dict.
+
+        Field names will be converted to use the underscore-lowercase style
+        instead of hyphen-mixed case (i.e. home_page instead of Home-page).
+        """
+        self.set_metadata_version()
+
+        mapping_1_0 = (
+            ('metadata_version', 'Metadata-Version'),
+            ('name', 'Name'),
+            ('version', 'Version'),
+            ('summary', 'Summary'),
+            ('home_page', 'Home-page'),
+            ('author', 'Author'),
+            ('author_email', 'Author-email'),
+            ('license', 'License'),
+            ('description', 'Description'),
+            ('keywords', 'Keywords'),
+            ('platform', 'Platform'),
+            ('classifier', 'Classifier'),
+            ('download_url', 'Download-URL'),
+        )
+
+        data = {}
+        for key, field_name in mapping_1_0:
+            if not skip_missing or field_name in self._fields:
+                data[key] = self[field_name]
+
+        if self['Metadata-Version'] == '1.2':
+            mapping_1_2 = (
+                ('requires_dist', 'Requires-Dist'),
+                ('requires_python', 'Requires-Python'),
+                ('requires_external', 'Requires-External'),
+                ('provides_dist', 'Provides-Dist'),
+                ('obsoletes_dist', 'Obsoletes-Dist'),
+                ('project_url', 'Project-URL'),
+            )
+            for key, field_name in mapping_1_2:
+                if not skip_missing or field_name in self._fields:
+                    if key != 'project_url':
+                        data[key] = self[field_name]
+                    else:
+                        data[key] = [','.join(u) for u in self[field_name]]
+
+        elif self['Metadata-Version'] == '1.1':
+            mapping_1_1 = (
+                ('provides', 'Provides'),
+                ('requires', 'Requires'),
+                ('obsoletes', 'Obsoletes'),
+            )
+            for key, field_name in mapping_1_1:
+                if not skip_missing or field_name in self._fields:
+                    data[key] = self[field_name]
+
+        return data
+
+    # Mapping API
+    # TODO could add iter* variants
+
+    def keys(self):
+        return list(_version2fieldlist(self['Metadata-Version']))
+
+    def __iter__(self):
+        for key in self.keys():
+            yield key
+
+    def values(self):
+        return [self[key] for key in self.keys()]
+
+    def items(self):
+        return [(key, self[key]) for key in self.keys()]
+
+    def __repr__(self):
+        return '<Metadata %s %s>' % (self.name, self.version)
diff --git a/awx/lib/site-packages/pip/vendor/distlib/resources.py b/awx/lib/site-packages/pip/vendor/distlib/resources.py
new file mode 100644
index 0000000000..5f2d212b17
--- /dev/null
+++ b/awx/lib/site-packages/pip/vendor/distlib/resources.py
@@ -0,0 +1,304 @@
+# -*- coding: utf-8 -*-
+# +# Copyright (C) 2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import unicode_literals + +import bisect +import io +import logging +import os +import shutil +import sys +import zipimport + +from . import DistlibException +from .util import cached_property, get_cache_base, path_to_cache_dir + +logger = logging.getLogger(__name__) + +class Cache(object): + """ + A class implementing a cache for resources that need to live in the file system + e.g. shared libraries. + """ + + def __init__(self, base=None): + """ + Initialise an instance. + + :param base: The base directory where the cache should be located. If + not specified, this will be the ``resource-cache`` + directory under whatever :func:`get_cache_base` returns. + """ + if base is None: + base = os.path.join(get_cache_base(), 'resource-cache') + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if not os.path.isdir(base): + os.makedirs(base) + self.base = os.path.abspath(os.path.normpath(base)) + + def prefix_to_dir(self, prefix): + """ + Converts a resource prefix to a directory name in the cache. + """ + return path_to_cache_dir(prefix) + + def is_stale(self, resource, path): + """ + Is the cache stale for the given resource? + + :param resource: The :class:`Resource` being cached. + :param path: The path of the resource in the cache. + :return: True if the cache is stale. + """ + # Cache invalidation is a hard problem :-) + return True + + def get(self, resource): + """ + Get a resource into the cache, + + :param resource: A :class:`Resource` instance. + :return: The pathname of the resource in the cache. + """ + prefix, path = resource.finder.get_cache_info(resource) + if prefix is None: + result = path + else: + result = os.path.join(self.base, self.prefix_to_dir(prefix), path) + dirname = os.path.dirname(result) + if not os.path.isdir(dirname): + os.makedirs(dirname) + if not os.path.exists(result): + stale = True + else: + stale = self.is_stale(resource, path) + if stale: + # write the bytes of the resource to the cache location + with open(result, 'wb') as f: + f.write(resource.bytes) + return result + + def clear(self): + """ + Clear the cache. + """ + not_removed = [] + for fn in os.listdir(self.base): + fn = os.path.join(self.base, fn) + try: + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + except Exception: + not_removed.append(fn) + return not_removed + +cache = Cache() + +class ResourceBase(object): + def __init__(self, finder, name): + self.finder = finder + self.name = name + +class Resource(ResourceBase): + """ + A class representing an in-package resource, such as a data file. This is + not normally instantiated by user code, but rather by a + :class:`ResourceFinder` which manages the resource. + """ + is_container = False # Backwards compatibility + + def as_stream(self): + "Get the resource as a stream. Not a property, as not idempotent." 
+ return self.finder.get_stream(self) + + @cached_property + def file_path(self): + return cache.get(self) + + @cached_property + def bytes(self): + return self.finder.get_bytes(self) + + @cached_property + def size(self): + return self.finder.get_size(self) + +class ResourceContainer(ResourceBase): + is_container = True # Backwards compatibility + + @cached_property + def resources(self): + return self.finder.get_resources(self) + +class ResourceFinder(object): + """ + Resource finder for file system resources. + """ + def __init__(self, module): + self.module = module + self.loader = getattr(module, '__loader__', None) + self.base = os.path.dirname(getattr(module, '__file__', '')) + + def _make_path(self, resource_name): + parts = resource_name.split('/') + parts.insert(0, self.base) + return os.path.join(*parts) + + def _find(self, path): + return os.path.exists(path) + + def get_cache_info(self, resource): + return None, resource.path + + def find(self, resource_name): + path = self._make_path(resource_name) + if not self._find(path): + result = None + else: + if self._is_directory(path): + result = ResourceContainer(self, resource_name) + else: + result = Resource(self, resource_name) + result.path = path + return result + + def get_stream(self, resource): + return open(resource.path, 'rb') + + def get_bytes(self, resource): + with open(resource.path, 'rb') as f: + return f.read() + + def get_size(self, resource): + return os.path.getsize(resource.path) + + def get_resources(self, resource): + def allowed(f): + return f != '__pycache__' and not f.endswith(('.pyc', '.pyo')) + return set([f for f in os.listdir(resource.path) if allowed(f)]) + + def is_container(self, resource): + return self._is_directory(resource.path) + + _is_directory = staticmethod(os.path.isdir) + +class ZipResourceFinder(ResourceFinder): + """ + Resource finder for resources in .zip files. 
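
The intended entry point for all of this is the `finder()` helper defined at the bottom of the file. A usage sketch with a hypothetical package and resource name:

```python
from distlib.resources import finder

f = finder('mypackage')               # ResourceFinder or ZipResourceFinder
r = f.find('data/config.json')        # None if the resource doesn't exist
if r is not None and not r.is_container:
    print(r.size)                     # delegates to the finder
    print(r.bytes[:20])               # cached after first access
    with r.as_stream() as stream:     # not cached; one stream per call
        print(stream.read(20))
```
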
+ """ + def __init__(self, module): + super(ZipResourceFinder, self).__init__(module) + archive = self.loader.archive + self.prefix_len = 1 + len(archive) + # PyPy doesn't have a _files attr on zipimporter, and you can't set one + if hasattr(self.loader, '_files'): + self._files = self.loader._files + else: + self._files = zipimport._zip_directory_cache[archive] + self.index = sorted(self._files) + + def _find(self, path): + path = path[self.prefix_len:] + if path in self._files: + result = True + else: + if path[-1] != os.sep: + path = path + os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + if not result: + logger.debug('_find failed: %r %r', path, self.loader.prefix) + else: + logger.debug('_find worked: %r %r', path, self.loader.prefix) + return result + + def get_cache_info(self, resource): + prefix = self.loader.archive + path = resource.path[1 + len(prefix):] + return prefix, path + + def get_bytes(self, resource): + return self.loader.get_data(resource.path) + + def get_stream(self, resource): + return io.BytesIO(self.get_bytes(resource)) + + def get_size(self, resource): + path = resource.path[self.prefix_len:] + return self._files[path][3] + + def get_resources(self, resource): + path = resource.path[self.prefix_len:] + if path[-1] != os.sep: + path += os.sep + plen = len(path) + result = set() + i = bisect.bisect(self.index, path) + while i < len(self.index): + if not self.index[i].startswith(path): + break + s = self.index[i][plen:] + result.add(s.split(os.sep, 1)[0]) # only immediate children + i += 1 + return result + + def _is_directory(self, path): + path = path[self.prefix_len:] + if path[-1] != os.sep: + path += os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + return result + +_finder_registry = { + type(None): ResourceFinder, + zipimport.zipimporter: ZipResourceFinder +} + +try: + import _frozen_importlib + _finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder +except (ImportError, AttributeError): + pass + +def register_finder(loader, finder_maker): + _finder_registry[type(loader)] = finder_maker + +_finder_cache = {} + +def finder(package): + """ + Return a resource finder for a package. + :param package: The name of the package. + :return: A :class:`ResourceFinder` instance for the package. + """ + if package in _finder_cache: + result = _finder_cache[package] + else: + if package not in sys.modules: + __import__(package) + module = sys.modules[package] + path = getattr(module, '__path__', None) + if path is None: + raise DistlibException('You cannot get a finder for a module, ' + 'only for a package') + loader = getattr(module, '__loader__', None) + finder_maker = _finder_registry.get(type(loader)) + if finder_maker is None: + raise DistlibException('Unable to locate finder for %r' % package) + result = finder_maker(module) + _finder_cache[package] = result + return result diff --git a/awx/lib/site-packages/pip/vendor/distlib/scripts.py b/awx/lib/site-packages/pip/vendor/distlib/scripts.py new file mode 100644 index 0000000000..be923f61b4 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/scripts.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import logging +import os +import re +import struct +import sys + +from . 
import DistlibException +from .compat import sysconfig, fsencode, detect_encoding +from .resources import finder +from .util import FileOperator, get_export_entry, convert_path, get_executable + +logger = logging.getLogger(__name__) + +# check if Python is called on the first line with this expression +FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') +SCRIPT_TEMPLATE = '''%(shebang)s +if __name__ == '__main__': + import sys, re + + def _resolve(module, func): + __import__(module) + mod = sys.modules[module] + parts = func.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + try: + sys.argv[0] = re.sub('-script.pyw?$', '', sys.argv[0]) + + func = _resolve('%(module)s', '%(func)s') + rc = func() # None interpreted as 0 + except Exception as e: # only supporting Python >= 2.6 + sys.stderr.write('%%s\\n' %% e) + rc = 1 + sys.exit(rc) +''' + + +class ScriptMaker(object): + """ + A class to copy or create scripts from source scripts or callable + specifications. + """ + script_template = SCRIPT_TEMPLATE + + executable = None # for shebangs + + def __init__(self, source_dir, target_dir, add_launchers=True, + dry_run=False, fileop=None): + self.source_dir = source_dir + self.target_dir = target_dir + self.add_launchers = add_launchers + self.force = False + self.set_mode = False + self._fileop = fileop or FileOperator(dry_run) + + def _get_alternate_executable(self, executable, flags): + if 'gui' in flags and os.name == 'nt': + dn, fn = os.path.split(executable) + fn = fn.replace('python', 'pythonw') + executable = os.path.join(dn, fn) + return executable + + def _get_shebang(self, encoding, post_interp=b'', flags=None): + if self.executable: + executable = self.executable + elif not sysconfig.is_python_build(): + executable = get_executable() + elif hasattr(sys, 'base_prefix') and sys.prefix != sys.base_prefix: + executable = os.path.join( + sysconfig.get_path('scripts'), + 'python%s' % sysconfig.get_config_var('EXE')) + else: + executable = os.path.join( + sysconfig.get_config_var('BINDIR'), + 'python%s%s' % (sysconfig.get_config_var('VERSION'), + sysconfig.get_config_var('EXE'))) + if flags: + executable = self._get_alternate_executable(executable, flags) + + executable = fsencode(executable) + shebang = b'#!' + executable + post_interp + b'\n' + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. + try: + shebang.decode('utf-8') + except UnicodeDecodeError: + raise ValueError( + 'The shebang (%r) is not decodable from utf-8' % shebang) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. 
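To see what the template and shebang logic above produce, here is a sketch of driving `ScriptMaker` with an export-entry specification (the directory and entry names are hypothetical, and `add_launchers` is disabled to keep the example POSIX-only):

```python
from distlib.scripts import ScriptMaker

maker = ScriptMaker(source_dir=None, target_dir='/tmp/bin',
                    add_launchers=False)
maker.set_mode = True                  # set the executable bits on POSIX

# Expands SCRIPT_TEMPLATE with module='mypkg.cli', func='main' and a
# shebang computed by _get_shebang(); returns the paths written.
written = maker.make('frob = mypkg.cli:main')
print(written)                         # e.g. ['/tmp/bin/frob']
```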
+ if encoding != 'utf-8': + try: + shebang.decode(encoding) + except UnicodeDecodeError: + raise ValueError( + 'The shebang (%r) is not decodable ' + 'from the script encoding (%r)' % (shebang, encoding)) + return shebang + + def _get_script_text(self, shebang, entry): + return self.script_template % dict(shebang=shebang, + module=entry.prefix, + func=entry.suffix) + + def _make_script(self, entry, filenames): + shebang = self._get_shebang('utf-8', flags=entry.flags).decode('utf-8') + script = self._get_script_text(shebang, entry) + outname = os.path.join(self.target_dir, entry.name) + use_launcher = self.add_launchers and os.name == 'nt' + if use_launcher: + exename = '%s.exe' % outname + if 'gui' in entry.flags: + ext = 'pyw' + launcher = self._get_launcher('w') + else: + ext = 'py' + launcher = self._get_launcher('t') + outname = '%s-script.%s' % (outname, ext) + self._fileop.write_text_file(outname, script, 'utf-8') + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + if use_launcher: + self._fileop.write_binary_file(exename, launcher) + filenames.append(exename) + + def _copy_script(self, script, filenames): + adjust = False + script = convert_path(script) + outname = os.path.join(self.target_dir, os.path.basename(script)) + filenames.append(outname) + script = os.path.join(self.source_dir, script) + if not self.force and not self._fileop.newer(script, outname): + logger.debug('not copying %s (up-to-date)', script) + return + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we can read the + # script. + try: + f = open(script, 'rb') + except IOError: + if not self.dry_run: + raise + f = None + else: + encoding, lines = detect_encoding(f.readline) + f.seek(0) + first_line = f.readline() + if not first_line: + logger.warning('%s: %s is an empty file (skipping)', + self.get_command_name(), script) + return + + match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if not adjust: + if f: + f.close() + self._fileop.copy_file(script, outname) + else: + logger.info('copying and adjusting %s -> %s', script, + self.target_dir) + if not self._fileop.dry_run: + shebang = self._get_shebang(encoding, post_interp) + use_launcher = self.add_launchers and os.name == 'nt' + if use_launcher: + n, e = os.path.splitext(outname) + exename = n + '.exe' + if b'pythonw' in first_line: + launcher = self._get_launcher('w') + suffix = '-script.pyw' + else: + launcher = self._get_launcher('t') + suffix = '-script.py' + outname = n + suffix + filenames[-1] = outname + self._fileop.write_binary_file(outname, shebang + f.read()) + if use_launcher: + self._fileop.write_binary_file(exename, launcher) + filenames.append(exename) + if f: + f.close() + if self.set_mode: + self._fileop.set_executable_mode([outname]) + + @property + def dry_run(self): + return self._fileop.dry_run + + @dry_run.setter + def dry_run(self, value): + self._fileop.dry_run = value + + if os.name == 'nt': + # Executable launcher support. + # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ + + def _get_launcher(self, kind): + if struct.calcsize('P') == 8: # 64-bit + bits = '64' + else: + bits = '32' + name = '%s%s.exe' % (kind, bits) + result = finder('distlib').find(name).bytes + return result + + # Public API follows + + def make(self, specification): + """ + Make a script. 
+ + :param specification: The specification, which is either a valid export + entry specification (to make a script from a + callable) or a filename (to make a script by + copying from a source location). + :return: A list of all absolute pathnames written to. + """ + filenames = [] + entry = get_export_entry(specification) + if entry is None: + self._copy_script(specification, filenames) + else: + self._make_script(entry, filenames) + return filenames + + def make_multiple(self, specifications): + """ + Take a list of specifications and make scripts from them. + :param specifications: A list of specifications. + :return: A list of all absolute pathnames written to. + """ + filenames = [] + for specification in specifications: + filenames.extend(self.make(specification)) + return filenames diff --git a/awx/lib/site-packages/pip/vendor/distlib/util.py b/awx/lib/site-packages/pip/vendor/distlib/util.py new file mode 100644 index 0000000000..f44e8c3528 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/util.py @@ -0,0 +1,1313 @@ +# +# Copyright (C) 2012-2013 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import codecs +from collections import deque +import contextlib +import csv +from glob import iglob as std_iglob +import io +import json +import logging +import os +import py_compile +import re +import shutil +import socket +import ssl +import sys +import tarfile +import tempfile +import time +import zipfile + +from . import DistlibException +from .compat import (string_types, text_type, shutil, raw_input, + cache_from_source, urlopen, httplib, xmlrpclib, splittype, + HTTPHandler, HTTPSHandler as BaseHTTPSHandler, + URLError, match_hostname, CertificateError) + +logger = logging.getLogger(__name__) + +class Container(object): + """ + A generic container for when multiple values need to be returned + """ + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + +# +# Requirement parsing code for name + optional constraints + optional extras +# +# e.g. 'foo >= 1.2, < 2.0 [bar, baz]' +# +# The regex can seem a bit hairy, so we build it up out of smaller pieces +# which are manageable.
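As a concrete illustration of what the assembled expression accepts, here is a sketch of `parse_requirement` (defined just below) on a sample string; note that extras precede constraints in this grammar (imports assume a standalone distlib; here it lives under pip.vendor.distlib):

```python
from distlib.util import parse_requirement

r = parse_requirement('foo [bar, baz] >= 1.2, < 2.0')
print(r.name)          # 'foo'
print(r.constraints)   # [('>=', '1.2'), ('<', '2.0')]
print(r.extras)        # ['bar', 'baz']
print(r.requirement)   # 'foo (>= 1.2, < 2.0)'
```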
+# + +COMMA = r'\s*,\s*' +COMMA_RE = re.compile(COMMA) + +IDENT = r'(\w|[.-])+' +RELOP = '([<>=!]=)|[<>]' + +# +# The first relop is optional - if absent, will be taken as '==' +# +BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + IDENT + ')(' + COMMA + '(' + + RELOP + r')\s*(' + IDENT + '))*') + +# +# Either the bare constraints or the bare constraints in parentheses +# +CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + r')\s*\)|(?P<c2>' + + BARE_CONSTRAINTS + '\s*)') + +EXTRA_LIST = IDENT + '(' + COMMA + IDENT + ')*' +EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]' +REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' + + CONSTRAINTS + ')?$') +REQUIREMENT_RE = re.compile(REQUIREMENT) + +# +# Used to scan through the constraints +# +RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + IDENT + ')' +RELOP_IDENT_RE = re.compile(RELOP_IDENT) + +def parse_requirement(s): + + def get_constraint(m): + d = m.groupdict() + return d['op'], d['vn'] + + result = None + m = REQUIREMENT_RE.match(s) + if m: + d = m.groupdict() + name = d['dn'] + cons = d['c1'] or d['c2'] + if not cons: + cons = None + constr = '' + rs = d['dn'] + else: + if cons[0] not in '<>!=': + cons = '==' + cons + iterator = RELOP_IDENT_RE.finditer(cons) + cons = [get_constraint(m) for m in iterator] + rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons])) + if not d['ex']: + extras = None + else: + extras = COMMA_RE.split(d['ex']) + result = Container(name=name, constraints=cons, extras=extras, + requirement=rs, source=s) + return result + + +def get_resources_dests(resources_root, rules): + """Find destinations for resources files""" + + def get_rel_path(base, path): + # normalizes and returns a lstripped-/-separated path + base = base.replace(os.path.sep, '/') + path = path.replace(os.path.sep, '/') + assert path.startswith(base) + return path[len(base):].lstrip('/') + + + destinations = {} + for base, suffix, dest in rules: + prefix = os.path.join(resources_root, base) + for abs_base in iglob(prefix): + abs_glob = os.path.join(abs_base, suffix) + for abs_path in iglob(abs_glob): + resource_file = get_rel_path(resources_root, abs_path) + if dest is None: # remove the entry if it was here + destinations.pop(resource_file, None) + else: + rel_path = get_rel_path(abs_base, abs_path) + rel_dest = dest.replace(os.path.sep, '/').rstrip('/') + destinations[resource_file] = rel_dest + '/' + rel_path + return destinations + + +def in_venv(): + if hasattr(sys, 'real_prefix'): + # virtualenv venvs + result = True + else: + # PEP 405 venvs + result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) + return result + + +def get_executable(): + if sys.platform == 'darwin' and ('__VENV_LAUNCHER__' + in os.environ): + result = os.environ['__VENV_LAUNCHER__'] + else: + result = sys.executable + return result + + +def proceed(prompt, allowed_chars, error_prompt=None, default=None): + p = prompt + while True: + s = raw_input(p) + p = prompt + if not s and default: + s = default + if s: + c = s[0].lower() + if c in allowed_chars: + break + if error_prompt: + p = '%c: %s\n%s' % (c, error_prompt, prompt) + return c + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + +@contextlib.contextmanager +def chdir(d): + cwd = os.getcwd() + try: + os.chdir(d) + yield + finally: + os.chdir(cwd) + + +@contextlib.contextmanager +def socket_timeout(seconds=15): + cto = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(seconds) + yield + finally: + 
socket.setdefaulttimeout(cto) + + +class cached_property(object): + def __init__(self, func): + self.func = func + #for attr in ('__name__', '__module__', '__doc__'): + # setattr(self, attr, getattr(func, attr, None)) + + def __get__(self, obj, type=None): + if obj is None: + return self + value = self.func(obj) + object.__setattr__(obj, self.func.__name__, value) + #obj.__dict__[self.func.__name__] = value = self.func(obj) + return value + +def convert_path(pathname): + """Return 'pathname' as a name that will work on the native filesystem. + + The path is split on '/' and put back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + """ + if os.sep == '/': + return pathname + if not pathname: + return pathname + if pathname[0] == '/': + raise ValueError("path '%s' cannot be absolute" % pathname) + if pathname[-1] == '/': + raise ValueError("path '%s' cannot end with '/'" % pathname) + + paths = pathname.split('/') + while os.curdir in paths: + paths.remove(os.curdir) + if not paths: + return os.curdir + return os.path.join(*paths) + + +class FileOperator(object): + def __init__(self, dry_run=False): + self.dry_run = dry_run + self.ensured = set() + self._init_record() + + def _init_record(self): + self.record = False + self.files_written = set() + self.dirs_created = set() + + def record_as_written(self, path): + if self.record: + self.files_written.add(path) + + def newer(self, source, target): + """Tell if the target is newer than the source. + + Returns true if 'source' exists and is more recently modified than + 'target', or if 'source' exists and 'target' doesn't. + + Returns false if both exist and 'target' is the same age or younger + than 'source'. Raises DistlibException if 'source' does not exist. + + Note that this test is not very accurate: files created in the same + second will have the same "age". + """ + if not os.path.exists(source): + raise DistlibException("file '%r' does not exist" % + os.path.abspath(source)) + if not os.path.exists(target): + return True + + return os.stat(source).st_mtime > os.stat(target).st_mtime + + def copy_file(self, infile, outfile): + """Copy a file respecting dry-run and force flags.
+ """ + assert not os.path.isdir(outfile) + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying %s to %s', infile, outfile) + if not self.dry_run: + shutil.copyfile(infile, outfile) + if self.record: + self.files_written.add(outfile) + + def copy_stream(self, instream, outfile, encoding=None): + assert not os.path.isdir(outfile) + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying stream %s to %s', instream, outfile) + if not self.dry_run: + if encoding is None: + outstream = open(outfile, 'wb') + else: + outstream = codecs.open(outfile, 'w', encoding=encoding) + try: + shutil.copyfileobj(instream, outstream) + finally: + outstream.close() + if self.record: + self.files_written.add(outfile) + + def write_binary_file(self, path, data): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + with open(path, 'wb') as f: + f.write(data) + if self.record: + self.files_written.add(path) + + def write_text_file(self, path, data, encoding): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + with open(path, 'wb') as f: + f.write(data.encode(encoding)) + if self.record: + self.files_written.add(path) + + def set_mode(self, bits, mask, files): + if os.name == 'posix': + # Set the executable bits (owner, group, and world) on + # all the files specified. + for f in files: + if self.dry_run: + logger.info("changing mode of %s", f) + else: + mode = (os.stat(f).st_mode | bits) & mask + logger.info("changing mode of %s to %o", f, mode) + os.chmod(f, mode) + + set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) + + def ensure_dir(self, path): + path = os.path.abspath(path) + if path not in self.ensured and not os.path.exists(path): + self.ensured.add(path) + d, f = os.path.split(path) + self.ensure_dir(d) + logger.info('Creating %s' % path) + if not self.dry_run: + os.mkdir(path) + if self.record: + self.dirs_created.add(path) + + def byte_compile(self, path, optimize=False, force=False, prefix=None): + dpath = cache_from_source(path, not optimize) + logger.info('Byte-compiling %s to %s', path, dpath) + if not self.dry_run: + if force or self.newer(path, dpath): + if not prefix: + diagpath = None + else: + assert path.startswith(prefix) + diagpath = path[len(prefix):] + py_compile.compile(path, dpath, diagpath, True) # raise on error + if self.record: + self.files_written.add(dpath) + return dpath + + def ensure_removed(self, path): + if os.path.exists(path): + if os.path.isdir(path) and not os.path.islink(path): + logger.debug('Removing directory tree at %s', path) + if not self.dry_run: + shutil.rmtree(path) + if self.record: + if path in self.dirs_created: + self.dirs_created.remove(path) + else: + if os.path.islink(path): + s = 'link' + else: + s = 'file' + logger.debug('Removing %s %s', s, path) + if not self.dry_run: + os.remove(path) + if self.record: + if path in self.files_written: + self.files_written.remove(path) + + def is_writable(self, path): + result = False + while not result: + if os.path.exists(path): + result = os.access(path, os.W_OK) + break + parent = os.path.dirname(path) + if parent == path: + break + path = parent + return result + + def commit(self): + """ + Commit recorded changes, turn off recording, return + changes. 
+ """ + assert self.record + result = self.files_written, self.dirs_created + self._init_record() + return result + + def rollback(self): + if not self.dry_run: + for f in list(self.files_written): + if os.path.exists(f): + os.remove(f) + # dirs should all be empty now, except perhaps for + # __pycache__ subdirs + # reverse so that subdirs appear before their parents + dirs = sorted(self.dirs_created, reverse=True) + for d in dirs: + flist = os.listdir(d) + if flist: + assert flist == ['__pycache__'] + sd = os.path.join(d, flist[0]) + os.rmdir(sd) + os.rmdir(d) # should fail if non-empty + self._init_record() + +def resolve(module_name, dotted_path): + if module_name in sys.modules: + mod = sys.modules[module_name] + else: + mod = __import__(module_name) + if dotted_path is None: + result = mod + else: + parts = dotted_path.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + +class ExportEntry(object): + def __init__(self, name, prefix, suffix, flags): + self.name = name + self.prefix = prefix + self.suffix = suffix + self.flags = flags + + @cached_property + def value(self): + return resolve(self.prefix, self.suffix) + + def __repr__(self): + return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, + self.suffix, self.flags) + + def __eq__(self, other): + if not isinstance(other, ExportEntry): + result = False + else: + result = (self.name == other.name and + self.prefix == other.prefix and + self.suffix == other.suffix and + self.flags == other.flags) + return result + + __hash__ = object.__hash__ + + +ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+) + \s*=\s*(?P<callable>(\w+)([:\.]\w+)*) + \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? + ''', re.VERBOSE) + + +def get_export_entry(specification): + m = ENTRY_RE.search(specification) + if not m: + result = None + if '[' in specification or ']' in specification: + raise DistlibException('Invalid specification ' + '%r' % specification) + else: + d = m.groupdict() + name = d['name'] + path = d['callable'] + colons = path.count(':') + if colons == 0: + prefix, suffix = path, None + else: + if colons != 1: + raise DistlibException('Invalid specification ' + '%r' % specification) + prefix, suffix = path.split(':') + flags = d['flags'] + if flags is None: + if '[' in specification or ']' in specification: + raise DistlibException('Invalid specification ' + '%r' % specification) + flags = [] + else: + flags = [f.strip() for f in flags.split(',')] + result = ExportEntry(name, prefix, suffix, flags) + return result + + +def get_cache_base(suffix=None): + """ + Return the default base location for distlib caches. If the directory does + not exist, it is created. Use the suffix provided for the base directory, + and default to '.distlib' if it isn't provided. + + On Windows, if LOCALAPPDATA is defined in the environment, then it is + assumed to be a directory, and will be the parent directory of the result. + On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home + directory - using os.expanduser('~') - will be the parent directory of + the result. + + The result is just the directory '.distlib' in the parent directory as + determined above, or with the name specified with ``suffix``. 
+ """ + if suffix is None: + suffix = '.distlib' + if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: + result = os.path.expandvars('$localappdata') + else: + # Assume posix, or old Windows + result = os.path.expanduser('~') + result = os.path.join(result, suffix) + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if not os.path.isdir(result): + os.makedirs(result) + return result + + +def path_to_cache_dir(path): + """ + Convert an absolute path to a directory name for use in a cache. + + The algorithm used is: + + #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. + #. Any occurrence of ``os.sep`` is replaced with ``'--'``. + #. ``'.cache'`` is appended. + """ + d, p = os.path.splitdrive(os.path.abspath(path)) + if d: + d = d.replace(':', '---') + p = p.replace(os.sep, '--') + return d + p + '.cache' + + +def ensure_slash(s): + if not s.endswith('/'): + return s + '/' + return s + + +def parse_credentials(netloc): + username = password = None + if '@' in netloc: + prefix, netloc = netloc.split('@', 1) + if ':' not in prefix: + username = prefix + else: + username, password = prefix.split(':', 1) + return username, password, netloc + + +def get_process_umask(): + result = os.umask(0o22) + os.umask(result) + return result + +def is_string_sequence(seq): + result = True + i = None + for i, s in enumerate(seq): + if not isinstance(s, string_types): + result = False + break + assert i is not None + return result + +PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' + '([0-9][a-z0-9_.+-]*)', re.I) +PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)$') + + +def split_filename(filename, project_name=None): + """ + Extract name, version, python version from a filename (no extension) + + Return name, version, pyver or None + """ + result = None + pyver = None + m = PYTHON_VERSION.search(filename) + if m: + pyver = m.group(1) + filename = filename[:m.start()] + if project_name and len(filename) > len(project_name) + 1: + m = re.match(re.escape(project_name) + r'\b', filename) + if m: + n = m.end() + result = filename[:n], filename[n + 1:], pyver + if result is None: + m = PROJECT_NAME_AND_VERSION.match(filename) + if m: + result = m.group(1), m.group(3), pyver + return result + +# +# Extended metadata functionality +# + +def _get_external_data(url): + result = {} + try: + # urlopen might fail if it runs into redirections, + # because of Python issue #13696. Fixed in locators + # using a custom redirect handler. 
+ resp = urlopen(url) + headers = resp.info() + if headers.get('Content-Type') != 'application/json': + logger.debug('Unexpected response for JSON request') + else: + reader = codecs.getreader('utf-8')(resp) + #data = reader.read().decode('utf-8') + #result = json.loads(data) + result = json.load(reader) + except Exception as e: + logger.exception('Failed to get external data for %s: %s', url, e) + return result + + +def get_project_data(name): + url = ('https://www.red-dove.com/pypi/projects/' + '%s/%s/project.json' % (name[0].upper(), name)) + result = _get_external_data(url) + return result + +def get_package_data(dist): + name, version = dist.name, dist.version + url = ('https://www.red-dove.com/pypi/projects/' + '%s/%s/package-%s.json' % (name[0].upper(), name, version)) + result = _get_external_data(url) + if 'metadata' in result and dist.metadata: + update_metadata(dist.metadata, result) + return result + +RENAMES = { # Temporary + 'classifiers': 'Classifier', + 'use_2to3': None, + 'use_2to3_fixers': None, + 'test_suite': None, +} + +def update_metadata(metadata, pkginfo): + # update dist's metadata from received package data + assert metadata + assert 'metadata' in pkginfo + for k, v in pkginfo['metadata'].items(): + k = k.replace('-', '_') + k = RENAMES.get(k, k) + if k is not None: + metadata[k] = v + metadata.set_metadata_version() + if 'requirements' in pkginfo: + metadata.dependencies = pkginfo['requirements'] + + +# +# Simple event pub/sub +# + +class EventMixin(object): + """ + A very simple publish/subscribe system. + """ + def __init__(self): + self._subscribers = {} + + def add(self, event, subscriber, append=True): + """ + Add a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be added (and called when the + event is published). + :param append: Whether to append or prepend the subscriber to an + existing subscriber list for the event. + """ + subs = self._subscribers + if event not in subs: + subs[event] = deque([subscriber]) + else: + sq = subs[event] + if append: + sq.append(subscriber) + else: + sq.appendleft(subscriber) + + def remove(self, event, subscriber): + """ + Remove a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be removed. + """ + subs = self._subscribers + if event not in subs: + raise ValueError('No subscribers: %r' % event) + subs[event].remove(subscriber) + + def get_subscribers(self, event): + """ + Return an iterator for the subscribers for an event. + :param event: The event to return subscribers for. + """ + return iter(self._subscribers.get(event, ())) + + def publish(self, event, *args, **kwargs): + """ + Publish an event and return a list of values returned by its + subscribers. + + :param event: The event to publish. + :param args: The positional arguments to pass to the event's + subscribers. + :param kwargs: The keyword arguments to pass to the event's + subscribers.
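A sketch of the publish/subscribe flow (the class, event, and handler names are made up for illustration):

```python
from distlib.util import EventMixin

class Installer(EventMixin):
    def run(self, dist):
        return self.publish('pre-install', dist)

def audit(event, dist):
    # subscribers are called as subscriber(event, *args, **kwargs)
    print('%s: %s' % (event, dist))
    return True

inst = Installer()
inst.add('pre-install', audit)
print(inst.run('requests-1.2.3'))   # [True] -- collected return values
```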
+ """ + result = [] + for subscriber in self.get_subscribers(event): + try: + value = subscriber(event, *args, **kwargs) + except Exception: + logger.exception('Exception during event publication') + value = None + result.append(value) + logger.debug('publish %s: args = %s, kwargs = %s, result = %s', + event, args, kwargs, result) + return result + +# +# Simple sequencing +# +class Sequencer(object): + def __init__(self): + self._preds = {} + self._succs = {} + self._nodes = set() # nodes with no preds/succs + + def add_node(self, node): + self._nodes.add(node) + + def remove_node(self, node): + self._nodes.remove(node) + + def add(self, pred, succ): + assert pred != succ + self._preds.setdefault(succ, set()).add(pred) + self._succs.setdefault(pred, set()).add(succ) + + def remove(self, pred, succ): + assert pred != succ + try: + preds = self._preds[succ] + succs = self._succs[pred] + except KeyError: + raise ValueError('%r not a successor of anything' % succ) + try: + preds.remove(pred) + succs.remove(succ) + except KeyError: + raise ValueError('%r not a successor of %r' % (succ, pred)) + + def is_step(self, step): + return (step in self._preds or step in self._succs or + step in self._nodes) + + def get_steps(self, final): + if not self.is_step(final): + raise ValueError('Unknown: %r' % final) + result = [] + todo = [] + seen = set() + todo.append(final) + while todo: + step = todo.pop(0) + if step in seen: + # if a step was already seen, + # move it to the end (so it will appear earlier + # when reversed on return) ... but not for the + # final step, as that would be confusing for + # users + if step != final: + result.remove(step) + result.append(step) + else: + seen.add(step) + result.append(step) + preds = self._preds.get(step, ()) + todo.extend(preds) + return reversed(result) + + @property + def strong_connections(self): + #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + index_counter = [0] + stack = [] + lowlinks = {} + index = {} + result = [] + + graph = self._succs + + def strongconnect(node): + # set the depth index for this node to the smallest unused index + index[node] = index_counter[0] + lowlinks[node] = index_counter[0] + index_counter[0] += 1 + stack.append(node) + + # Consider successors + try: + successors = graph[node] + except Exception: + successors = [] + for successor in successors: + if successor not in lowlinks: + # Successor has not yet been visited + strongconnect(successor) + lowlinks[node] = min(lowlinks[node],lowlinks[successor]) + elif successor in stack: + # the successor is in the stack and hence in the current + # strongly connected component (SCC) + lowlinks[node] = min(lowlinks[node],index[successor]) + + # If `node` is a root node, pop the stack and generate an SCC + if lowlinks[node] == index[node]: + connected_component = [] + + while True: + successor = stack.pop() + connected_component.append(successor) + if successor == node: break + component = tuple(connected_component) + # storing the result + result.append(component) + + for node in graph: + if node not in lowlinks: + strongconnect(node) + + return result + + @property + def dot(self): + result = ['digraph G {'] + for succ in self._preds: + preds = self._preds[succ] + for pred in preds: + result.append(' %s -> %s;' % (pred, succ)) + for node in self._nodes: + result.append(' %s;' % node) + result.append('}') + return '\n'.join(result) + +# +# Unarchiving functionality for zip, tar, tgz, tbz, whl +# + +ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', 
'.zip', + '.tgz', '.tbz', '.whl') + +def unarchive(archive_filename, dest_dir, format=None, check=True): + + def check_path(path): + if not isinstance(path, text_type): + path = path.decode('utf-8') + p = os.path.abspath(os.path.join(dest_dir, path)) + if not p.startswith(dest_dir) or p[plen] != os.sep: + raise ValueError('path outside destination: %r' % p) + + dest_dir = os.path.abspath(dest_dir) + plen = len(dest_dir) + archive = None + if format is None: + if archive_filename.endswith(('.zip', '.whl')): + format = 'zip' + elif archive_filename.endswith(('.tar.gz', '.tgz')): + format = 'tgz' + mode = 'r:gz' + elif archive_filename.endswith(('.tar.bz2', '.tbz')): + format = 'tbz' + mode = 'r:bz2' + elif archive_filename.endswith('.tar'): + format = 'tar' + mode = 'r' + else: + raise ValueError('Unknown format for %r' % archive_filename) + try: + if format == 'zip': + archive = zipfile.ZipFile(archive_filename, 'r') + if check: + names = archive.namelist() + for name in names: + check_path(name) + else: + archive = tarfile.open(archive_filename, mode) + if check: + names = archive.getnames() + for name in names: + check_path(name) + if format != 'zip' and sys.version_info[0] < 3: + # See Python issue 17153. If the dest path contains Unicode, + # tarfile extraction fails on Python 2.x if a member path name + # contains non-ASCII characters - it leads to an implicit + # bytes -> unicode conversion using ASCII to decode. + for tarinfo in archive.getmembers(): + if not isinstance(tarinfo.name, text_type): + tarinfo.name = tarinfo.name.decode('utf-8') + archive.extractall(dest_dir) + + finally: + if archive: + archive.close() + + +def zip_dir(directory): + """zip a directory tree into a BytesIO object""" + result = io.BytesIO() + dlen = len(directory) + with zipfile.ZipFile(result, "w") as zf: + for root, dirs, files in os.walk(directory): + for name in files: + full = os.path.join(root, name) + rel = root[dlen:] + dest = os.path.join(rel, name) + zf.write(full, dest) + return result + +# +# Simple progress bar +# + +UNITS = ('', 'K', 'M', 'G','T','P') + +class Progress(object): + unknown = 'UNKNOWN' + + def __init__(self, minval=0, maxval=100): + assert maxval is None or maxval >= minval + self.min = self.cur = minval + self.max = maxval + self.started = None + self.elapsed = 0 + self.done = False + + def update(self, curval): + assert self.min <= curval + assert self.max is None or curval <= self.max + self.cur = curval + now = time.time() + if self.started is None: + self.started = now + else: + self.elapsed = now - self.started + + def increment(self, incr): + assert incr >= 0 + self.update(self.cur + incr) + + def start(self): + self.update(self.min) + return self + + def stop(self): + if self.max is not None: + self.update(self.max) + self.done = True + + @property + def maximum(self): + return self.unknown if self.max is None else self.max + + @property + def percentage(self): + if self.done: + result = '100 %' + elif self.max is None: + result = ' ?? %' + else: + v = 100.0 * (self.cur - self.min) / (self.max - self.min) + result = '%3d %%' % v + return result + + def format_duration(self, duration): + if (duration <= 0) and self.max is None or self.cur == self.min: + result = '??:??:??' 
+ #elif duration < 1: + # result = '--:--:--' + else: + result = time.strftime('%H:%M:%S', time.gmtime(duration)) + return result + + @property + def ETA(self): + if self.done: + prefix = 'Done' + t = self.elapsed + #import pdb; pdb.set_trace() + else: + prefix = 'ETA ' + if self.max is None: + t = -1 + elif self.elapsed == 0 or (self.cur == self.min): + t = 0 + else: + #import pdb; pdb.set_trace() + t = float(self.max - self.min) + t /= self.cur - self.min + t = (t - 1) * self.elapsed + return '%s: %s' % (prefix, self.format_duration(t)) + + @property + def speed(self): + if self.elapsed == 0: + result = 0.0 + else: + result = (self.cur - self.min) / self.elapsed + for unit in UNITS: + if result < 1000: + break + result /= 1000.0 + return '%d %sB/s' % (result, unit) + +# +# Glob functionality +# + +RICH_GLOB = re.compile(r'\{([^}]*)\}') +_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') +_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') + + +def iglob(path_glob): + """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" + if _CHECK_RECURSIVE_GLOB.search(path_glob): + msg = """invalid glob %r: recursive glob "**" must be used alone""" + raise ValueError(msg % path_glob) + if _CHECK_MISMATCH_SET.search(path_glob): + msg = """invalid glob %r: mismatching set marker '{' or '}'""" + raise ValueError(msg % path_glob) + return _iglob(path_glob) + + +def _iglob(path_glob): + rich_path_glob = RICH_GLOB.split(path_glob, 1) + if len(rich_path_glob) > 1: + assert len(rich_path_glob) == 3, rich_path_glob + prefix, set, suffix = rich_path_glob + for item in set.split(','): + for path in _iglob(''.join((prefix, item, suffix))): + yield path + else: + if '**' not in path_glob: + for item in std_iglob(path_glob): + yield item + else: + prefix, radical = path_glob.split('**', 1) + if prefix == '': + prefix = '.' 
+ if radical == '': + radical = '*' + else: + # we support both + radical = radical.lstrip('/') + radical = radical.lstrip('\\') + for path, dir, files in os.walk(prefix): + path = os.path.normpath(path) + for file in _iglob(os.path.join(path, radical)): + yield file + + + +# +# HTTPSConnection which verifies certificates/matches domains +# + +class HTTPSConnection(httplib.HTTPSConnection): + ca_certs = None # set this to the path to the certs file (.pem) + check_domain = True # only used if ca_certs is not None + + # noinspection PyPropertyAccess + def connect(self): + sock = socket.create_connection((self.host, self.port), self.timeout) + if getattr(self, '_tunnel_host', False): + self.sock = sock + self._tunnel() + + if not hasattr(ssl, 'SSLContext'): + # For 2.x + if self.ca_certs: + cert_reqs = ssl.CERT_REQUIRED + else: + cert_reqs = ssl.CERT_NONE + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + cert_reqs=cert_reqs, + ssl_version=ssl.PROTOCOL_SSLv23, + ca_certs=self.ca_certs) + else: + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.options |= ssl.OP_NO_SSLv2 + if self.cert_file: + context.load_cert_chain(self.cert_file, self.key_file) + kwargs = {} + if self.ca_certs: + context.verify_mode = ssl.CERT_REQUIRED + context.load_verify_locations(cafile=self.ca_certs) + if getattr(ssl, 'HAS_SNI', False): + kwargs['server_hostname'] = self.host + self.sock = context.wrap_socket(sock, **kwargs) + if self.ca_certs and self.check_domain: + try: + match_hostname(self.sock.getpeercert(), self.host) + except CertificateError: + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + +class HTTPSHandler(BaseHTTPSHandler): + def __init__(self, ca_certs, check_domain=True): + BaseHTTPSHandler.__init__(self) + self.ca_certs = ca_certs + self.check_domain = check_domain + + def _conn_maker(self, *args, **kwargs): + """ + This is called to create a connection instance. Normally you'd + pass a connection class to do_open, but it doesn't actually check for + a class, and just expects a callable. As long as we behave just as a + constructor would have, we should be OK. If it ever changes so that + we *must* pass a class, we'll create an UnsafeHTTPSConnection class + which just sets check_domain to False in the class definition, and + choose which one to pass to do_open. + """ + result = HTTPSConnection(*args, **kwargs) + if self.ca_certs: + result.ca_certs = self.ca_certs + result.check_domain = self.check_domain + return result + + def https_open(self, req): + try: + return self.do_open(self._conn_maker, req) + except URLError as e: + if 'certificate verify failed' in str(e.reason): + raise CertificateError('Unable to verify server certificate ' + 'for %s' % req.host) + else: + raise + +# +# To protect against mixing HTTP traffic with HTTPS (examples: A Man-In-The- +# Middle proxy using HTTP listens on port 443, or an index mistakenly serves +# HTML containing a http://xyz link when it should be https://xyz), +# you can use the following handler class, which does not allow HTTP traffic. +# +# It works by inheriting from HTTPHandler - so build_opener won't add a +# handler for HTTP itself.
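Concretely, the pattern this comment describes might look like the following sketch (the CA bundle path is hypothetical; `build_opener` comes from the standard library):

```python
try:
    from urllib.request import build_opener     # Python 3
except ImportError:
    from urllib2 import build_opener            # Python 2

from distlib.util import HTTPSOnlyHandler

opener = build_opener(HTTPSOnlyHandler('/etc/ssl/certs/ca-bundle.pem'))
resp = opener.open('https://pypi.python.org/simple/')
# any plain http:// request through this opener raises URLError
```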
+# +class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): + def http_open(self, req): + raise URLError('Unexpected HTTP request on what should be a secure ' + 'connection: %s' % req) + +# +# XML-RPC with timeouts +# + +_ver_info = sys.version_info[:2] + +if _ver_info == (2, 6): + class HTTP(httplib.HTTP): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + + class HTTPS(httplib.HTTPS): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + +class Transport(xmlrpclib.Transport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.Transport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, x509 = self.get_host_info(host) + if _ver_info == (2, 6): + result = HTTP(h, timeout=self.timeout) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + result = self._connection[1] + return result + +class SafeTransport(xmlrpclib.SafeTransport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.SafeTransport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, kwargs = self.get_host_info(host) + if not kwargs: + kwargs = {} + kwargs['timeout'] = self.timeout + if _ver_info == (2, 6): + result = HTTPS(host, None, **kwargs) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, + **kwargs) + result = self._connection[1] + return result + + +class ServerProxy(xmlrpclib.ServerProxy): + def __init__(self, uri, **kwargs): + self.timeout = timeout = kwargs.pop('timeout', None) + # The above classes only come into play if a timeout + # is specified + if timeout is not None: + scheme, _ = splittype(uri) + use_datetime = kwargs.get('use_datetime', 0) + if scheme == 'https': + tcls = SafeTransport + else: + tcls = Transport + kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) + self.transport = t + xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) + +# +# CSV functionality. This is provided because on 2.x, the csv module can't +# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. 
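A round-trip sketch with the reader/writer pair that follows (the file name is illustrative):

```python
from distlib.util import CSVReader, CSVWriter

row = ['mypkg/__init__.py', 'sha256=0123abcd', '1024']
with CSVWriter('RECORD.test') as writer:
    writer.writerow(row)
with CSVReader('RECORD.test') as reader:
    for r in reader:
        assert r == row      # values come back as text on 2.x and 3.x
```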
+# + +def _csv_open(fn, mode, **kwargs): + if sys.version_info[0] < 3: + mode += 'b' + else: + kwargs['newline'] = '' + return open(fn, mode, **kwargs) + + +class CSVBase(object): + defaults = { + 'delimiter': str(','), # The strs are used because we need native + 'quotechar': str('"'), # str in the csv API (2.x won't take + 'lineterminator': str('\n') # Unicode) + } + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.stream.close() + + +class CSVReader(CSVBase): + def __init__(self, fn, **kwargs): + if 'stream' in kwargs: + stream = kwargs['stream'] + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + self.stream = stream + else: + self.stream = _csv_open(fn, 'r') + self.reader = csv.reader(self.stream, **self.defaults) + + def __iter__(self): + return self + + def next(self): + result = next(self.reader) + if sys.version_info[0] < 3: + for i, item in enumerate(result): + if not isinstance(item, text_type): + result[i] = item.decode('utf-8') + return result + + __next__ = next + +class CSVWriter(CSVBase): + def __init__(self, fn, **kwargs): + self.stream = _csv_open(fn, 'w') + self.writer = csv.writer(self.stream, **self.defaults) + + def writerow(self, row): + if sys.version_info[0] < 3: + r = [] + for item in row: + if isinstance(item, text_type): + item = item.encode('utf-8') + r.append(item) + row = r + self.writer.writerow(row) diff --git a/awx/lib/site-packages/pip/vendor/distlib/version.py b/awx/lib/site-packages/pip/vendor/distlib/version.py new file mode 100644 index 0000000000..1e45e31182 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/distlib/version.py @@ -0,0 +1,719 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Implementation of a flexible versioning scheme providing support for PEP-386, +distribute-compatible and semantic versioning. +""" + +import re + +from .compat import string_types + +__all__ = ['NormalizedVersion', 'NormalizedMatcher', + 'LegacyVersion', 'LegacyMatcher', + 'SemanticVersion', 'SemanticMatcher', + 'AdaptiveVersion', 'AdaptiveMatcher', + 'UnsupportedVersionError', 'HugeMajorVersionError', + 'suggest_normalized_version', 'suggest_semantic_version', + 'suggest_adaptive_version', + 'normalized_key', 'legacy_key', 'semantic_key', 'adaptive_key', + 'get_scheme'] + +class UnsupportedVersionError(Exception): + """This is an unsupported version.""" + pass + + +class HugeMajorVersionError(UnsupportedVersionError): + """An irrational version because the major version number is huge + (often because a year or date was used). + + See `error_on_huge_major_num` option in `NormalizedVersion` for details. + This guard can be disabled by setting that option False. 
+ """ + pass + + +class _Common(object): + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + +class Version(_Common): + def __init__(self, s): + self._string = s = s.strip() + self._parts = parts = self.parse(s) + assert isinstance(parts, tuple) + assert len(parts) > 0 + + def parse(self, s): + raise NotImplementedError('please implement in a subclass') + + def _check_compatible(self, other): + if type(self) != type(other): + raise TypeError('cannot compare %r and %r' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + self._check_compatible(other) + return self._parts < other._parts + + def __gt__(self, other): + return not (self.__lt__(other) or self.__eq__(other)) + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self._parts) + + @property + def is_prerelease(self): + raise NotImplementedError('Please implement in subclasses.') + +class Matcher(_Common): + version_class = None + + predicate_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?") + constraint_re = re.compile(r'^(<=|>=|<|>|!=|==)?\s*([^\s,]+)$') + + _operators = { + "<": lambda x, y: x < y, + ">": lambda x, y: x > y, + "<=": lambda x, y: x == y or x < y, + ">=": lambda x, y: x == y or x > y, + "==": lambda x, y: x == y, + "!=": lambda x, y: x != y, + } + + def __init__(self, s): + if self.version_class is None: + raise ValueError('Please specify a version class') + self._string = s = s.strip() + m = self.predicate_re.match(s) + if not m: + raise ValueError('Not valid: %r' % s) + groups = m.groups('') + self.name = groups[0].strip() + self.key = self.name.lower() # for case-insensitive comparisons + clist = [] + if groups[2]: + constraints = [c.strip() for c in groups[2].split(',')] + for c in constraints: + m = self.constraint_re.match(c) + if not m: + raise ValueError('Invalid %r in %r' % (c, s)) + groups = m.groups('==') + clist.append((groups[0], self.version_class(groups[1]))) + self._parts = tuple(clist) + + def match(self, version): + """Check if the provided version matches the constraints.""" + if isinstance(version, string_types): + version = self.version_class(version) + for operator, constraint in self._parts: + if not self._operators[operator](version, constraint): + return False + return True + + @property + def exact_version(self): + result = None + if len(self._parts) == 1 and self._parts[0][0] == '==': + result = self._parts[0][1] + return result + + def _check_compatible(self, other): + if type(self) != type(other) or self.name != other.name: + raise TypeError('cannot compare %s and %s' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return (self.key == other.key and self._parts == other._parts) + + def __ne__(self, other): + return not self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self.key) + hash(self._parts) + +# A marker used in the second and third parts of the `parts` tuple, for +# versions that don't have those segments, to sort properly. 
An example +# of versions in sort order ('highest' last): +# 1.0b1 ((1,0), ('b',1), ('z',)) +# 1.0.dev345 ((1,0), ('z',), ('dev', 345)) +# 1.0 ((1,0), ('z',), ('z',)) +# 1.0.post256.dev345 ((1,0), ('z',), ('z', 'post', 256, 'dev', 345)) +# 1.0.post345 ((1,0), ('z',), ('z', 'post', 345, 'z')) +# ^ ^ ^ +# 'b' < 'z' ---------------------/ | | +# | | +# 'dev' < 'z' ----------------------------/ | +# | +# 'dev' < 'z' ----------------------------------------------/ +# 'f' for 'final' would be kind of nice, but due to bugs in the support of +# 'rc' we must use 'z' +_FINAL_MARKER = ('z',) + +_VERSION_RE = re.compile(r''' + ^ + (?P<version>\d+\.\d+(\.\d+)*) # minimum 'N.N' + (?: + (?P<prerel>[abc]|rc) # 'a'=alpha, 'b'=beta, 'c'=release candidate + # 'rc'= alias for release candidate + (?P<prerelversion>\d+(?:\.\d+)*) + )? + (?P<postdev>(\.post(?P<post>\d+))?(\.dev(?P<dev>\d+))?)? + $''', re.VERBOSE) + + +def _parse_numdots(s, full_ver, drop_zeroes=False, min_length=0): + """Parse 'N.N.N' sequences, return a list of ints. + + @param s {str} 'N.N.N...' sequence to be parsed + @param full_ver_str {str} The full version string from which this + comes. Used for error strings. + @param min_length {int} The length to which to pad the + returned list with zeros, if necessary. Default 0. + """ + result = [] + for n in s.split("."): + #if len(n) > 1 and n[0] == '0': + # raise UnsupportedVersionError("cannot have leading zero in " + # "version number segment: '%s' in %r" % (n, full_ver)) + result.append(int(n)) + if drop_zeroes: + while (result and result[-1] == 0 and + (1 + len(result)) > min_length): + result.pop() + return result + +def pep386_key(s, fail_on_huge_major_ver=True): + """Parses a string version into parts using PEP-386 logic.""" + + match = _VERSION_RE.search(s) + if not match: + raise UnsupportedVersionError(s) + + groups = match.groupdict() + parts = [] + + # main version + block = _parse_numdots(groups['version'], s, min_length=2) + parts.append(tuple(block)) + + # prerelease + prerel = groups.get('prerel') + if prerel is not None: + block = [prerel] + block += _parse_numdots(groups.get('prerelversion'), s, min_length=1) + parts.append(tuple(block)) + else: + parts.append(_FINAL_MARKER) + + # postdev + if groups.get('postdev'): + post = groups.get('post') + dev = groups.get('dev') + postdev = [] + if post is not None: + postdev.extend((_FINAL_MARKER[0], 'post', int(post))) + if dev is None: + postdev.append(_FINAL_MARKER[0]) + if dev is not None: + postdev.extend(('dev', int(dev))) + parts.append(tuple(postdev)) + else: + parts.append(_FINAL_MARKER) + if fail_on_huge_major_ver and parts[0][0] > 1980: + raise HugeMajorVersionError("huge major version number, %r, " + "which might cause future problems: %r" % (parts[0][0], s)) + return tuple(parts) + + +PEP426_VERSION_RE = re.compile('^(\d+\.\d+(\.\d+)*)((a|b|c|rc)(\d+))?' 
+ '(\.(post)(\d+))?(\.(dev)(\d+))?$') + +def pep426_key(s, _=None): + s = s.strip() + m = PEP426_VERSION_RE.match(s) + if not m: + raise UnsupportedVersionError('Not a valid version: %s' % s) + groups = m.groups() + nums = tuple(int(v) for v in groups[0].split('.')) + while len(nums) > 1 and nums[-1] == 0: + nums = nums[:-1] + + pre = groups[3:5] + post = groups[6:8] + dev = groups[9:11] + if pre == (None, None): + pre = () + else: + pre = pre[0], int(pre[1]) + if post == (None, None): + post = () + else: + post = post[0], int(post[1]) + if dev == (None, None): + dev = () + else: + dev = dev[0], int(dev[1]) + if not pre: + # either before pre-release, or final release and after + if not post and dev: + # before pre-release + pre = ('a', -1) # to sort before a0 + else: + pre = ('z',) # to sort after all pre-releases + # now look at the state of post and dev. + if not post: + post = ('_',) # sort before 'a' + if not dev: + dev = ('final',) + + #print('%s -> %s' % (s, m.groups())) + return nums, pre, post, dev + + +normalized_key = pep426_key + +class NormalizedVersion(Version): + """A rational version. + + Good: + 1.2 # equivalent to "1.2.0" + 1.2.0 + 1.2a1 + 1.2.3a2 + 1.2.3b1 + 1.2.3c1 + 1.2.3.4 + TODO: fill this out + + Bad: + 1 # minimum two numbers + 1.2a # release level must have a release serial + 1.2.3b + """ + def parse(self, s): return normalized_key(s) + + PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) + + @property + def is_prerelease(self): + return any(t[0] in self.PREREL_TAGS for t in self._parts) + +class UnlimitedMajorVersion(Version): + def parse(self, s): return normalized_key(s, False) + +# We want '2.5' to match '2.5.4' but not '2.50'. + +def _match_at_front(x, y): + if x == y: + return True + x = str(x) + y = str(y) + if not x.startswith(y): + return False + n = len(y) + return x[n] == '.' + +class NormalizedMatcher(Matcher): + version_class = NormalizedVersion + + _operators = dict(Matcher._operators) + _operators.update({ + "<=": lambda x, y: _match_at_front(x, y) or x < y, + ">=": lambda x, y: _match_at_front(x, y) or x > y, + "==": lambda x, y: _match_at_front(x, y), + "!=": lambda x, y: not _match_at_front(x, y), + }) + +_REPLACEMENTS = ( + (re.compile('[.+-]$'), ''), # remove trailing puncts + (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start + (re.compile('^[.-]'), ''), # remove leading puncts + (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses + (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) + (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev) + (re.compile('[.]{2,}'), '.'), # multiple runs of '.' + (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha + (re.compile(r'\b(pre-alpha|prealpha)\b'), + 'pre.alpha'), # standardise + (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses +) + +_SUFFIX_REPLACEMENTS = ( + (re.compile('^[:~._+-]+'), ''), # remove leading puncts + (re.compile('[,*")([\]]'), ''), # remove unwanted chars + (re.compile('[~:+_ -]'), '.'), # replace illegal chars + (re.compile('[.]{2,}'), '.'), # multiple runs of '.' + (re.compile(r'\.$'), ''), # trailing '.' +) + +_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') + +def suggest_semantic_version(s): + """ + Try to suggest a semantic form for a version for which + suggest_normalized_version couldn't come up with anything. + """ + result = s.strip().lower() + for pat, repl in _REPLACEMENTS: + result = pat.sub(repl, result) + if not result: + result = '0.0.0' + + # Now look for numeric prefix, and separate it out from + # the rest.
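The '2.5'-matches-'2.5.4' rule encoded in `_match_at_front` above, made concrete in a minimal sketch:

```python
from distlib.version import NormalizedMatcher

m = NormalizedMatcher('foo (== 2.5)')
print(m.match('2.5.4'))   # True  -- '2.5' is a prefix at a '.' boundary
print(m.match('2.50'))    # False -- '2.50' is a different version
```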
+ #import pdb; pdb.set_trace() + m = _NUMERIC_PREFIX.match(result) + if not m: + prefix = '0.0.0' + suffix = result + else: + prefix = m.groups()[0].split('.') + prefix = [int(i) for i in prefix] + while len(prefix) < 3: + prefix.append(0) + if len(prefix) == 3: + suffix = result[m.end():] + else: + suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] + prefix = prefix[:3] + prefix = '.'.join([str(i) for i in prefix]) + suffix = suffix.strip() + if suffix: + #import pdb; pdb.set_trace() + # massage the suffix. + for pat, repl in _SUFFIX_REPLACEMENTS: + suffix = pat.sub(repl, suffix) + + if not suffix: + result = prefix + else: + sep = '-' if 'dev' in suffix else '+' + result = prefix + sep + suffix + if not is_semver(result): + result = None + return result + + +def suggest_normalized_version(s): + """Suggest a normalized version close to the given version string. + + If you have a version string that isn't rational (i.e. NormalizedVersion + doesn't like it) then you might be able to get an equivalent (or close) + rational version from this function. + + This does a number of simple normalizations to the given string, based + on observation of versions currently in use on PyPI. Given a dump of + those versions during PyCon 2009, 4287 of them: + - 2312 (53.93%) match NormalizedVersion without change + with the automatic suggestion + - 3474 (81.04%) match when using this suggestion method + + @param s {str} An irrational version string. + @returns A rational version string, or None, if one couldn't be determined. + """ + try: + normalized_key(s) + return s # already rational + except UnsupportedVersionError: + pass + + rs = s.lower() + + # part of this could use maketrans + for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), + ('beta', 'b'), ('rc', 'c'), ('-final', ''), + ('-pre', 'c'), + ('-release', ''), ('.release', ''), ('-stable', ''), + ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), + ('final', '')): + rs = rs.replace(orig, repl) + + # if something ends with dev or pre, we add a 0 + rs = re.sub(r"pre$", r"pre0", rs) + rs = re.sub(r"dev$", r"dev0", rs) + + # if we have something like "b-2" or "a.2" at the end of the + # version, that is probably beta, alpha, etc + # let's remove the dash or dot + rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) + + # 1.0-dev-r371 -> 1.0.dev371 + # 0.1-dev-r79 -> 0.1.dev79 + rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) + + # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 + rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) + + # Clean: v0.3, v1.0 + if rs.startswith('v'): + rs = rs[1:] + + # Clean leading '0's on numbers. + #TODO: unintended side-effect on, e.g., "2003.05.09" + # PyPI stats: 77 (~2%) better + rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) + + # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers + # zero.
+ # PyPI stats: 245 (7.56%) better + rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) + + # the 'dev-rNNN' tag is a dev tag + rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) + + # clean the - when used as a pre delimiter + rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) + + # a terminal "dev" or "devel" can be changed into ".dev0" + rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) + + # a terminal "dev" can be changed into ".dev0" + rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) + + # a terminal "final" or "stable" can be removed + rs = re.sub(r"(final|stable)$", "", rs) + + # The 'r' and the '-' tags are post release tags + # 0.4a1.r10 -> 0.4a1.post10 + # 0.9.33-17222 -> 0.9.33.post17222 + # 0.9.33-r17222 -> 0.9.33.post17222 + rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) + + # Clean 'r' instead of 'dev' usage: + # 0.9.33+r17222 -> 0.9.33.dev17222 + # 1.0dev123 -> 1.0.dev123 + # 1.0.git123 -> 1.0.dev123 + # 1.0.bzr123 -> 1.0.dev123 + # 0.1a0dev.123 -> 0.1a0.dev123 + # PyPI stats: ~150 (~4%) better + rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) + + # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: + # 0.2.pre1 -> 0.2c1 + # 0.2-c1 -> 0.2c1 + # 1.0preview123 -> 1.0c123 + # PyPI stats: ~21 (0.62%) better + rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) + + # Tcl/Tk uses "px" for their post release markers + rs = re.sub(r"p(\d+)$", r".post\1", rs) + + try: + normalized_key(rs) + except UnsupportedVersionError: + rs = None + return rs + +def suggest_adaptive_version(s): + return suggest_normalized_version(s) or suggest_semantic_version(s) + +# +# Legacy version processing (distribute-compatible) +# + +_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) +_VERSION_REPLACE = { + 'pre':'c', + 'preview':'c', + '-':'final-', + 'rc':'c', + 'dev':'@', + '': None, + '.': None, +} + + +def legacy_key(s): + def get_parts(s): + result = [] + for p in _VERSION_PART.split(s.lower()): + p = _VERSION_REPLACE.get(p, p) + if p: + if '0' <= p[:1] <= '9': + p = p.zfill(8) + else: + p = '*' + p + result.append(p) + result.append('*final') + return result + + result = [] + for p in get_parts(s): + if p.startswith('*'): + if p < '*final': + while result and result[-1] == '*final-': + result.pop() + while result and result[-1] == '00000000': + result.pop() + result.append(p) + return tuple(result) + +class LegacyVersion(Version): + def parse(self, s): return legacy_key(s) + + PREREL_TAGS = set( + ['*a', '*alpha', '*b', '*beta', '*c', '*rc', '*r', '*@', '*pre'] + ) + + @property + def is_prerelease(self): + return any(x in self.PREREL_TAGS for x in self._parts) + +class LegacyMatcher(Matcher): + version_class = LegacyVersion + +# +# Semantic versioning +# + +_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' + r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
+                        r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
+
+def is_semver(s):
+    return _SEMVER_RE.match(s)
+
+def semantic_key(s):
+    def make_tuple(s, absent):
+        if s is None:
+            result = (absent,)
+        else:
+            parts = s[1:].split('.')
+            # We can't compare ints and strings on Python 3, so fudge it
+            # by zero-filling numeric values to simulate a numeric comparison
+            result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
+        return result
+
+    result = None
+    m = is_semver(s)
+    if not m:
+        raise UnsupportedVersionError(s)
+    groups = m.groups()
+    major, minor, patch = [int(i) for i in groups[:3]]
+    # choose the '|' and '*' so that versions sort correctly
+    pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
+    return ((major, minor, patch), pre, build)
+
+
+class SemanticVersion(Version):
+    def parse(self, s): return semantic_key(s)
+
+    @property
+    def is_prerelease(self):
+        return self._parts[1][0] != '|'
+
+
+class SemanticMatcher(Matcher):
+    version_class = SemanticVersion
+
+#
+# Adaptive versioning. When handed a legacy version string, tries to
+# determine a suggested normalized version, and work with that.
+#
+
+def adaptive_key(s):
+    try:
+        result = normalized_key(s, False)
+    except UnsupportedVersionError:
+        ss = suggest_normalized_version(s)
+        if ss is not None:
+            result = normalized_key(ss)     # "guaranteed" to work
+        else:
+            ss = s  # suggest_semantic_version(s) or s
+            result = semantic_key(ss)       # let's hope ...
+    return result
+
+
+class AdaptiveVersion(NormalizedVersion):
+    def parse(self, s): return adaptive_key(s)
+
+    @property
+    def is_prerelease(self):
+        try:
+            normalized_key(self._string)
+            not_sem = True
+        except UnsupportedVersionError:
+            ss = suggest_normalized_version(self._string)
+            not_sem = ss is not None
+        if not_sem:
+            return any(t[0] in self.PREREL_TAGS for t in self._parts)
+        return self._parts[1][0] != '|'
+
+class AdaptiveMatcher(NormalizedMatcher):
+    version_class = AdaptiveVersion
+
+
+class VersionScheme(object):
+    def __init__(self, key, matcher, suggester=None):
+        self.key = key
+        self.matcher = matcher
+        self.suggester = suggester
+
+    def is_valid_version(self, s):
+        try:
+            self.matcher.version_class(s)
+            result = True
+        except UnsupportedVersionError:
+            result = False
+        return result
+
+    def is_valid_matcher(self, s):
+        try:
+            self.matcher(s)
+            result = True
+        except UnsupportedVersionError:
+            result = False
+        return result
+
+    def is_valid_constraint_list(self, s):
+        """
+        Used for processing some metadata fields
+        """
+        return self.is_valid_matcher('dummy_name (%s)' % s)
+
+    def suggest(self, s):
+        if self.suggester is None:
+            result = None
+        else:
+            result = self.suggester(s)
+        return result
+
+_SCHEMES = {
+    'normalized': VersionScheme(normalized_key, NormalizedMatcher,
+                                suggest_normalized_version),
+    # the suggester is invoked as self.suggester(s), i.e. with a single
+    # argument, so the identity suggester must take just that argument
+    'legacy': VersionScheme(legacy_key, LegacyMatcher, lambda s: s),
+    'semantic': VersionScheme(semantic_key, SemanticMatcher,
+                              suggest_semantic_version),
+    'adaptive': VersionScheme(adaptive_key, AdaptiveMatcher,
+                              suggest_adaptive_version),
+}
+
+_SCHEMES['default'] = _SCHEMES['adaptive']
+
+def get_scheme(name):
+    if name not in _SCHEMES:
+        raise ValueError('unknown scheme name: %r' % name)
+    return _SCHEMES[name]
diff --git a/awx/lib/site-packages/pip/vendor/distlib/wheel.py b/awx/lib/site-packages/pip/vendor/distlib/wheel.py
new file mode 100644
index 0000000000..8274732adc
--- /dev/null
+++ b/awx/lib/site-packages/pip/vendor/distlib/wheel.py
@@ -0,0 +1,638 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import unicode_literals + +import base64 +import codecs +import datetime +import distutils.util +from email import message_from_file +import hashlib +import imp +import json +import logging +import os +import posixpath +import re +import shutil +import sys +import tempfile +import zipfile + +from . import DistlibException +from .compat import sysconfig, ZipFile, fsdecode, text_type, filter +from .database import DistributionPath, InstalledDistribution +from .metadata import Metadata +from .scripts import ScriptMaker +from .util import (FileOperator, convert_path, CSVReader, CSVWriter, + cached_property, get_cache_base) + + +logger = logging.getLogger(__name__) + + +if hasattr(sys, 'pypy_version_info'): + IMP_PREFIX = 'pp' +elif sys.platform.startswith('java'): + IMP_PREFIX = 'jy' +elif sys.platform == 'cli': + IMP_PREFIX = 'ip' +else: + IMP_PREFIX = 'cp' + +VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') +if not VER_SUFFIX: # pragma: no cover + VER_SUFFIX = '%s%s' % sys.version_info[:2] +PYVER = 'py' + VER_SUFFIX +IMPVER = IMP_PREFIX + VER_SUFFIX + +ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_') + +ABI = sysconfig.get_config_var('SOABI') +if ABI and ABI.startswith('cpython-'): + ABI = ABI.replace('cpython-', 'cp') +else: + ABI = 'none' + +FILENAME_RE = re.compile(r''' +(?P<nm>[^-]+) +-(?P<vn>\d+[^-]*) +(-(?P<bn>\d+[^-]*))? +-(?P<py>\w+\d+(\.\w+\d+)*) +-(?P<bi>\w+) +-(?P<ar>\w+) +\.whl$ +''', re.IGNORECASE | re.VERBOSE) + +NAME_VERSION_RE = re.compile(r''' +(?P<nm>[^-]+) +-(?P<vn>\d+[^-]*) +(-(?P<bn>\d+[^-]*))?$ +''', re.IGNORECASE | re.VERBOSE) + +SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') + +if os.sep == '/': + to_posix = lambda o: o +else: + to_posix = lambda o: o.replace(os.sep, '/') + + +class Mounter(object): + def __init__(self): + self.impure_wheels = {} + self.libs = {} + + def add(self, pathname, extensions): + self.impure_wheels[pathname] = extensions + self.libs.update(extensions) + + def remove(self, pathname): + extensions = self.impure_wheels.pop(pathname) + for k, v in extensions: + if k in self.libs: + del self.libs[k] + + def find_module(self, fullname, path=None): + if fullname in self.libs: + result = self + else: + result = None + return result + + def load_module(self, fullname): + if fullname in sys.modules: + result = sys.modules[fullname] + else: + if fullname not in self.libs: + raise ImportError('unable to find extension for %s' % fullname) + result = imp.load_dynamic(fullname, self.libs[fullname]) + result.__loader__ = self + result.__package__, _ = fullname.rsplit('.', 1) + return result + +_hook = Mounter() + + +class Wheel(object): + """ + Class to build and install from Wheel files (PEP 427). + """ + + wheel_version = (1, 0) + hash_kind = 'sha256' + + def __init__(self, filename=None, sign=False, verify=False): + """ + Initialise an instance using a (valid) filename. 
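+
+        The filename may be a full wheel filename such as
+        ``distlib-0.1.2-py27-none-any.whl``, or just a name-version
+        (or name-version-buildver) prefix, in which case the
+        compatibility tags default to a pure-Python wheel for the
+        running interpreter (``pyXY-none-any``).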
+ """ + self.sign = sign + self.verify = verify + self.buildver = '' + self.pyver = [PYVER] + self.abi = ['none'] + self.arch = ['any'] + self.dirname = os.getcwd() + if filename is None: + self.name = 'dummy' + self.version = '0.1' + self._filename = self.filename + else: + m = NAME_VERSION_RE.match(filename) + if m: + info = m.groupdict('') + self.name = info['nm'] + self.version = info['vn'] + self.buildver = info['bn'] + self._filename = self.filename + else: + dirname, filename = os.path.split(filename) + m = FILENAME_RE.match(filename) + if not m: + raise DistlibException('Invalid name or ' + 'filename: %r' % filename) + if dirname: + self.dirname = os.path.abspath(dirname) + self._filename = filename + info = m.groupdict('') + self.name = info['nm'] + self.version = info['vn'] + self.buildver = info['bn'] + self.pyver = info['py'].split('.') + self.abi = info['bi'].split('.') + self.arch = info['ar'].split('.') + + @property + def filename(self): + """ + Build and return a filename from the various components. + """ + if self.buildver: + buildver = '-' + self.buildver + else: + buildver = '' + pyver = '.'.join(self.pyver) + abi = '.'.join(self.abi) + arch = '.'.join(self.arch) + return '%s-%s%s-%s-%s-%s.whl' % (self.name, self.version, buildver, + pyver, abi, arch) + + @property + def tags(self): + for pyver in self.pyver: + for abi in self.abi: + for arch in self.arch: + yield pyver, abi, arch + + @cached_property + def metadata(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + metadata_filename = posixpath.join(info_dir, 'METADATA') + wrapper = codecs.getreader('utf-8') + with ZipFile(pathname, 'r') as zf: + with zf.open(metadata_filename) as bf: + wf = wrapper(bf) + result = Metadata() + result.read_file(wf) + return result + + @cached_property + def info(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + metadata_filename = posixpath.join(info_dir, 'WHEEL') + wrapper = codecs.getreader('utf-8') + with ZipFile(pathname, 'r') as zf: + with zf.open(metadata_filename) as bf: + wf = wrapper(bf) + message = message_from_file(wf) + result = dict(message) + return result + + def process_shebang(self, data): + m = SHEBANG_RE.match(data) + if m: + data = b'#!python' + data[m.end():] + else: + cr = data.find(b'\r') + lf = data.find(b'\n') + if cr < 0 or cr > lf: + term = b'\n' + else: + if data[cr:cr + 2] == b'\r\n': + term = b'\r\n' + else: + term = b'\r' + data = b'#!python' + term + data + return data + + def get_hash(self, data, hash_kind=None): + if hash_kind is None: + hash_kind = self.hash_kind + try: + hasher = getattr(hashlib, hash_kind) + except AttributeError: + raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) + result = hasher(data).digest() + result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') + return hash_kind, result + + def write_record(self, records, record_path, base): + with CSVWriter(record_path) as writer: + for row in records: + writer.writerow(row) + p = to_posix(os.path.relpath(record_path, base)) + writer.writerow((p, '', '')) + + def build(self, paths, tags=None): + """ + Build a wheel from files in specified paths, and use any specified tags + when determining the name of the wheel. 
+ """ + if tags is None: + tags = {} + + libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] + if libkey == 'platlib': + is_pure = 'false' + default_pyver = [IMPVER] + default_abi = [ABI] + default_arch = [ARCH] + else: + is_pure = 'true' + default_pyver = [PYVER] + default_abi = ['none'] + default_arch = ['any'] + + self.pyver = tags.get('pyver', default_pyver) + self.abi = tags.get('abi', default_abi) + self.arch = tags.get('arch', default_arch) + + libdir = paths[libkey] + + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + archive_paths = [] + + # First, stuff which is not in site-packages + for key in ('data', 'headers', 'scripts'): + if key not in paths: + continue + path = paths[key] + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + for fn in files: + p = fsdecode(os.path.join(root, fn)) + rp = os.path.relpath(p, path) + ap = to_posix(os.path.join(data_dir, key, rp)) + archive_paths.append((ap, p)) + if key == 'scripts' and not p.endswith('.exe'): + with open(p, 'rb') as f: + data = f.read() + data = self.process_shebang(data) + with open(p, 'wb') as f: + f.write(data) + + # Now, stuff which is in site-packages, other than the + # distinfo stuff. + path = libdir + distinfo = None + for root, dirs, files in os.walk(path): + if root == path: + # At the top level only, save distinfo for later + # and skip it for now + for i, dn in enumerate(dirs): + dn = fsdecode(dn) + if dn.endswith('.dist-info'): + distinfo = os.path.join(root, dn) + del dirs[i] + break + assert distinfo, '.dist-info directory expected, not found' + + for fn in files: + # comment out next suite to leave .pyc files in + if fsdecode(fn).endswith(('.pyc', '.pyo')): + continue + p = os.path.join(root, fn) + rp = to_posix(os.path.relpath(p, path)) + archive_paths.append((rp, p)) + + # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. + files = os.listdir(distinfo) + for fn in files: + if fn not in ('RECORD', 'INSTALLER', 'SHARED'): + p = fsdecode(os.path.join(distinfo, fn)) + ap = to_posix(os.path.join(info_dir, fn)) + archive_paths.append((ap, p)) + + import distlib + + wheel_metadata = [ + 'Wheel-Version: %d.%d' % self.wheel_version, + 'Generator: distlib %s' % distlib.__version__, + 'Root-Is-Purelib: %s' % is_pure, + ] + for pyver, abi, arch in self.tags: + wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) + p = os.path.join(distinfo, 'WHEEL') + with open(p, 'w') as f: + f.write('\n'.join(wheel_metadata)) + ap = to_posix(os.path.join(info_dir, 'WHEEL')) + archive_paths.append((ap, p)) + + # Now, at last, RECORD. + # Paths in here are archive paths - nothing else makes sense. + records = [] + hasher = getattr(hashlib, self.hash_kind) + for ap, p in archive_paths: + with open(p, 'rb') as f: + data = f.read() + digest = '%s=%s' % self.get_hash(data) + size = os.path.getsize(p) + records.append((ap, digest, size)) + + p = os.path.join(distinfo, 'RECORD') + self.write_record(records, p, libdir) + ap = to_posix(os.path.join(info_dir, 'RECORD')) + archive_paths.append((ap, p)) + # Now, ready to build the zip file + pathname = os.path.join(self.dirname, self.filename) + with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: + for ap, p in archive_paths: + logger.debug('Wrote %s to %s in wheel', p, ap) + zf.write(p, ap) + return pathname + + def install(self, paths, dry_run=False, executable=None, warner=None): + """ + Install a wheel to the specified paths. 
+        If ``executable`` is specified, it should be the Unicode absolute
+        path to the executable to be written into the shebang lines of any
+        scripts installed. If ``warner`` is specified, it should be a
+        callable, which will be called with two tuples indicating the wheel
+        version of this software and the wheel version in the file, if
+        there is a discrepancy in the versions. This can be used to issue
+        any warnings or to raise any exceptions.
+        """
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        data_dir = '%s.data' % name_ver
+        info_dir = '%s.dist-info' % name_ver
+
+        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+        record_name = posixpath.join(info_dir, 'RECORD')
+
+        wrapper = codecs.getreader('utf-8')
+
+        with ZipFile(pathname, 'r') as zf:
+            with zf.open(wheel_metadata_name) as bwf:
+                wf = wrapper(bwf)
+                message = message_from_file(wf)
+            wv = message['Wheel-Version'].split('.', 1)
+            file_version = tuple([int(i) for i in wv])
+            if (file_version != self.wheel_version) and warner:
+                warner(self.wheel_version, file_version)
+
+            if message['Root-Is-Purelib'] == 'true':
+                libdir = paths['purelib']
+            else:
+                libdir = paths['platlib']
+            records = {}
+            with zf.open(record_name) as bf:
+                with CSVReader(record_name, stream=bf) as reader:
+                    for row in reader:
+                        p = row[0]
+                        records[p] = row
+
+            data_pfx = posixpath.join(data_dir, '')
+            script_pfx = posixpath.join(data_dir, 'scripts', '')
+
+            fileop = FileOperator(dry_run=dry_run)
+            fileop.record = True    # so we can rollback if needed
+
+            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
+
+            outfiles = []   # for RECORD writing
+
+            # for script copying/shebang processing
+            workdir = tempfile.mkdtemp()
+            # set target dir later
+            # we default add_launchers to False, as the
+            # Python Launcher should be used instead
+            maker = ScriptMaker(workdir, None, fileop=fileop,
+                                add_launchers=False)
+            maker.executable = executable
+            try:
+                for zinfo in zf.infolist():
+                    arcname = zinfo.filename
+                    if isinstance(arcname, text_type):
+                        u_arcname = arcname
+                    else:
+                        u_arcname = arcname.decode('utf-8')
+                    row = records[u_arcname]
+                    if row[2] and str(zinfo.file_size) != row[2]:
+                        raise DistlibException('size mismatch for '
+                                               '%s' % u_arcname)
+                    if row[1]:
+                        kind, value = row[1].split('=', 1)
+                        with zf.open(arcname) as bf:
+                            data = bf.read()
+                        _, digest = self.get_hash(data, kind)
+                        if digest != value:
+                            raise DistlibException('digest mismatch for '
+                                                   '%s' % arcname)
+
+                    is_script = (u_arcname.startswith(script_pfx)
+                                 and not u_arcname.endswith('.exe'))
+
+                    if u_arcname.startswith(data_pfx):
+                        _, where, rp = u_arcname.split('/', 2)
+                        outfile = os.path.join(paths[where], convert_path(rp))
+                    else:
+                        # meant for site-packages.
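+                        # (WHEEL has already been consumed above, and
+                        # RECORD is rewritten from the list of files
+                        # actually installed, so neither is copied
+                        # verbatim.)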
+ if u_arcname in (wheel_metadata_name, record_name): + continue + outfile = os.path.join(libdir, convert_path(u_arcname)) + if not is_script: + with zf.open(arcname) as bf: + fileop.copy_stream(bf, outfile) + outfiles.append(outfile) + # Double check the digest of the written file + if not dry_run and row[1]: + with open(outfile, 'rb') as bf: + data = bf.read() + _, newdigest = self.get_hash(data, kind) + if newdigest != digest: + raise DistlibException('digest mismatch ' + 'on write for ' + '%s' % outfile) + if bc and outfile.endswith('.py'): + try: + pyc = fileop.byte_compile(outfile) + outfiles.append(pyc) + except Exception: + # Don't give up if byte-compilation fails, + # but log it and perhaps warn the user + logger.warning('Byte-compilation failed', + exc_info=True) + else: + fn = os.path.basename(convert_path(arcname)) + workname = os.path.join(workdir, fn) + with zf.open(arcname) as bf: + fileop.copy_stream(bf, workname) + + dn, fn = os.path.split(outfile) + maker.target_dir = dn + filenames = maker.make(fn) + fileop.set_executable_mode(filenames) + outfiles.extend(filenames) + + p = os.path.join(libdir, info_dir) + dist = InstalledDistribution(p) + + # Write SHARED + paths = dict(paths) # don't change passed in dict + del paths['purelib'] + del paths['platlib'] + paths['lib'] = libdir + p = dist.write_shared_locations(paths, dry_run) + outfiles.append(p) + + # Write RECORD + dist.write_installed_files(outfiles, paths['prefix'], + dry_run) + return dist + except Exception as e: # pragma: no cover + logger.exception('installation failed.') + fileop.rollback() + raise + finally: + shutil.rmtree(workdir) + + def _get_dylib_cache(self): + result = os.path.join(get_cache_base(), 'dylib-cache') + if not os.path.isdir(result): + os.makedirs(result) + return result + + def _get_extensions(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + arcname = posixpath.join(info_dir, 'EXTENSIONS') + wrapper = codecs.getreader('utf-8') + result = [] + with ZipFile(pathname, 'r') as zf: + try: + with zf.open(arcname) as bf: + wf = wrapper(bf) + extensions = json.load(wf) + cache_base = self._get_dylib_cache() + for name, relpath in extensions.items(): + dest = os.path.join(cache_base, convert_path(relpath)) + if not os.path.exists(dest): + extract = True + else: + file_time = os.stat(dest).st_mtime + file_time = datetime.datetime.fromtimestamp(file_time) + info = zf.getinfo(relpath) + wheel_time = datetime.datetime(*info.date_time) + extract = wheel_time > file_time + if extract: + zf.extract(relpath, cache_base) + result.append((name, dest)) + except KeyError: + pass + return result + + def mount(self, append=False): + pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) + if not is_compatible(self): + msg = 'Wheel %s not mountable in this Python.' 
% pathname + raise DistlibException(msg) + if pathname in sys.path: + logger.debug('%s already in path', pathname) + else: + if append: + sys.path.append(pathname) + else: + sys.path.insert(0, pathname) + extensions = self._get_extensions() + if extensions: + if _hook not in sys.meta_path: + sys.meta_path.append(_hook) + _hook.add(pathname, extensions) + + def unmount(self): + pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) + if pathname not in sys.path: + logger.debug('%s not in path', pathname) + else: + sys.path.remove(pathname) + if pathname in _hook.impure_wheels: + _hook.remove(pathname) + if not _hook.impure_wheels: + if _hook in sys.meta_path: + sys.meta_path.remove(_hook) + + +def compatible_tags(): + """ + Return (pyver, abi, arch) tuples compatible with this Python. + """ + versions = [VER_SUFFIX] + major = VER_SUFFIX[0] + for minor in range(sys.version_info[1] - 1, - 1, -1): + versions.append(''.join([major, str(minor)])) + + abis = [] + for suffix, _, _ in imp.get_suffixes(): + if suffix.startswith('.abi'): + abis.append(suffix.split('.', 2)[1]) + abis.sort() + if ABI != 'none': + abis.insert(0, ABI) + abis.append('none') + result = [] + + # Most specific - our Python version, ABI and arch + for abi in abis: + result.append((''.join((IMP_PREFIX, versions[0])), abi, ARCH)) + + # where no ABI / arch dependency, but IMP_PREFIX dependency + for i, version in enumerate(versions): + result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) + if i == 0: + result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) + + # no IMP_PREFIX, ABI or arch dependency + for i, version in enumerate(versions): + result.append((''.join(('py', version)), 'none', 'any')) + if i == 0: + result.append((''.join(('py', version[0])), 'none', 'any')) + return result + + +COMPATIBLE_TAGS = compatible_tags() + +del compatible_tags + +def is_compatible(wheel, tags=None): + if not isinstance(wheel, Wheel): + wheel = Wheel(wheel) # assume it's a filename + result = False + if tags is None: + tags = COMPATIBLE_TAGS + for ver, abi, arch in tags: + if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: + result = True + break + return result diff --git a/awx/lib/site-packages/pip/vendor/html5lib/__init__.py b/awx/lib/site-packages/pip/vendor/html5lib/__init__.py new file mode 100644 index 0000000000..10e2b74c29 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/__init__.py @@ -0,0 +1,23 @@ +""" +HTML parsing library based on the WHATWG "HTML5" +specification. The parser is designed to be compatible with existing +HTML found in the wild and implements well-defined error recovery that +is largely compatible with modern desktop web browsers. 
+ +Example usage: + +import html5lib +f = open("my_document.html") +tree = html5lib.parse(f) +""" + +from __future__ import absolute_import, division, unicode_literals + +from .html5parser import HTMLParser, parse, parseFragment +from .treebuilders import getTreeBuilder +from .treewalkers import getTreeWalker +from .serializer import serialize + +__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", + "getTreeWalker", "serialize"] +__version__ = "1.0b1" diff --git a/awx/lib/site-packages/pip/vendor/html5lib/constants.py b/awx/lib/site-packages/pip/vendor/html5lib/constants.py new file mode 100644 index 0000000000..1866dd78e7 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/constants.py @@ -0,0 +1,3086 @@ +from __future__ import absolute_import, division, unicode_literals + +import string +import gettext +_ = gettext.gettext + +EOF = None + +E = { + "null-character": + _("Null character in input stream, replaced with U+FFFD."), + "invalid-codepoint": + _("Invalid codepoint in stream."), + "incorrectly-placed-solidus": + _("Solidus (/) incorrectly placed in tag."), + "incorrect-cr-newline-entity": + _("Incorrect CR newline entity, replaced with LF."), + "illegal-windows-1252-entity": + _("Entity used with illegal number (windows-1252 reference)."), + "cant-convert-numeric-entity": + _("Numeric entity couldn't be converted to character " + "(codepoint U+%(charAsInt)08x)."), + "illegal-codepoint-for-numeric-entity": + _("Numeric entity represents an illegal codepoint: " + "U+%(charAsInt)08x."), + "numeric-entity-without-semicolon": + _("Numeric entity didn't end with ';'."), + "expected-numeric-entity-but-got-eof": + _("Numeric entity expected. Got end of file instead."), + "expected-numeric-entity": + _("Numeric entity expected but none found."), + "named-entity-without-semicolon": + _("Named entity didn't end with ';'."), + "expected-named-entity": + _("Named entity expected. Got none."), + "attributes-in-end-tag": + _("End tag contains unexpected attributes."), + 'self-closing-flag-on-end-tag': + _("End tag contains unexpected self-closing flag."), + "expected-tag-name-but-got-right-bracket": + _("Expected tag name. Got '>' instead."), + "expected-tag-name-but-got-question-mark": + _("Expected tag name. Got '?' instead. (HTML doesn't " + "support processing instructions.)"), + "expected-tag-name": + _("Expected tag name. Got something else instead"), + "expected-closing-tag-but-got-right-bracket": + _("Expected closing tag. Got '>' instead. Ignoring '</>'."), + "expected-closing-tag-but-got-eof": + _("Expected closing tag. Unexpected end of file."), + "expected-closing-tag-but-got-char": + _("Expected closing tag. Unexpected character '%(data)s' found."), + "eof-in-tag-name": + _("Unexpected end of file in the tag name."), + "expected-attribute-name-but-got-eof": + _("Unexpected end of file. Expected attribute name instead."), + "eof-in-attribute-name": + _("Unexpected end of file in attribute name."), + "invalid-character-in-attribute-name": + _("Invalid character in attribute name"), + "duplicate-attribute": + _("Dropped duplicate attribute on tag."), + "expected-end-of-tag-name-but-got-eof": + _("Unexpected end of file. Expected = or end of tag."), + "expected-attribute-value-but-got-eof": + _("Unexpected end of file. Expected attribute value."), + "expected-attribute-value-but-got-right-bracket": + _("Expected attribute value. 
Got '>' instead."), + 'equals-in-unquoted-attribute-value': + _("Unexpected = in unquoted attribute"), + 'unexpected-character-in-unquoted-attribute-value': + _("Unexpected character in unquoted attribute"), + "invalid-character-after-attribute-name": + _("Unexpected character after attribute name."), + "unexpected-character-after-attribute-value": + _("Unexpected character after attribute value."), + "eof-in-attribute-value-double-quote": + _("Unexpected end of file in attribute value (\")."), + "eof-in-attribute-value-single-quote": + _("Unexpected end of file in attribute value (')."), + "eof-in-attribute-value-no-quotes": + _("Unexpected end of file in attribute value."), + "unexpected-EOF-after-solidus-in-tag": + _("Unexpected end of file in tag. Expected >"), + "unexpected-character-after-solidus-in-tag": + _("Unexpected character after / in tag. Expected >"), + "expected-dashes-or-doctype": + _("Expected '--' or 'DOCTYPE'. Not found."), + "unexpected-bang-after-double-dash-in-comment": + _("Unexpected ! after -- in comment"), + "unexpected-space-after-double-dash-in-comment": + _("Unexpected space after -- in comment"), + "incorrect-comment": + _("Incorrect comment."), + "eof-in-comment": + _("Unexpected end of file in comment."), + "eof-in-comment-end-dash": + _("Unexpected end of file in comment (-)"), + "unexpected-dash-after-double-dash-in-comment": + _("Unexpected '-' after '--' found in comment."), + "eof-in-comment-double-dash": + _("Unexpected end of file in comment (--)."), + "eof-in-comment-end-space-state": + _("Unexpected end of file in comment."), + "eof-in-comment-end-bang-state": + _("Unexpected end of file in comment."), + "unexpected-char-in-comment": + _("Unexpected character in comment found."), + "need-space-after-doctype": + _("No space after literal string 'DOCTYPE'."), + "expected-doctype-name-but-got-right-bracket": + _("Unexpected > character. Expected DOCTYPE name."), + "expected-doctype-name-but-got-eof": + _("Unexpected end of file. Expected DOCTYPE name."), + "eof-in-doctype-name": + _("Unexpected end of file in DOCTYPE name."), + "eof-in-doctype": + _("Unexpected end of file in DOCTYPE."), + "expected-space-or-right-bracket-in-doctype": + _("Expected space or '>'. Got '%(data)s'"), + "unexpected-end-of-doctype": + _("Unexpected end of DOCTYPE."), + "unexpected-char-in-doctype": + _("Unexpected character in DOCTYPE."), + "eof-in-innerhtml": + _("XXX innerHTML EOF"), + "unexpected-doctype": + _("Unexpected DOCTYPE. Ignored."), + "non-html-root": + _("html needs to be the first start tag."), + "expected-doctype-but-got-eof": + _("Unexpected End of file. Expected DOCTYPE."), + "unknown-doctype": + _("Erroneous DOCTYPE."), + "expected-doctype-but-got-chars": + _("Unexpected non-space characters. Expected DOCTYPE."), + "expected-doctype-but-got-start-tag": + _("Unexpected start tag (%(name)s). Expected DOCTYPE."), + "expected-doctype-but-got-end-tag": + _("Unexpected end tag (%(name)s). Expected DOCTYPE."), + "end-tag-after-implied-root": + _("Unexpected end tag (%(name)s) after the (implied) root element."), + "expected-named-closing-tag-but-got-eof": + _("Unexpected end of file. Expected end tag (%(name)s)."), + "two-heads-are-not-better-than-one": + _("Unexpected start tag head in existing head. Ignored."), + "unexpected-end-tag": + _("Unexpected end tag (%(name)s). Ignored."), + "unexpected-start-tag-out-of-my-head": + _("Unexpected start tag (%(name)s) that can be in head. 
Moved."), + "unexpected-start-tag": + _("Unexpected start tag (%(name)s)."), + "missing-end-tag": + _("Missing end tag (%(name)s)."), + "missing-end-tags": + _("Missing end tags (%(name)s)."), + "unexpected-start-tag-implies-end-tag": + _("Unexpected start tag (%(startName)s) " + "implies end tag (%(endName)s)."), + "unexpected-start-tag-treated-as": + _("Unexpected start tag (%(originalName)s). Treated as %(newName)s."), + "deprecated-tag": + _("Unexpected start tag %(name)s. Don't use it!"), + "unexpected-start-tag-ignored": + _("Unexpected start tag %(name)s. Ignored."), + "expected-one-end-tag-but-got-another": + _("Unexpected end tag (%(gotName)s). " + "Missing end tag (%(expectedName)s)."), + "end-tag-too-early": + _("End tag (%(name)s) seen too early. Expected other end tag."), + "end-tag-too-early-named": + _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."), + "end-tag-too-early-ignored": + _("End tag (%(name)s) seen too early. Ignored."), + "adoption-agency-1.1": + _("End tag (%(name)s) violates step 1, " + "paragraph 1 of the adoption agency algorithm."), + "adoption-agency-1.2": + _("End tag (%(name)s) violates step 1, " + "paragraph 2 of the adoption agency algorithm."), + "adoption-agency-1.3": + _("End tag (%(name)s) violates step 1, " + "paragraph 3 of the adoption agency algorithm."), + "adoption-agency-4.4": + _("End tag (%(name)s) violates step 4, " + "paragraph 4 of the adoption agency algorithm."), + "unexpected-end-tag-treated-as": + _("Unexpected end tag (%(originalName)s). Treated as %(newName)s."), + "no-end-tag": + _("This element (%(name)s) has no end tag."), + "unexpected-implied-end-tag-in-table": + _("Unexpected implied end tag (%(name)s) in the table phase."), + "unexpected-implied-end-tag-in-table-body": + _("Unexpected implied end tag (%(name)s) in the table body phase."), + "unexpected-char-implies-table-voodoo": + _("Unexpected non-space characters in " + "table context caused voodoo mode."), + "unexpected-hidden-input-in-table": + _("Unexpected input with type hidden in table context."), + "unexpected-form-in-table": + _("Unexpected form in table context."), + "unexpected-start-tag-implies-table-voodoo": + _("Unexpected start tag (%(name)s) in " + "table context caused voodoo mode."), + "unexpected-end-tag-implies-table-voodoo": + _("Unexpected end tag (%(name)s) in " + "table context caused voodoo mode."), + "unexpected-cell-in-table-body": + _("Unexpected table cell start tag (%(name)s) " + "in the table body phase."), + "unexpected-cell-end-tag": + _("Got table cell end tag (%(name)s) " + "while required end tags are missing."), + "unexpected-end-tag-in-table-body": + _("Unexpected end tag (%(name)s) in the table body phase. Ignored."), + "unexpected-implied-end-tag-in-table-row": + _("Unexpected implied end tag (%(name)s) in the table row phase."), + "unexpected-end-tag-in-table-row": + _("Unexpected end tag (%(name)s) in the table row phase. Ignored."), + "unexpected-select-in-select": + _("Unexpected select start tag in the select phase " + "treated as select end tag."), + "unexpected-input-in-select": + _("Unexpected input start tag in the select phase."), + "unexpected-start-tag-in-select": + _("Unexpected start tag token (%(name)s in the select phase. " + "Ignored."), + "unexpected-end-tag-in-select": + _("Unexpected end tag (%(name)s) in the select phase. 
Ignored."), + "unexpected-table-element-start-tag-in-select-in-table": + _("Unexpected table element start tag (%(name)s) in the select in table phase."), + "unexpected-table-element-end-tag-in-select-in-table": + _("Unexpected table element end tag (%(name)s) in the select in table phase."), + "unexpected-char-after-body": + _("Unexpected non-space characters in the after body phase."), + "unexpected-start-tag-after-body": + _("Unexpected start tag token (%(name)s)" + " in the after body phase."), + "unexpected-end-tag-after-body": + _("Unexpected end tag token (%(name)s)" + " in the after body phase."), + "unexpected-char-in-frameset": + _("Unexpected characters in the frameset phase. Characters ignored."), + "unexpected-start-tag-in-frameset": + _("Unexpected start tag token (%(name)s)" + " in the frameset phase. Ignored."), + "unexpected-frameset-in-frameset-innerhtml": + _("Unexpected end tag token (frameset) " + "in the frameset phase (innerHTML)."), + "unexpected-end-tag-in-frameset": + _("Unexpected end tag token (%(name)s)" + " in the frameset phase. Ignored."), + "unexpected-char-after-frameset": + _("Unexpected non-space characters in the " + "after frameset phase. Ignored."), + "unexpected-start-tag-after-frameset": + _("Unexpected start tag (%(name)s)" + " in the after frameset phase. Ignored."), + "unexpected-end-tag-after-frameset": + _("Unexpected end tag (%(name)s)" + " in the after frameset phase. Ignored."), + "unexpected-end-tag-after-body-innerhtml": + _("Unexpected end tag after body(innerHtml)"), + "expected-eof-but-got-char": + _("Unexpected non-space characters. Expected end of file."), + "expected-eof-but-got-start-tag": + _("Unexpected start tag (%(name)s)" + ". Expected end of file."), + "expected-eof-but-got-end-tag": + _("Unexpected end tag (%(name)s)" + ". Expected end of file."), + "eof-in-table": + _("Unexpected end of file. Expected table content."), + "eof-in-select": + _("Unexpected end of file. Expected select content."), + "eof-in-frameset": + _("Unexpected end of file. Expected frameset content."), + "eof-in-script-in-script": + _("Unexpected end of file. Expected script content."), + "eof-in-foreign-lands": + _("Unexpected end of file. 
Expected foreign content"), + "non-void-element-with-trailing-solidus": + _("Trailing solidus not allowed on element %(name)s"), + "unexpected-html-element-in-foreign-content": + _("Element %(name)s not allowed in a non-html context"), + "unexpected-end-tag-before-html": + _("Unexpected end tag (%(name)s) before html."), + "XXX-undefined-error": + _("Undefined error (this sucks and should be fixed)"), +} + +namespaces = { + "html": "http://www.w3.org/1999/xhtml", + "mathml": "http://www.w3.org/1998/Math/MathML", + "svg": "http://www.w3.org/2000/svg", + "xlink": "http://www.w3.org/1999/xlink", + "xml": "http://www.w3.org/XML/1998/namespace", + "xmlns": "http://www.w3.org/2000/xmlns/" +} + +scopingElements = frozenset(( + (namespaces["html"], "applet"), + (namespaces["html"], "caption"), + (namespaces["html"], "html"), + (namespaces["html"], "marquee"), + (namespaces["html"], "object"), + (namespaces["html"], "table"), + (namespaces["html"], "td"), + (namespaces["html"], "th"), + (namespaces["mathml"], "mi"), + (namespaces["mathml"], "mo"), + (namespaces["mathml"], "mn"), + (namespaces["mathml"], "ms"), + (namespaces["mathml"], "mtext"), + (namespaces["mathml"], "annotation-xml"), + (namespaces["svg"], "foreignObject"), + (namespaces["svg"], "desc"), + (namespaces["svg"], "title"), +)) + +formattingElements = frozenset(( + (namespaces["html"], "a"), + (namespaces["html"], "b"), + (namespaces["html"], "big"), + (namespaces["html"], "code"), + (namespaces["html"], "em"), + (namespaces["html"], "font"), + (namespaces["html"], "i"), + (namespaces["html"], "nobr"), + (namespaces["html"], "s"), + (namespaces["html"], "small"), + (namespaces["html"], "strike"), + (namespaces["html"], "strong"), + (namespaces["html"], "tt"), + (namespaces["html"], "u") +)) + +specialElements = frozenset(( + (namespaces["html"], "address"), + (namespaces["html"], "applet"), + (namespaces["html"], "area"), + (namespaces["html"], "article"), + (namespaces["html"], "aside"), + (namespaces["html"], "base"), + (namespaces["html"], "basefont"), + (namespaces["html"], "bgsound"), + (namespaces["html"], "blockquote"), + (namespaces["html"], "body"), + (namespaces["html"], "br"), + (namespaces["html"], "button"), + (namespaces["html"], "caption"), + (namespaces["html"], "center"), + (namespaces["html"], "col"), + (namespaces["html"], "colgroup"), + (namespaces["html"], "command"), + (namespaces["html"], "dd"), + (namespaces["html"], "details"), + (namespaces["html"], "dir"), + (namespaces["html"], "div"), + (namespaces["html"], "dl"), + (namespaces["html"], "dt"), + (namespaces["html"], "embed"), + (namespaces["html"], "fieldset"), + (namespaces["html"], "figure"), + (namespaces["html"], "footer"), + (namespaces["html"], "form"), + (namespaces["html"], "frame"), + (namespaces["html"], "frameset"), + (namespaces["html"], "h1"), + (namespaces["html"], "h2"), + (namespaces["html"], "h3"), + (namespaces["html"], "h4"), + (namespaces["html"], "h5"), + (namespaces["html"], "h6"), + (namespaces["html"], "head"), + (namespaces["html"], "header"), + (namespaces["html"], "hr"), + (namespaces["html"], "html"), + (namespaces["html"], "iframe"), + # Note that image is commented out in the spec as "this isn't an + # element that can end up on the stack, so it doesn't matter," + (namespaces["html"], "image"), + (namespaces["html"], "img"), + (namespaces["html"], "input"), + (namespaces["html"], "isindex"), + (namespaces["html"], "li"), + (namespaces["html"], "link"), + (namespaces["html"], "listing"), + (namespaces["html"], "marquee"), + 
(namespaces["html"], "menu"), + (namespaces["html"], "meta"), + (namespaces["html"], "nav"), + (namespaces["html"], "noembed"), + (namespaces["html"], "noframes"), + (namespaces["html"], "noscript"), + (namespaces["html"], "object"), + (namespaces["html"], "ol"), + (namespaces["html"], "p"), + (namespaces["html"], "param"), + (namespaces["html"], "plaintext"), + (namespaces["html"], "pre"), + (namespaces["html"], "script"), + (namespaces["html"], "section"), + (namespaces["html"], "select"), + (namespaces["html"], "style"), + (namespaces["html"], "table"), + (namespaces["html"], "tbody"), + (namespaces["html"], "td"), + (namespaces["html"], "textarea"), + (namespaces["html"], "tfoot"), + (namespaces["html"], "th"), + (namespaces["html"], "thead"), + (namespaces["html"], "title"), + (namespaces["html"], "tr"), + (namespaces["html"], "ul"), + (namespaces["html"], "wbr"), + (namespaces["html"], "xmp"), + (namespaces["svg"], "foreignObject") +)) + +htmlIntegrationPointElements = frozenset(( + (namespaces["mathml"], "annotaion-xml"), + (namespaces["svg"], "foreignObject"), + (namespaces["svg"], "desc"), + (namespaces["svg"], "title") +)) + +mathmlTextIntegrationPointElements = frozenset(( + (namespaces["mathml"], "mi"), + (namespaces["mathml"], "mo"), + (namespaces["mathml"], "mn"), + (namespaces["mathml"], "ms"), + (namespaces["mathml"], "mtext") +)) + +spaceCharacters = frozenset(( + "\t", + "\n", + "\u000C", + " ", + "\r" +)) + +tableInsertModeElements = frozenset(( + "table", + "tbody", + "tfoot", + "thead", + "tr" +)) + +asciiLowercase = frozenset(string.ascii_lowercase) +asciiUppercase = frozenset(string.ascii_uppercase) +asciiLetters = frozenset(string.ascii_letters) +digits = frozenset(string.digits) +hexDigits = frozenset(string.hexdigits) + +asciiUpper2Lower = dict([(ord(c), ord(c.lower())) + for c in string.ascii_uppercase]) + +# Heading elements need to be ordered +headingElements = ( + "h1", + "h2", + "h3", + "h4", + "h5", + "h6" +) + +voidElements = frozenset(( + "base", + "command", + "event-source", + "link", + "meta", + "hr", + "br", + "img", + "embed", + "param", + "area", + "col", + "input", + "source", + "track" +)) + +cdataElements = frozenset(('title', 'textarea')) + +rcdataElements = frozenset(( + 'style', + 'script', + 'xmp', + 'iframe', + 'noembed', + 'noframes', + 'noscript' +)) + +booleanAttributes = { + "": frozenset(("irrelevant",)), + "style": frozenset(("scoped",)), + "img": frozenset(("ismap",)), + "audio": frozenset(("autoplay", "controls")), + "video": frozenset(("autoplay", "controls")), + "script": frozenset(("defer", "async")), + "details": frozenset(("open",)), + "datagrid": frozenset(("multiple", "disabled")), + "command": frozenset(("hidden", "disabled", "checked", "default")), + "hr": frozenset(("noshade")), + "menu": frozenset(("autosubmit",)), + "fieldset": frozenset(("disabled", "readonly")), + "option": frozenset(("disabled", "readonly", "selected")), + "optgroup": frozenset(("disabled", "readonly")), + "button": frozenset(("disabled", "autofocus")), + "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")), + "select": frozenset(("disabled", "readonly", "autofocus", "multiple")), + "output": frozenset(("disabled", "readonly")), +} + +# entitiesWindows1252 has to be _ordered_ and needs to have an index. It +# therefore can't be a frozenset. 
+entitiesWindows1252 = ( + 8364, # 0x80 0x20AC EURO SIGN + 65533, # 0x81 UNDEFINED + 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK + 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK + 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK + 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS + 8224, # 0x86 0x2020 DAGGER + 8225, # 0x87 0x2021 DOUBLE DAGGER + 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT + 8240, # 0x89 0x2030 PER MILLE SIGN + 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON + 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK + 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE + 65533, # 0x8D UNDEFINED + 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON + 65533, # 0x8F UNDEFINED + 65533, # 0x90 UNDEFINED + 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK + 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK + 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK + 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK + 8226, # 0x95 0x2022 BULLET + 8211, # 0x96 0x2013 EN DASH + 8212, # 0x97 0x2014 EM DASH + 732, # 0x98 0x02DC SMALL TILDE + 8482, # 0x99 0x2122 TRADE MARK SIGN + 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON + 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE + 65533, # 0x9D UNDEFINED + 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON + 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS +) + +xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;')) + +entities = { + "AElig": "\xc6", + "AElig;": "\xc6", + "AMP": "&", + "AMP;": "&", + "Aacute": "\xc1", + "Aacute;": "\xc1", + "Abreve;": "\u0102", + "Acirc": "\xc2", + "Acirc;": "\xc2", + "Acy;": "\u0410", + "Afr;": "\U0001d504", + "Agrave": "\xc0", + "Agrave;": "\xc0", + "Alpha;": "\u0391", + "Amacr;": "\u0100", + "And;": "\u2a53", + "Aogon;": "\u0104", + "Aopf;": "\U0001d538", + "ApplyFunction;": "\u2061", + "Aring": "\xc5", + "Aring;": "\xc5", + "Ascr;": "\U0001d49c", + "Assign;": "\u2254", + "Atilde": "\xc3", + "Atilde;": "\xc3", + "Auml": "\xc4", + "Auml;": "\xc4", + "Backslash;": "\u2216", + "Barv;": "\u2ae7", + "Barwed;": "\u2306", + "Bcy;": "\u0411", + "Because;": "\u2235", + "Bernoullis;": "\u212c", + "Beta;": "\u0392", + "Bfr;": "\U0001d505", + "Bopf;": "\U0001d539", + "Breve;": "\u02d8", + "Bscr;": "\u212c", + "Bumpeq;": "\u224e", + "CHcy;": "\u0427", + "COPY": "\xa9", + "COPY;": "\xa9", + "Cacute;": "\u0106", + "Cap;": "\u22d2", + "CapitalDifferentialD;": "\u2145", + "Cayleys;": "\u212d", + "Ccaron;": "\u010c", + "Ccedil": "\xc7", + "Ccedil;": "\xc7", + "Ccirc;": "\u0108", + "Cconint;": "\u2230", + "Cdot;": "\u010a", + "Cedilla;": "\xb8", + "CenterDot;": "\xb7", + "Cfr;": "\u212d", + "Chi;": "\u03a7", + "CircleDot;": "\u2299", + "CircleMinus;": "\u2296", + "CirclePlus;": "\u2295", + "CircleTimes;": "\u2297", + "ClockwiseContourIntegral;": "\u2232", + "CloseCurlyDoubleQuote;": "\u201d", + "CloseCurlyQuote;": "\u2019", + "Colon;": "\u2237", + "Colone;": "\u2a74", + "Congruent;": "\u2261", + "Conint;": "\u222f", + "ContourIntegral;": "\u222e", + "Copf;": "\u2102", + "Coproduct;": "\u2210", + "CounterClockwiseContourIntegral;": "\u2233", + "Cross;": "\u2a2f", + "Cscr;": "\U0001d49e", + "Cup;": "\u22d3", + "CupCap;": "\u224d", + "DD;": "\u2145", + "DDotrahd;": "\u2911", + "DJcy;": "\u0402", + "DScy;": "\u0405", + "DZcy;": "\u040f", + "Dagger;": "\u2021", + "Darr;": "\u21a1", + "Dashv;": "\u2ae4", + "Dcaron;": "\u010e", + "Dcy;": "\u0414", + "Del;": "\u2207", + "Delta;": "\u0394", + "Dfr;": "\U0001d507", + "DiacriticalAcute;": "\xb4", + 
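+    # (Editorial note: keys appear both with and without a trailing ';',
+    # e.g. "AMP" and "AMP;", because HTML5 allows only a fixed legacy
+    # subset of named references to omit the terminating semicolon.)
+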
"DiacriticalDot;": "\u02d9", + "DiacriticalDoubleAcute;": "\u02dd", + "DiacriticalGrave;": "`", + "DiacriticalTilde;": "\u02dc", + "Diamond;": "\u22c4", + "DifferentialD;": "\u2146", + "Dopf;": "\U0001d53b", + "Dot;": "\xa8", + "DotDot;": "\u20dc", + "DotEqual;": "\u2250", + "DoubleContourIntegral;": "\u222f", + "DoubleDot;": "\xa8", + "DoubleDownArrow;": "\u21d3", + "DoubleLeftArrow;": "\u21d0", + "DoubleLeftRightArrow;": "\u21d4", + "DoubleLeftTee;": "\u2ae4", + "DoubleLongLeftArrow;": "\u27f8", + "DoubleLongLeftRightArrow;": "\u27fa", + "DoubleLongRightArrow;": "\u27f9", + "DoubleRightArrow;": "\u21d2", + "DoubleRightTee;": "\u22a8", + "DoubleUpArrow;": "\u21d1", + "DoubleUpDownArrow;": "\u21d5", + "DoubleVerticalBar;": "\u2225", + "DownArrow;": "\u2193", + "DownArrowBar;": "\u2913", + "DownArrowUpArrow;": "\u21f5", + "DownBreve;": "\u0311", + "DownLeftRightVector;": "\u2950", + "DownLeftTeeVector;": "\u295e", + "DownLeftVector;": "\u21bd", + "DownLeftVectorBar;": "\u2956", + "DownRightTeeVector;": "\u295f", + "DownRightVector;": "\u21c1", + "DownRightVectorBar;": "\u2957", + "DownTee;": "\u22a4", + "DownTeeArrow;": "\u21a7", + "Downarrow;": "\u21d3", + "Dscr;": "\U0001d49f", + "Dstrok;": "\u0110", + "ENG;": "\u014a", + "ETH": "\xd0", + "ETH;": "\xd0", + "Eacute": "\xc9", + "Eacute;": "\xc9", + "Ecaron;": "\u011a", + "Ecirc": "\xca", + "Ecirc;": "\xca", + "Ecy;": "\u042d", + "Edot;": "\u0116", + "Efr;": "\U0001d508", + "Egrave": "\xc8", + "Egrave;": "\xc8", + "Element;": "\u2208", + "Emacr;": "\u0112", + "EmptySmallSquare;": "\u25fb", + "EmptyVerySmallSquare;": "\u25ab", + "Eogon;": "\u0118", + "Eopf;": "\U0001d53c", + "Epsilon;": "\u0395", + "Equal;": "\u2a75", + "EqualTilde;": "\u2242", + "Equilibrium;": "\u21cc", + "Escr;": "\u2130", + "Esim;": "\u2a73", + "Eta;": "\u0397", + "Euml": "\xcb", + "Euml;": "\xcb", + "Exists;": "\u2203", + "ExponentialE;": "\u2147", + "Fcy;": "\u0424", + "Ffr;": "\U0001d509", + "FilledSmallSquare;": "\u25fc", + "FilledVerySmallSquare;": "\u25aa", + "Fopf;": "\U0001d53d", + "ForAll;": "\u2200", + "Fouriertrf;": "\u2131", + "Fscr;": "\u2131", + "GJcy;": "\u0403", + "GT": ">", + "GT;": ">", + "Gamma;": "\u0393", + "Gammad;": "\u03dc", + "Gbreve;": "\u011e", + "Gcedil;": "\u0122", + "Gcirc;": "\u011c", + "Gcy;": "\u0413", + "Gdot;": "\u0120", + "Gfr;": "\U0001d50a", + "Gg;": "\u22d9", + "Gopf;": "\U0001d53e", + "GreaterEqual;": "\u2265", + "GreaterEqualLess;": "\u22db", + "GreaterFullEqual;": "\u2267", + "GreaterGreater;": "\u2aa2", + "GreaterLess;": "\u2277", + "GreaterSlantEqual;": "\u2a7e", + "GreaterTilde;": "\u2273", + "Gscr;": "\U0001d4a2", + "Gt;": "\u226b", + "HARDcy;": "\u042a", + "Hacek;": "\u02c7", + "Hat;": "^", + "Hcirc;": "\u0124", + "Hfr;": "\u210c", + "HilbertSpace;": "\u210b", + "Hopf;": "\u210d", + "HorizontalLine;": "\u2500", + "Hscr;": "\u210b", + "Hstrok;": "\u0126", + "HumpDownHump;": "\u224e", + "HumpEqual;": "\u224f", + "IEcy;": "\u0415", + "IJlig;": "\u0132", + "IOcy;": "\u0401", + "Iacute": "\xcd", + "Iacute;": "\xcd", + "Icirc": "\xce", + "Icirc;": "\xce", + "Icy;": "\u0418", + "Idot;": "\u0130", + "Ifr;": "\u2111", + "Igrave": "\xcc", + "Igrave;": "\xcc", + "Im;": "\u2111", + "Imacr;": "\u012a", + "ImaginaryI;": "\u2148", + "Implies;": "\u21d2", + "Int;": "\u222c", + "Integral;": "\u222b", + "Intersection;": "\u22c2", + "InvisibleComma;": "\u2063", + "InvisibleTimes;": "\u2062", + "Iogon;": "\u012e", + "Iopf;": "\U0001d540", + "Iota;": "\u0399", + "Iscr;": "\u2110", + "Itilde;": "\u0128", + "Iukcy;": "\u0406", + "Iuml": "\xcf", + 
"Iuml;": "\xcf", + "Jcirc;": "\u0134", + "Jcy;": "\u0419", + "Jfr;": "\U0001d50d", + "Jopf;": "\U0001d541", + "Jscr;": "\U0001d4a5", + "Jsercy;": "\u0408", + "Jukcy;": "\u0404", + "KHcy;": "\u0425", + "KJcy;": "\u040c", + "Kappa;": "\u039a", + "Kcedil;": "\u0136", + "Kcy;": "\u041a", + "Kfr;": "\U0001d50e", + "Kopf;": "\U0001d542", + "Kscr;": "\U0001d4a6", + "LJcy;": "\u0409", + "LT": "<", + "LT;": "<", + "Lacute;": "\u0139", + "Lambda;": "\u039b", + "Lang;": "\u27ea", + "Laplacetrf;": "\u2112", + "Larr;": "\u219e", + "Lcaron;": "\u013d", + "Lcedil;": "\u013b", + "Lcy;": "\u041b", + "LeftAngleBracket;": "\u27e8", + "LeftArrow;": "\u2190", + "LeftArrowBar;": "\u21e4", + "LeftArrowRightArrow;": "\u21c6", + "LeftCeiling;": "\u2308", + "LeftDoubleBracket;": "\u27e6", + "LeftDownTeeVector;": "\u2961", + "LeftDownVector;": "\u21c3", + "LeftDownVectorBar;": "\u2959", + "LeftFloor;": "\u230a", + "LeftRightArrow;": "\u2194", + "LeftRightVector;": "\u294e", + "LeftTee;": "\u22a3", + "LeftTeeArrow;": "\u21a4", + "LeftTeeVector;": "\u295a", + "LeftTriangle;": "\u22b2", + "LeftTriangleBar;": "\u29cf", + "LeftTriangleEqual;": "\u22b4", + "LeftUpDownVector;": "\u2951", + "LeftUpTeeVector;": "\u2960", + "LeftUpVector;": "\u21bf", + "LeftUpVectorBar;": "\u2958", + "LeftVector;": "\u21bc", + "LeftVectorBar;": "\u2952", + "Leftarrow;": "\u21d0", + "Leftrightarrow;": "\u21d4", + "LessEqualGreater;": "\u22da", + "LessFullEqual;": "\u2266", + "LessGreater;": "\u2276", + "LessLess;": "\u2aa1", + "LessSlantEqual;": "\u2a7d", + "LessTilde;": "\u2272", + "Lfr;": "\U0001d50f", + "Ll;": "\u22d8", + "Lleftarrow;": "\u21da", + "Lmidot;": "\u013f", + "LongLeftArrow;": "\u27f5", + "LongLeftRightArrow;": "\u27f7", + "LongRightArrow;": "\u27f6", + "Longleftarrow;": "\u27f8", + "Longleftrightarrow;": "\u27fa", + "Longrightarrow;": "\u27f9", + "Lopf;": "\U0001d543", + "LowerLeftArrow;": "\u2199", + "LowerRightArrow;": "\u2198", + "Lscr;": "\u2112", + "Lsh;": "\u21b0", + "Lstrok;": "\u0141", + "Lt;": "\u226a", + "Map;": "\u2905", + "Mcy;": "\u041c", + "MediumSpace;": "\u205f", + "Mellintrf;": "\u2133", + "Mfr;": "\U0001d510", + "MinusPlus;": "\u2213", + "Mopf;": "\U0001d544", + "Mscr;": "\u2133", + "Mu;": "\u039c", + "NJcy;": "\u040a", + "Nacute;": "\u0143", + "Ncaron;": "\u0147", + "Ncedil;": "\u0145", + "Ncy;": "\u041d", + "NegativeMediumSpace;": "\u200b", + "NegativeThickSpace;": "\u200b", + "NegativeThinSpace;": "\u200b", + "NegativeVeryThinSpace;": "\u200b", + "NestedGreaterGreater;": "\u226b", + "NestedLessLess;": "\u226a", + "NewLine;": "\n", + "Nfr;": "\U0001d511", + "NoBreak;": "\u2060", + "NonBreakingSpace;": "\xa0", + "Nopf;": "\u2115", + "Not;": "\u2aec", + "NotCongruent;": "\u2262", + "NotCupCap;": "\u226d", + "NotDoubleVerticalBar;": "\u2226", + "NotElement;": "\u2209", + "NotEqual;": "\u2260", + "NotEqualTilde;": "\u2242\u0338", + "NotExists;": "\u2204", + "NotGreater;": "\u226f", + "NotGreaterEqual;": "\u2271", + "NotGreaterFullEqual;": "\u2267\u0338", + "NotGreaterGreater;": "\u226b\u0338", + "NotGreaterLess;": "\u2279", + "NotGreaterSlantEqual;": "\u2a7e\u0338", + "NotGreaterTilde;": "\u2275", + "NotHumpDownHump;": "\u224e\u0338", + "NotHumpEqual;": "\u224f\u0338", + "NotLeftTriangle;": "\u22ea", + "NotLeftTriangleBar;": "\u29cf\u0338", + "NotLeftTriangleEqual;": "\u22ec", + "NotLess;": "\u226e", + "NotLessEqual;": "\u2270", + "NotLessGreater;": "\u2278", + "NotLessLess;": "\u226a\u0338", + "NotLessSlantEqual;": "\u2a7d\u0338", + "NotLessTilde;": "\u2274", + "NotNestedGreaterGreater;": "\u2aa2\u0338", + 
"NotNestedLessLess;": "\u2aa1\u0338", + "NotPrecedes;": "\u2280", + "NotPrecedesEqual;": "\u2aaf\u0338", + "NotPrecedesSlantEqual;": "\u22e0", + "NotReverseElement;": "\u220c", + "NotRightTriangle;": "\u22eb", + "NotRightTriangleBar;": "\u29d0\u0338", + "NotRightTriangleEqual;": "\u22ed", + "NotSquareSubset;": "\u228f\u0338", + "NotSquareSubsetEqual;": "\u22e2", + "NotSquareSuperset;": "\u2290\u0338", + "NotSquareSupersetEqual;": "\u22e3", + "NotSubset;": "\u2282\u20d2", + "NotSubsetEqual;": "\u2288", + "NotSucceeds;": "\u2281", + "NotSucceedsEqual;": "\u2ab0\u0338", + "NotSucceedsSlantEqual;": "\u22e1", + "NotSucceedsTilde;": "\u227f\u0338", + "NotSuperset;": "\u2283\u20d2", + "NotSupersetEqual;": "\u2289", + "NotTilde;": "\u2241", + "NotTildeEqual;": "\u2244", + "NotTildeFullEqual;": "\u2247", + "NotTildeTilde;": "\u2249", + "NotVerticalBar;": "\u2224", + "Nscr;": "\U0001d4a9", + "Ntilde": "\xd1", + "Ntilde;": "\xd1", + "Nu;": "\u039d", + "OElig;": "\u0152", + "Oacute": "\xd3", + "Oacute;": "\xd3", + "Ocirc": "\xd4", + "Ocirc;": "\xd4", + "Ocy;": "\u041e", + "Odblac;": "\u0150", + "Ofr;": "\U0001d512", + "Ograve": "\xd2", + "Ograve;": "\xd2", + "Omacr;": "\u014c", + "Omega;": "\u03a9", + "Omicron;": "\u039f", + "Oopf;": "\U0001d546", + "OpenCurlyDoubleQuote;": "\u201c", + "OpenCurlyQuote;": "\u2018", + "Or;": "\u2a54", + "Oscr;": "\U0001d4aa", + "Oslash": "\xd8", + "Oslash;": "\xd8", + "Otilde": "\xd5", + "Otilde;": "\xd5", + "Otimes;": "\u2a37", + "Ouml": "\xd6", + "Ouml;": "\xd6", + "OverBar;": "\u203e", + "OverBrace;": "\u23de", + "OverBracket;": "\u23b4", + "OverParenthesis;": "\u23dc", + "PartialD;": "\u2202", + "Pcy;": "\u041f", + "Pfr;": "\U0001d513", + "Phi;": "\u03a6", + "Pi;": "\u03a0", + "PlusMinus;": "\xb1", + "Poincareplane;": "\u210c", + "Popf;": "\u2119", + "Pr;": "\u2abb", + "Precedes;": "\u227a", + "PrecedesEqual;": "\u2aaf", + "PrecedesSlantEqual;": "\u227c", + "PrecedesTilde;": "\u227e", + "Prime;": "\u2033", + "Product;": "\u220f", + "Proportion;": "\u2237", + "Proportional;": "\u221d", + "Pscr;": "\U0001d4ab", + "Psi;": "\u03a8", + "QUOT": "\"", + "QUOT;": "\"", + "Qfr;": "\U0001d514", + "Qopf;": "\u211a", + "Qscr;": "\U0001d4ac", + "RBarr;": "\u2910", + "REG": "\xae", + "REG;": "\xae", + "Racute;": "\u0154", + "Rang;": "\u27eb", + "Rarr;": "\u21a0", + "Rarrtl;": "\u2916", + "Rcaron;": "\u0158", + "Rcedil;": "\u0156", + "Rcy;": "\u0420", + "Re;": "\u211c", + "ReverseElement;": "\u220b", + "ReverseEquilibrium;": "\u21cb", + "ReverseUpEquilibrium;": "\u296f", + "Rfr;": "\u211c", + "Rho;": "\u03a1", + "RightAngleBracket;": "\u27e9", + "RightArrow;": "\u2192", + "RightArrowBar;": "\u21e5", + "RightArrowLeftArrow;": "\u21c4", + "RightCeiling;": "\u2309", + "RightDoubleBracket;": "\u27e7", + "RightDownTeeVector;": "\u295d", + "RightDownVector;": "\u21c2", + "RightDownVectorBar;": "\u2955", + "RightFloor;": "\u230b", + "RightTee;": "\u22a2", + "RightTeeArrow;": "\u21a6", + "RightTeeVector;": "\u295b", + "RightTriangle;": "\u22b3", + "RightTriangleBar;": "\u29d0", + "RightTriangleEqual;": "\u22b5", + "RightUpDownVector;": "\u294f", + "RightUpTeeVector;": "\u295c", + "RightUpVector;": "\u21be", + "RightUpVectorBar;": "\u2954", + "RightVector;": "\u21c0", + "RightVectorBar;": "\u2953", + "Rightarrow;": "\u21d2", + "Ropf;": "\u211d", + "RoundImplies;": "\u2970", + "Rrightarrow;": "\u21db", + "Rscr;": "\u211b", + "Rsh;": "\u21b1", + "RuleDelayed;": "\u29f4", + "SHCHcy;": "\u0429", + "SHcy;": "\u0428", + "SOFTcy;": "\u042c", + "Sacute;": "\u015a", + "Sc;": "\u2abc", + "Scaron;": 
"\u0160", + "Scedil;": "\u015e", + "Scirc;": "\u015c", + "Scy;": "\u0421", + "Sfr;": "\U0001d516", + "ShortDownArrow;": "\u2193", + "ShortLeftArrow;": "\u2190", + "ShortRightArrow;": "\u2192", + "ShortUpArrow;": "\u2191", + "Sigma;": "\u03a3", + "SmallCircle;": "\u2218", + "Sopf;": "\U0001d54a", + "Sqrt;": "\u221a", + "Square;": "\u25a1", + "SquareIntersection;": "\u2293", + "SquareSubset;": "\u228f", + "SquareSubsetEqual;": "\u2291", + "SquareSuperset;": "\u2290", + "SquareSupersetEqual;": "\u2292", + "SquareUnion;": "\u2294", + "Sscr;": "\U0001d4ae", + "Star;": "\u22c6", + "Sub;": "\u22d0", + "Subset;": "\u22d0", + "SubsetEqual;": "\u2286", + "Succeeds;": "\u227b", + "SucceedsEqual;": "\u2ab0", + "SucceedsSlantEqual;": "\u227d", + "SucceedsTilde;": "\u227f", + "SuchThat;": "\u220b", + "Sum;": "\u2211", + "Sup;": "\u22d1", + "Superset;": "\u2283", + "SupersetEqual;": "\u2287", + "Supset;": "\u22d1", + "THORN": "\xde", + "THORN;": "\xde", + "TRADE;": "\u2122", + "TSHcy;": "\u040b", + "TScy;": "\u0426", + "Tab;": "\t", + "Tau;": "\u03a4", + "Tcaron;": "\u0164", + "Tcedil;": "\u0162", + "Tcy;": "\u0422", + "Tfr;": "\U0001d517", + "Therefore;": "\u2234", + "Theta;": "\u0398", + "ThickSpace;": "\u205f\u200a", + "ThinSpace;": "\u2009", + "Tilde;": "\u223c", + "TildeEqual;": "\u2243", + "TildeFullEqual;": "\u2245", + "TildeTilde;": "\u2248", + "Topf;": "\U0001d54b", + "TripleDot;": "\u20db", + "Tscr;": "\U0001d4af", + "Tstrok;": "\u0166", + "Uacute": "\xda", + "Uacute;": "\xda", + "Uarr;": "\u219f", + "Uarrocir;": "\u2949", + "Ubrcy;": "\u040e", + "Ubreve;": "\u016c", + "Ucirc": "\xdb", + "Ucirc;": "\xdb", + "Ucy;": "\u0423", + "Udblac;": "\u0170", + "Ufr;": "\U0001d518", + "Ugrave": "\xd9", + "Ugrave;": "\xd9", + "Umacr;": "\u016a", + "UnderBar;": "_", + "UnderBrace;": "\u23df", + "UnderBracket;": "\u23b5", + "UnderParenthesis;": "\u23dd", + "Union;": "\u22c3", + "UnionPlus;": "\u228e", + "Uogon;": "\u0172", + "Uopf;": "\U0001d54c", + "UpArrow;": "\u2191", + "UpArrowBar;": "\u2912", + "UpArrowDownArrow;": "\u21c5", + "UpDownArrow;": "\u2195", + "UpEquilibrium;": "\u296e", + "UpTee;": "\u22a5", + "UpTeeArrow;": "\u21a5", + "Uparrow;": "\u21d1", + "Updownarrow;": "\u21d5", + "UpperLeftArrow;": "\u2196", + "UpperRightArrow;": "\u2197", + "Upsi;": "\u03d2", + "Upsilon;": "\u03a5", + "Uring;": "\u016e", + "Uscr;": "\U0001d4b0", + "Utilde;": "\u0168", + "Uuml": "\xdc", + "Uuml;": "\xdc", + "VDash;": "\u22ab", + "Vbar;": "\u2aeb", + "Vcy;": "\u0412", + "Vdash;": "\u22a9", + "Vdashl;": "\u2ae6", + "Vee;": "\u22c1", + "Verbar;": "\u2016", + "Vert;": "\u2016", + "VerticalBar;": "\u2223", + "VerticalLine;": "|", + "VerticalSeparator;": "\u2758", + "VerticalTilde;": "\u2240", + "VeryThinSpace;": "\u200a", + "Vfr;": "\U0001d519", + "Vopf;": "\U0001d54d", + "Vscr;": "\U0001d4b1", + "Vvdash;": "\u22aa", + "Wcirc;": "\u0174", + "Wedge;": "\u22c0", + "Wfr;": "\U0001d51a", + "Wopf;": "\U0001d54e", + "Wscr;": "\U0001d4b2", + "Xfr;": "\U0001d51b", + "Xi;": "\u039e", + "Xopf;": "\U0001d54f", + "Xscr;": "\U0001d4b3", + "YAcy;": "\u042f", + "YIcy;": "\u0407", + "YUcy;": "\u042e", + "Yacute": "\xdd", + "Yacute;": "\xdd", + "Ycirc;": "\u0176", + "Ycy;": "\u042b", + "Yfr;": "\U0001d51c", + "Yopf;": "\U0001d550", + "Yscr;": "\U0001d4b4", + "Yuml;": "\u0178", + "ZHcy;": "\u0416", + "Zacute;": "\u0179", + "Zcaron;": "\u017d", + "Zcy;": "\u0417", + "Zdot;": "\u017b", + "ZeroWidthSpace;": "\u200b", + "Zeta;": "\u0396", + "Zfr;": "\u2128", + "Zopf;": "\u2124", + "Zscr;": "\U0001d4b5", + "aacute": "\xe1", + "aacute;": "\xe1", 
+ "abreve;": "\u0103", + "ac;": "\u223e", + "acE;": "\u223e\u0333", + "acd;": "\u223f", + "acirc": "\xe2", + "acirc;": "\xe2", + "acute": "\xb4", + "acute;": "\xb4", + "acy;": "\u0430", + "aelig": "\xe6", + "aelig;": "\xe6", + "af;": "\u2061", + "afr;": "\U0001d51e", + "agrave": "\xe0", + "agrave;": "\xe0", + "alefsym;": "\u2135", + "aleph;": "\u2135", + "alpha;": "\u03b1", + "amacr;": "\u0101", + "amalg;": "\u2a3f", + "amp": "&", + "amp;": "&", + "and;": "\u2227", + "andand;": "\u2a55", + "andd;": "\u2a5c", + "andslope;": "\u2a58", + "andv;": "\u2a5a", + "ang;": "\u2220", + "ange;": "\u29a4", + "angle;": "\u2220", + "angmsd;": "\u2221", + "angmsdaa;": "\u29a8", + "angmsdab;": "\u29a9", + "angmsdac;": "\u29aa", + "angmsdad;": "\u29ab", + "angmsdae;": "\u29ac", + "angmsdaf;": "\u29ad", + "angmsdag;": "\u29ae", + "angmsdah;": "\u29af", + "angrt;": "\u221f", + "angrtvb;": "\u22be", + "angrtvbd;": "\u299d", + "angsph;": "\u2222", + "angst;": "\xc5", + "angzarr;": "\u237c", + "aogon;": "\u0105", + "aopf;": "\U0001d552", + "ap;": "\u2248", + "apE;": "\u2a70", + "apacir;": "\u2a6f", + "ape;": "\u224a", + "apid;": "\u224b", + "apos;": "'", + "approx;": "\u2248", + "approxeq;": "\u224a", + "aring": "\xe5", + "aring;": "\xe5", + "ascr;": "\U0001d4b6", + "ast;": "*", + "asymp;": "\u2248", + "asympeq;": "\u224d", + "atilde": "\xe3", + "atilde;": "\xe3", + "auml": "\xe4", + "auml;": "\xe4", + "awconint;": "\u2233", + "awint;": "\u2a11", + "bNot;": "\u2aed", + "backcong;": "\u224c", + "backepsilon;": "\u03f6", + "backprime;": "\u2035", + "backsim;": "\u223d", + "backsimeq;": "\u22cd", + "barvee;": "\u22bd", + "barwed;": "\u2305", + "barwedge;": "\u2305", + "bbrk;": "\u23b5", + "bbrktbrk;": "\u23b6", + "bcong;": "\u224c", + "bcy;": "\u0431", + "bdquo;": "\u201e", + "becaus;": "\u2235", + "because;": "\u2235", + "bemptyv;": "\u29b0", + "bepsi;": "\u03f6", + "bernou;": "\u212c", + "beta;": "\u03b2", + "beth;": "\u2136", + "between;": "\u226c", + "bfr;": "\U0001d51f", + "bigcap;": "\u22c2", + "bigcirc;": "\u25ef", + "bigcup;": "\u22c3", + "bigodot;": "\u2a00", + "bigoplus;": "\u2a01", + "bigotimes;": "\u2a02", + "bigsqcup;": "\u2a06", + "bigstar;": "\u2605", + "bigtriangledown;": "\u25bd", + "bigtriangleup;": "\u25b3", + "biguplus;": "\u2a04", + "bigvee;": "\u22c1", + "bigwedge;": "\u22c0", + "bkarow;": "\u290d", + "blacklozenge;": "\u29eb", + "blacksquare;": "\u25aa", + "blacktriangle;": "\u25b4", + "blacktriangledown;": "\u25be", + "blacktriangleleft;": "\u25c2", + "blacktriangleright;": "\u25b8", + "blank;": "\u2423", + "blk12;": "\u2592", + "blk14;": "\u2591", + "blk34;": "\u2593", + "block;": "\u2588", + "bne;": "=\u20e5", + "bnequiv;": "\u2261\u20e5", + "bnot;": "\u2310", + "bopf;": "\U0001d553", + "bot;": "\u22a5", + "bottom;": "\u22a5", + "bowtie;": "\u22c8", + "boxDL;": "\u2557", + "boxDR;": "\u2554", + "boxDl;": "\u2556", + "boxDr;": "\u2553", + "boxH;": "\u2550", + "boxHD;": "\u2566", + "boxHU;": "\u2569", + "boxHd;": "\u2564", + "boxHu;": "\u2567", + "boxUL;": "\u255d", + "boxUR;": "\u255a", + "boxUl;": "\u255c", + "boxUr;": "\u2559", + "boxV;": "\u2551", + "boxVH;": "\u256c", + "boxVL;": "\u2563", + "boxVR;": "\u2560", + "boxVh;": "\u256b", + "boxVl;": "\u2562", + "boxVr;": "\u255f", + "boxbox;": "\u29c9", + "boxdL;": "\u2555", + "boxdR;": "\u2552", + "boxdl;": "\u2510", + "boxdr;": "\u250c", + "boxh;": "\u2500", + "boxhD;": "\u2565", + "boxhU;": "\u2568", + "boxhd;": "\u252c", + "boxhu;": "\u2534", + "boxminus;": "\u229f", + "boxplus;": "\u229e", + "boxtimes;": "\u22a0", + "boxuL;": "\u255b", 
+ "boxuR;": "\u2558", + "boxul;": "\u2518", + "boxur;": "\u2514", + "boxv;": "\u2502", + "boxvH;": "\u256a", + "boxvL;": "\u2561", + "boxvR;": "\u255e", + "boxvh;": "\u253c", + "boxvl;": "\u2524", + "boxvr;": "\u251c", + "bprime;": "\u2035", + "breve;": "\u02d8", + "brvbar": "\xa6", + "brvbar;": "\xa6", + "bscr;": "\U0001d4b7", + "bsemi;": "\u204f", + "bsim;": "\u223d", + "bsime;": "\u22cd", + "bsol;": "\\", + "bsolb;": "\u29c5", + "bsolhsub;": "\u27c8", + "bull;": "\u2022", + "bullet;": "\u2022", + "bump;": "\u224e", + "bumpE;": "\u2aae", + "bumpe;": "\u224f", + "bumpeq;": "\u224f", + "cacute;": "\u0107", + "cap;": "\u2229", + "capand;": "\u2a44", + "capbrcup;": "\u2a49", + "capcap;": "\u2a4b", + "capcup;": "\u2a47", + "capdot;": "\u2a40", + "caps;": "\u2229\ufe00", + "caret;": "\u2041", + "caron;": "\u02c7", + "ccaps;": "\u2a4d", + "ccaron;": "\u010d", + "ccedil": "\xe7", + "ccedil;": "\xe7", + "ccirc;": "\u0109", + "ccups;": "\u2a4c", + "ccupssm;": "\u2a50", + "cdot;": "\u010b", + "cedil": "\xb8", + "cedil;": "\xb8", + "cemptyv;": "\u29b2", + "cent": "\xa2", + "cent;": "\xa2", + "centerdot;": "\xb7", + "cfr;": "\U0001d520", + "chcy;": "\u0447", + "check;": "\u2713", + "checkmark;": "\u2713", + "chi;": "\u03c7", + "cir;": "\u25cb", + "cirE;": "\u29c3", + "circ;": "\u02c6", + "circeq;": "\u2257", + "circlearrowleft;": "\u21ba", + "circlearrowright;": "\u21bb", + "circledR;": "\xae", + "circledS;": "\u24c8", + "circledast;": "\u229b", + "circledcirc;": "\u229a", + "circleddash;": "\u229d", + "cire;": "\u2257", + "cirfnint;": "\u2a10", + "cirmid;": "\u2aef", + "cirscir;": "\u29c2", + "clubs;": "\u2663", + "clubsuit;": "\u2663", + "colon;": ":", + "colone;": "\u2254", + "coloneq;": "\u2254", + "comma;": ",", + "commat;": "@", + "comp;": "\u2201", + "compfn;": "\u2218", + "complement;": "\u2201", + "complexes;": "\u2102", + "cong;": "\u2245", + "congdot;": "\u2a6d", + "conint;": "\u222e", + "copf;": "\U0001d554", + "coprod;": "\u2210", + "copy": "\xa9", + "copy;": "\xa9", + "copysr;": "\u2117", + "crarr;": "\u21b5", + "cross;": "\u2717", + "cscr;": "\U0001d4b8", + "csub;": "\u2acf", + "csube;": "\u2ad1", + "csup;": "\u2ad0", + "csupe;": "\u2ad2", + "ctdot;": "\u22ef", + "cudarrl;": "\u2938", + "cudarrr;": "\u2935", + "cuepr;": "\u22de", + "cuesc;": "\u22df", + "cularr;": "\u21b6", + "cularrp;": "\u293d", + "cup;": "\u222a", + "cupbrcap;": "\u2a48", + "cupcap;": "\u2a46", + "cupcup;": "\u2a4a", + "cupdot;": "\u228d", + "cupor;": "\u2a45", + "cups;": "\u222a\ufe00", + "curarr;": "\u21b7", + "curarrm;": "\u293c", + "curlyeqprec;": "\u22de", + "curlyeqsucc;": "\u22df", + "curlyvee;": "\u22ce", + "curlywedge;": "\u22cf", + "curren": "\xa4", + "curren;": "\xa4", + "curvearrowleft;": "\u21b6", + "curvearrowright;": "\u21b7", + "cuvee;": "\u22ce", + "cuwed;": "\u22cf", + "cwconint;": "\u2232", + "cwint;": "\u2231", + "cylcty;": "\u232d", + "dArr;": "\u21d3", + "dHar;": "\u2965", + "dagger;": "\u2020", + "daleth;": "\u2138", + "darr;": "\u2193", + "dash;": "\u2010", + "dashv;": "\u22a3", + "dbkarow;": "\u290f", + "dblac;": "\u02dd", + "dcaron;": "\u010f", + "dcy;": "\u0434", + "dd;": "\u2146", + "ddagger;": "\u2021", + "ddarr;": "\u21ca", + "ddotseq;": "\u2a77", + "deg": "\xb0", + "deg;": "\xb0", + "delta;": "\u03b4", + "demptyv;": "\u29b1", + "dfisht;": "\u297f", + "dfr;": "\U0001d521", + "dharl;": "\u21c3", + "dharr;": "\u21c2", + "diam;": "\u22c4", + "diamond;": "\u22c4", + "diamondsuit;": "\u2666", + "diams;": "\u2666", + "die;": "\xa8", + "digamma;": "\u03dd", + "disin;": "\u22f2", + "div;": 
"\xf7", + "divide": "\xf7", + "divide;": "\xf7", + "divideontimes;": "\u22c7", + "divonx;": "\u22c7", + "djcy;": "\u0452", + "dlcorn;": "\u231e", + "dlcrop;": "\u230d", + "dollar;": "$", + "dopf;": "\U0001d555", + "dot;": "\u02d9", + "doteq;": "\u2250", + "doteqdot;": "\u2251", + "dotminus;": "\u2238", + "dotplus;": "\u2214", + "dotsquare;": "\u22a1", + "doublebarwedge;": "\u2306", + "downarrow;": "\u2193", + "downdownarrows;": "\u21ca", + "downharpoonleft;": "\u21c3", + "downharpoonright;": "\u21c2", + "drbkarow;": "\u2910", + "drcorn;": "\u231f", + "drcrop;": "\u230c", + "dscr;": "\U0001d4b9", + "dscy;": "\u0455", + "dsol;": "\u29f6", + "dstrok;": "\u0111", + "dtdot;": "\u22f1", + "dtri;": "\u25bf", + "dtrif;": "\u25be", + "duarr;": "\u21f5", + "duhar;": "\u296f", + "dwangle;": "\u29a6", + "dzcy;": "\u045f", + "dzigrarr;": "\u27ff", + "eDDot;": "\u2a77", + "eDot;": "\u2251", + "eacute": "\xe9", + "eacute;": "\xe9", + "easter;": "\u2a6e", + "ecaron;": "\u011b", + "ecir;": "\u2256", + "ecirc": "\xea", + "ecirc;": "\xea", + "ecolon;": "\u2255", + "ecy;": "\u044d", + "edot;": "\u0117", + "ee;": "\u2147", + "efDot;": "\u2252", + "efr;": "\U0001d522", + "eg;": "\u2a9a", + "egrave": "\xe8", + "egrave;": "\xe8", + "egs;": "\u2a96", + "egsdot;": "\u2a98", + "el;": "\u2a99", + "elinters;": "\u23e7", + "ell;": "\u2113", + "els;": "\u2a95", + "elsdot;": "\u2a97", + "emacr;": "\u0113", + "empty;": "\u2205", + "emptyset;": "\u2205", + "emptyv;": "\u2205", + "emsp13;": "\u2004", + "emsp14;": "\u2005", + "emsp;": "\u2003", + "eng;": "\u014b", + "ensp;": "\u2002", + "eogon;": "\u0119", + "eopf;": "\U0001d556", + "epar;": "\u22d5", + "eparsl;": "\u29e3", + "eplus;": "\u2a71", + "epsi;": "\u03b5", + "epsilon;": "\u03b5", + "epsiv;": "\u03f5", + "eqcirc;": "\u2256", + "eqcolon;": "\u2255", + "eqsim;": "\u2242", + "eqslantgtr;": "\u2a96", + "eqslantless;": "\u2a95", + "equals;": "=", + "equest;": "\u225f", + "equiv;": "\u2261", + "equivDD;": "\u2a78", + "eqvparsl;": "\u29e5", + "erDot;": "\u2253", + "erarr;": "\u2971", + "escr;": "\u212f", + "esdot;": "\u2250", + "esim;": "\u2242", + "eta;": "\u03b7", + "eth": "\xf0", + "eth;": "\xf0", + "euml": "\xeb", + "euml;": "\xeb", + "euro;": "\u20ac", + "excl;": "!", + "exist;": "\u2203", + "expectation;": "\u2130", + "exponentiale;": "\u2147", + "fallingdotseq;": "\u2252", + "fcy;": "\u0444", + "female;": "\u2640", + "ffilig;": "\ufb03", + "fflig;": "\ufb00", + "ffllig;": "\ufb04", + "ffr;": "\U0001d523", + "filig;": "\ufb01", + "fjlig;": "fj", + "flat;": "\u266d", + "fllig;": "\ufb02", + "fltns;": "\u25b1", + "fnof;": "\u0192", + "fopf;": "\U0001d557", + "forall;": "\u2200", + "fork;": "\u22d4", + "forkv;": "\u2ad9", + "fpartint;": "\u2a0d", + "frac12": "\xbd", + "frac12;": "\xbd", + "frac13;": "\u2153", + "frac14": "\xbc", + "frac14;": "\xbc", + "frac15;": "\u2155", + "frac16;": "\u2159", + "frac18;": "\u215b", + "frac23;": "\u2154", + "frac25;": "\u2156", + "frac34": "\xbe", + "frac34;": "\xbe", + "frac35;": "\u2157", + "frac38;": "\u215c", + "frac45;": "\u2158", + "frac56;": "\u215a", + "frac58;": "\u215d", + "frac78;": "\u215e", + "frasl;": "\u2044", + "frown;": "\u2322", + "fscr;": "\U0001d4bb", + "gE;": "\u2267", + "gEl;": "\u2a8c", + "gacute;": "\u01f5", + "gamma;": "\u03b3", + "gammad;": "\u03dd", + "gap;": "\u2a86", + "gbreve;": "\u011f", + "gcirc;": "\u011d", + "gcy;": "\u0433", + "gdot;": "\u0121", + "ge;": "\u2265", + "gel;": "\u22db", + "geq;": "\u2265", + "geqq;": "\u2267", + "geqslant;": "\u2a7e", + "ges;": "\u2a7e", + "gescc;": "\u2aa9", + 
"gesdot;": "\u2a80", + "gesdoto;": "\u2a82", + "gesdotol;": "\u2a84", + "gesl;": "\u22db\ufe00", + "gesles;": "\u2a94", + "gfr;": "\U0001d524", + "gg;": "\u226b", + "ggg;": "\u22d9", + "gimel;": "\u2137", + "gjcy;": "\u0453", + "gl;": "\u2277", + "glE;": "\u2a92", + "gla;": "\u2aa5", + "glj;": "\u2aa4", + "gnE;": "\u2269", + "gnap;": "\u2a8a", + "gnapprox;": "\u2a8a", + "gne;": "\u2a88", + "gneq;": "\u2a88", + "gneqq;": "\u2269", + "gnsim;": "\u22e7", + "gopf;": "\U0001d558", + "grave;": "`", + "gscr;": "\u210a", + "gsim;": "\u2273", + "gsime;": "\u2a8e", + "gsiml;": "\u2a90", + "gt": ">", + "gt;": ">", + "gtcc;": "\u2aa7", + "gtcir;": "\u2a7a", + "gtdot;": "\u22d7", + "gtlPar;": "\u2995", + "gtquest;": "\u2a7c", + "gtrapprox;": "\u2a86", + "gtrarr;": "\u2978", + "gtrdot;": "\u22d7", + "gtreqless;": "\u22db", + "gtreqqless;": "\u2a8c", + "gtrless;": "\u2277", + "gtrsim;": "\u2273", + "gvertneqq;": "\u2269\ufe00", + "gvnE;": "\u2269\ufe00", + "hArr;": "\u21d4", + "hairsp;": "\u200a", + "half;": "\xbd", + "hamilt;": "\u210b", + "hardcy;": "\u044a", + "harr;": "\u2194", + "harrcir;": "\u2948", + "harrw;": "\u21ad", + "hbar;": "\u210f", + "hcirc;": "\u0125", + "hearts;": "\u2665", + "heartsuit;": "\u2665", + "hellip;": "\u2026", + "hercon;": "\u22b9", + "hfr;": "\U0001d525", + "hksearow;": "\u2925", + "hkswarow;": "\u2926", + "hoarr;": "\u21ff", + "homtht;": "\u223b", + "hookleftarrow;": "\u21a9", + "hookrightarrow;": "\u21aa", + "hopf;": "\U0001d559", + "horbar;": "\u2015", + "hscr;": "\U0001d4bd", + "hslash;": "\u210f", + "hstrok;": "\u0127", + "hybull;": "\u2043", + "hyphen;": "\u2010", + "iacute": "\xed", + "iacute;": "\xed", + "ic;": "\u2063", + "icirc": "\xee", + "icirc;": "\xee", + "icy;": "\u0438", + "iecy;": "\u0435", + "iexcl": "\xa1", + "iexcl;": "\xa1", + "iff;": "\u21d4", + "ifr;": "\U0001d526", + "igrave": "\xec", + "igrave;": "\xec", + "ii;": "\u2148", + "iiiint;": "\u2a0c", + "iiint;": "\u222d", + "iinfin;": "\u29dc", + "iiota;": "\u2129", + "ijlig;": "\u0133", + "imacr;": "\u012b", + "image;": "\u2111", + "imagline;": "\u2110", + "imagpart;": "\u2111", + "imath;": "\u0131", + "imof;": "\u22b7", + "imped;": "\u01b5", + "in;": "\u2208", + "incare;": "\u2105", + "infin;": "\u221e", + "infintie;": "\u29dd", + "inodot;": "\u0131", + "int;": "\u222b", + "intcal;": "\u22ba", + "integers;": "\u2124", + "intercal;": "\u22ba", + "intlarhk;": "\u2a17", + "intprod;": "\u2a3c", + "iocy;": "\u0451", + "iogon;": "\u012f", + "iopf;": "\U0001d55a", + "iota;": "\u03b9", + "iprod;": "\u2a3c", + "iquest": "\xbf", + "iquest;": "\xbf", + "iscr;": "\U0001d4be", + "isin;": "\u2208", + "isinE;": "\u22f9", + "isindot;": "\u22f5", + "isins;": "\u22f4", + "isinsv;": "\u22f3", + "isinv;": "\u2208", + "it;": "\u2062", + "itilde;": "\u0129", + "iukcy;": "\u0456", + "iuml": "\xef", + "iuml;": "\xef", + "jcirc;": "\u0135", + "jcy;": "\u0439", + "jfr;": "\U0001d527", + "jmath;": "\u0237", + "jopf;": "\U0001d55b", + "jscr;": "\U0001d4bf", + "jsercy;": "\u0458", + "jukcy;": "\u0454", + "kappa;": "\u03ba", + "kappav;": "\u03f0", + "kcedil;": "\u0137", + "kcy;": "\u043a", + "kfr;": "\U0001d528", + "kgreen;": "\u0138", + "khcy;": "\u0445", + "kjcy;": "\u045c", + "kopf;": "\U0001d55c", + "kscr;": "\U0001d4c0", + "lAarr;": "\u21da", + "lArr;": "\u21d0", + "lAtail;": "\u291b", + "lBarr;": "\u290e", + "lE;": "\u2266", + "lEg;": "\u2a8b", + "lHar;": "\u2962", + "lacute;": "\u013a", + "laemptyv;": "\u29b4", + "lagran;": "\u2112", + "lambda;": "\u03bb", + "lang;": "\u27e8", + "langd;": "\u2991", + "langle;": "\u27e8", + 
"lap;": "\u2a85", + "laquo": "\xab", + "laquo;": "\xab", + "larr;": "\u2190", + "larrb;": "\u21e4", + "larrbfs;": "\u291f", + "larrfs;": "\u291d", + "larrhk;": "\u21a9", + "larrlp;": "\u21ab", + "larrpl;": "\u2939", + "larrsim;": "\u2973", + "larrtl;": "\u21a2", + "lat;": "\u2aab", + "latail;": "\u2919", + "late;": "\u2aad", + "lates;": "\u2aad\ufe00", + "lbarr;": "\u290c", + "lbbrk;": "\u2772", + "lbrace;": "{", + "lbrack;": "[", + "lbrke;": "\u298b", + "lbrksld;": "\u298f", + "lbrkslu;": "\u298d", + "lcaron;": "\u013e", + "lcedil;": "\u013c", + "lceil;": "\u2308", + "lcub;": "{", + "lcy;": "\u043b", + "ldca;": "\u2936", + "ldquo;": "\u201c", + "ldquor;": "\u201e", + "ldrdhar;": "\u2967", + "ldrushar;": "\u294b", + "ldsh;": "\u21b2", + "le;": "\u2264", + "leftarrow;": "\u2190", + "leftarrowtail;": "\u21a2", + "leftharpoondown;": "\u21bd", + "leftharpoonup;": "\u21bc", + "leftleftarrows;": "\u21c7", + "leftrightarrow;": "\u2194", + "leftrightarrows;": "\u21c6", + "leftrightharpoons;": "\u21cb", + "leftrightsquigarrow;": "\u21ad", + "leftthreetimes;": "\u22cb", + "leg;": "\u22da", + "leq;": "\u2264", + "leqq;": "\u2266", + "leqslant;": "\u2a7d", + "les;": "\u2a7d", + "lescc;": "\u2aa8", + "lesdot;": "\u2a7f", + "lesdoto;": "\u2a81", + "lesdotor;": "\u2a83", + "lesg;": "\u22da\ufe00", + "lesges;": "\u2a93", + "lessapprox;": "\u2a85", + "lessdot;": "\u22d6", + "lesseqgtr;": "\u22da", + "lesseqqgtr;": "\u2a8b", + "lessgtr;": "\u2276", + "lesssim;": "\u2272", + "lfisht;": "\u297c", + "lfloor;": "\u230a", + "lfr;": "\U0001d529", + "lg;": "\u2276", + "lgE;": "\u2a91", + "lhard;": "\u21bd", + "lharu;": "\u21bc", + "lharul;": "\u296a", + "lhblk;": "\u2584", + "ljcy;": "\u0459", + "ll;": "\u226a", + "llarr;": "\u21c7", + "llcorner;": "\u231e", + "llhard;": "\u296b", + "lltri;": "\u25fa", + "lmidot;": "\u0140", + "lmoust;": "\u23b0", + "lmoustache;": "\u23b0", + "lnE;": "\u2268", + "lnap;": "\u2a89", + "lnapprox;": "\u2a89", + "lne;": "\u2a87", + "lneq;": "\u2a87", + "lneqq;": "\u2268", + "lnsim;": "\u22e6", + "loang;": "\u27ec", + "loarr;": "\u21fd", + "lobrk;": "\u27e6", + "longleftarrow;": "\u27f5", + "longleftrightarrow;": "\u27f7", + "longmapsto;": "\u27fc", + "longrightarrow;": "\u27f6", + "looparrowleft;": "\u21ab", + "looparrowright;": "\u21ac", + "lopar;": "\u2985", + "lopf;": "\U0001d55d", + "loplus;": "\u2a2d", + "lotimes;": "\u2a34", + "lowast;": "\u2217", + "lowbar;": "_", + "loz;": "\u25ca", + "lozenge;": "\u25ca", + "lozf;": "\u29eb", + "lpar;": "(", + "lparlt;": "\u2993", + "lrarr;": "\u21c6", + "lrcorner;": "\u231f", + "lrhar;": "\u21cb", + "lrhard;": "\u296d", + "lrm;": "\u200e", + "lrtri;": "\u22bf", + "lsaquo;": "\u2039", + "lscr;": "\U0001d4c1", + "lsh;": "\u21b0", + "lsim;": "\u2272", + "lsime;": "\u2a8d", + "lsimg;": "\u2a8f", + "lsqb;": "[", + "lsquo;": "\u2018", + "lsquor;": "\u201a", + "lstrok;": "\u0142", + "lt": "<", + "lt;": "<", + "ltcc;": "\u2aa6", + "ltcir;": "\u2a79", + "ltdot;": "\u22d6", + "lthree;": "\u22cb", + "ltimes;": "\u22c9", + "ltlarr;": "\u2976", + "ltquest;": "\u2a7b", + "ltrPar;": "\u2996", + "ltri;": "\u25c3", + "ltrie;": "\u22b4", + "ltrif;": "\u25c2", + "lurdshar;": "\u294a", + "luruhar;": "\u2966", + "lvertneqq;": "\u2268\ufe00", + "lvnE;": "\u2268\ufe00", + "mDDot;": "\u223a", + "macr": "\xaf", + "macr;": "\xaf", + "male;": "\u2642", + "malt;": "\u2720", + "maltese;": "\u2720", + "map;": "\u21a6", + "mapsto;": "\u21a6", + "mapstodown;": "\u21a7", + "mapstoleft;": "\u21a4", + "mapstoup;": "\u21a5", + "marker;": "\u25ae", + "mcomma;": "\u2a29", + 
"mcy;": "\u043c", + "mdash;": "\u2014", + "measuredangle;": "\u2221", + "mfr;": "\U0001d52a", + "mho;": "\u2127", + "micro": "\xb5", + "micro;": "\xb5", + "mid;": "\u2223", + "midast;": "*", + "midcir;": "\u2af0", + "middot": "\xb7", + "middot;": "\xb7", + "minus;": "\u2212", + "minusb;": "\u229f", + "minusd;": "\u2238", + "minusdu;": "\u2a2a", + "mlcp;": "\u2adb", + "mldr;": "\u2026", + "mnplus;": "\u2213", + "models;": "\u22a7", + "mopf;": "\U0001d55e", + "mp;": "\u2213", + "mscr;": "\U0001d4c2", + "mstpos;": "\u223e", + "mu;": "\u03bc", + "multimap;": "\u22b8", + "mumap;": "\u22b8", + "nGg;": "\u22d9\u0338", + "nGt;": "\u226b\u20d2", + "nGtv;": "\u226b\u0338", + "nLeftarrow;": "\u21cd", + "nLeftrightarrow;": "\u21ce", + "nLl;": "\u22d8\u0338", + "nLt;": "\u226a\u20d2", + "nLtv;": "\u226a\u0338", + "nRightarrow;": "\u21cf", + "nVDash;": "\u22af", + "nVdash;": "\u22ae", + "nabla;": "\u2207", + "nacute;": "\u0144", + "nang;": "\u2220\u20d2", + "nap;": "\u2249", + "napE;": "\u2a70\u0338", + "napid;": "\u224b\u0338", + "napos;": "\u0149", + "napprox;": "\u2249", + "natur;": "\u266e", + "natural;": "\u266e", + "naturals;": "\u2115", + "nbsp": "\xa0", + "nbsp;": "\xa0", + "nbump;": "\u224e\u0338", + "nbumpe;": "\u224f\u0338", + "ncap;": "\u2a43", + "ncaron;": "\u0148", + "ncedil;": "\u0146", + "ncong;": "\u2247", + "ncongdot;": "\u2a6d\u0338", + "ncup;": "\u2a42", + "ncy;": "\u043d", + "ndash;": "\u2013", + "ne;": "\u2260", + "neArr;": "\u21d7", + "nearhk;": "\u2924", + "nearr;": "\u2197", + "nearrow;": "\u2197", + "nedot;": "\u2250\u0338", + "nequiv;": "\u2262", + "nesear;": "\u2928", + "nesim;": "\u2242\u0338", + "nexist;": "\u2204", + "nexists;": "\u2204", + "nfr;": "\U0001d52b", + "ngE;": "\u2267\u0338", + "nge;": "\u2271", + "ngeq;": "\u2271", + "ngeqq;": "\u2267\u0338", + "ngeqslant;": "\u2a7e\u0338", + "nges;": "\u2a7e\u0338", + "ngsim;": "\u2275", + "ngt;": "\u226f", + "ngtr;": "\u226f", + "nhArr;": "\u21ce", + "nharr;": "\u21ae", + "nhpar;": "\u2af2", + "ni;": "\u220b", + "nis;": "\u22fc", + "nisd;": "\u22fa", + "niv;": "\u220b", + "njcy;": "\u045a", + "nlArr;": "\u21cd", + "nlE;": "\u2266\u0338", + "nlarr;": "\u219a", + "nldr;": "\u2025", + "nle;": "\u2270", + "nleftarrow;": "\u219a", + "nleftrightarrow;": "\u21ae", + "nleq;": "\u2270", + "nleqq;": "\u2266\u0338", + "nleqslant;": "\u2a7d\u0338", + "nles;": "\u2a7d\u0338", + "nless;": "\u226e", + "nlsim;": "\u2274", + "nlt;": "\u226e", + "nltri;": "\u22ea", + "nltrie;": "\u22ec", + "nmid;": "\u2224", + "nopf;": "\U0001d55f", + "not": "\xac", + "not;": "\xac", + "notin;": "\u2209", + "notinE;": "\u22f9\u0338", + "notindot;": "\u22f5\u0338", + "notinva;": "\u2209", + "notinvb;": "\u22f7", + "notinvc;": "\u22f6", + "notni;": "\u220c", + "notniva;": "\u220c", + "notnivb;": "\u22fe", + "notnivc;": "\u22fd", + "npar;": "\u2226", + "nparallel;": "\u2226", + "nparsl;": "\u2afd\u20e5", + "npart;": "\u2202\u0338", + "npolint;": "\u2a14", + "npr;": "\u2280", + "nprcue;": "\u22e0", + "npre;": "\u2aaf\u0338", + "nprec;": "\u2280", + "npreceq;": "\u2aaf\u0338", + "nrArr;": "\u21cf", + "nrarr;": "\u219b", + "nrarrc;": "\u2933\u0338", + "nrarrw;": "\u219d\u0338", + "nrightarrow;": "\u219b", + "nrtri;": "\u22eb", + "nrtrie;": "\u22ed", + "nsc;": "\u2281", + "nsccue;": "\u22e1", + "nsce;": "\u2ab0\u0338", + "nscr;": "\U0001d4c3", + "nshortmid;": "\u2224", + "nshortparallel;": "\u2226", + "nsim;": "\u2241", + "nsime;": "\u2244", + "nsimeq;": "\u2244", + "nsmid;": "\u2224", + "nspar;": "\u2226", + "nsqsube;": "\u22e2", + "nsqsupe;": "\u22e3", + "nsub;": 
"\u2284", + "nsubE;": "\u2ac5\u0338", + "nsube;": "\u2288", + "nsubset;": "\u2282\u20d2", + "nsubseteq;": "\u2288", + "nsubseteqq;": "\u2ac5\u0338", + "nsucc;": "\u2281", + "nsucceq;": "\u2ab0\u0338", + "nsup;": "\u2285", + "nsupE;": "\u2ac6\u0338", + "nsupe;": "\u2289", + "nsupset;": "\u2283\u20d2", + "nsupseteq;": "\u2289", + "nsupseteqq;": "\u2ac6\u0338", + "ntgl;": "\u2279", + "ntilde": "\xf1", + "ntilde;": "\xf1", + "ntlg;": "\u2278", + "ntriangleleft;": "\u22ea", + "ntrianglelefteq;": "\u22ec", + "ntriangleright;": "\u22eb", + "ntrianglerighteq;": "\u22ed", + "nu;": "\u03bd", + "num;": "#", + "numero;": "\u2116", + "numsp;": "\u2007", + "nvDash;": "\u22ad", + "nvHarr;": "\u2904", + "nvap;": "\u224d\u20d2", + "nvdash;": "\u22ac", + "nvge;": "\u2265\u20d2", + "nvgt;": ">\u20d2", + "nvinfin;": "\u29de", + "nvlArr;": "\u2902", + "nvle;": "\u2264\u20d2", + "nvlt;": "<\u20d2", + "nvltrie;": "\u22b4\u20d2", + "nvrArr;": "\u2903", + "nvrtrie;": "\u22b5\u20d2", + "nvsim;": "\u223c\u20d2", + "nwArr;": "\u21d6", + "nwarhk;": "\u2923", + "nwarr;": "\u2196", + "nwarrow;": "\u2196", + "nwnear;": "\u2927", + "oS;": "\u24c8", + "oacute": "\xf3", + "oacute;": "\xf3", + "oast;": "\u229b", + "ocir;": "\u229a", + "ocirc": "\xf4", + "ocirc;": "\xf4", + "ocy;": "\u043e", + "odash;": "\u229d", + "odblac;": "\u0151", + "odiv;": "\u2a38", + "odot;": "\u2299", + "odsold;": "\u29bc", + "oelig;": "\u0153", + "ofcir;": "\u29bf", + "ofr;": "\U0001d52c", + "ogon;": "\u02db", + "ograve": "\xf2", + "ograve;": "\xf2", + "ogt;": "\u29c1", + "ohbar;": "\u29b5", + "ohm;": "\u03a9", + "oint;": "\u222e", + "olarr;": "\u21ba", + "olcir;": "\u29be", + "olcross;": "\u29bb", + "oline;": "\u203e", + "olt;": "\u29c0", + "omacr;": "\u014d", + "omega;": "\u03c9", + "omicron;": "\u03bf", + "omid;": "\u29b6", + "ominus;": "\u2296", + "oopf;": "\U0001d560", + "opar;": "\u29b7", + "operp;": "\u29b9", + "oplus;": "\u2295", + "or;": "\u2228", + "orarr;": "\u21bb", + "ord;": "\u2a5d", + "order;": "\u2134", + "orderof;": "\u2134", + "ordf": "\xaa", + "ordf;": "\xaa", + "ordm": "\xba", + "ordm;": "\xba", + "origof;": "\u22b6", + "oror;": "\u2a56", + "orslope;": "\u2a57", + "orv;": "\u2a5b", + "oscr;": "\u2134", + "oslash": "\xf8", + "oslash;": "\xf8", + "osol;": "\u2298", + "otilde": "\xf5", + "otilde;": "\xf5", + "otimes;": "\u2297", + "otimesas;": "\u2a36", + "ouml": "\xf6", + "ouml;": "\xf6", + "ovbar;": "\u233d", + "par;": "\u2225", + "para": "\xb6", + "para;": "\xb6", + "parallel;": "\u2225", + "parsim;": "\u2af3", + "parsl;": "\u2afd", + "part;": "\u2202", + "pcy;": "\u043f", + "percnt;": "%", + "period;": ".", + "permil;": "\u2030", + "perp;": "\u22a5", + "pertenk;": "\u2031", + "pfr;": "\U0001d52d", + "phi;": "\u03c6", + "phiv;": "\u03d5", + "phmmat;": "\u2133", + "phone;": "\u260e", + "pi;": "\u03c0", + "pitchfork;": "\u22d4", + "piv;": "\u03d6", + "planck;": "\u210f", + "planckh;": "\u210e", + "plankv;": "\u210f", + "plus;": "+", + "plusacir;": "\u2a23", + "plusb;": "\u229e", + "pluscir;": "\u2a22", + "plusdo;": "\u2214", + "plusdu;": "\u2a25", + "pluse;": "\u2a72", + "plusmn": "\xb1", + "plusmn;": "\xb1", + "plussim;": "\u2a26", + "plustwo;": "\u2a27", + "pm;": "\xb1", + "pointint;": "\u2a15", + "popf;": "\U0001d561", + "pound": "\xa3", + "pound;": "\xa3", + "pr;": "\u227a", + "prE;": "\u2ab3", + "prap;": "\u2ab7", + "prcue;": "\u227c", + "pre;": "\u2aaf", + "prec;": "\u227a", + "precapprox;": "\u2ab7", + "preccurlyeq;": "\u227c", + "preceq;": "\u2aaf", + "precnapprox;": "\u2ab9", + "precneqq;": "\u2ab5", + "precnsim;": 
"\u22e8", + "precsim;": "\u227e", + "prime;": "\u2032", + "primes;": "\u2119", + "prnE;": "\u2ab5", + "prnap;": "\u2ab9", + "prnsim;": "\u22e8", + "prod;": "\u220f", + "profalar;": "\u232e", + "profline;": "\u2312", + "profsurf;": "\u2313", + "prop;": "\u221d", + "propto;": "\u221d", + "prsim;": "\u227e", + "prurel;": "\u22b0", + "pscr;": "\U0001d4c5", + "psi;": "\u03c8", + "puncsp;": "\u2008", + "qfr;": "\U0001d52e", + "qint;": "\u2a0c", + "qopf;": "\U0001d562", + "qprime;": "\u2057", + "qscr;": "\U0001d4c6", + "quaternions;": "\u210d", + "quatint;": "\u2a16", + "quest;": "?", + "questeq;": "\u225f", + "quot": "\"", + "quot;": "\"", + "rAarr;": "\u21db", + "rArr;": "\u21d2", + "rAtail;": "\u291c", + "rBarr;": "\u290f", + "rHar;": "\u2964", + "race;": "\u223d\u0331", + "racute;": "\u0155", + "radic;": "\u221a", + "raemptyv;": "\u29b3", + "rang;": "\u27e9", + "rangd;": "\u2992", + "range;": "\u29a5", + "rangle;": "\u27e9", + "raquo": "\xbb", + "raquo;": "\xbb", + "rarr;": "\u2192", + "rarrap;": "\u2975", + "rarrb;": "\u21e5", + "rarrbfs;": "\u2920", + "rarrc;": "\u2933", + "rarrfs;": "\u291e", + "rarrhk;": "\u21aa", + "rarrlp;": "\u21ac", + "rarrpl;": "\u2945", + "rarrsim;": "\u2974", + "rarrtl;": "\u21a3", + "rarrw;": "\u219d", + "ratail;": "\u291a", + "ratio;": "\u2236", + "rationals;": "\u211a", + "rbarr;": "\u290d", + "rbbrk;": "\u2773", + "rbrace;": "}", + "rbrack;": "]", + "rbrke;": "\u298c", + "rbrksld;": "\u298e", + "rbrkslu;": "\u2990", + "rcaron;": "\u0159", + "rcedil;": "\u0157", + "rceil;": "\u2309", + "rcub;": "}", + "rcy;": "\u0440", + "rdca;": "\u2937", + "rdldhar;": "\u2969", + "rdquo;": "\u201d", + "rdquor;": "\u201d", + "rdsh;": "\u21b3", + "real;": "\u211c", + "realine;": "\u211b", + "realpart;": "\u211c", + "reals;": "\u211d", + "rect;": "\u25ad", + "reg": "\xae", + "reg;": "\xae", + "rfisht;": "\u297d", + "rfloor;": "\u230b", + "rfr;": "\U0001d52f", + "rhard;": "\u21c1", + "rharu;": "\u21c0", + "rharul;": "\u296c", + "rho;": "\u03c1", + "rhov;": "\u03f1", + "rightarrow;": "\u2192", + "rightarrowtail;": "\u21a3", + "rightharpoondown;": "\u21c1", + "rightharpoonup;": "\u21c0", + "rightleftarrows;": "\u21c4", + "rightleftharpoons;": "\u21cc", + "rightrightarrows;": "\u21c9", + "rightsquigarrow;": "\u219d", + "rightthreetimes;": "\u22cc", + "ring;": "\u02da", + "risingdotseq;": "\u2253", + "rlarr;": "\u21c4", + "rlhar;": "\u21cc", + "rlm;": "\u200f", + "rmoust;": "\u23b1", + "rmoustache;": "\u23b1", + "rnmid;": "\u2aee", + "roang;": "\u27ed", + "roarr;": "\u21fe", + "robrk;": "\u27e7", + "ropar;": "\u2986", + "ropf;": "\U0001d563", + "roplus;": "\u2a2e", + "rotimes;": "\u2a35", + "rpar;": ")", + "rpargt;": "\u2994", + "rppolint;": "\u2a12", + "rrarr;": "\u21c9", + "rsaquo;": "\u203a", + "rscr;": "\U0001d4c7", + "rsh;": "\u21b1", + "rsqb;": "]", + "rsquo;": "\u2019", + "rsquor;": "\u2019", + "rthree;": "\u22cc", + "rtimes;": "\u22ca", + "rtri;": "\u25b9", + "rtrie;": "\u22b5", + "rtrif;": "\u25b8", + "rtriltri;": "\u29ce", + "ruluhar;": "\u2968", + "rx;": "\u211e", + "sacute;": "\u015b", + "sbquo;": "\u201a", + "sc;": "\u227b", + "scE;": "\u2ab4", + "scap;": "\u2ab8", + "scaron;": "\u0161", + "sccue;": "\u227d", + "sce;": "\u2ab0", + "scedil;": "\u015f", + "scirc;": "\u015d", + "scnE;": "\u2ab6", + "scnap;": "\u2aba", + "scnsim;": "\u22e9", + "scpolint;": "\u2a13", + "scsim;": "\u227f", + "scy;": "\u0441", + "sdot;": "\u22c5", + "sdotb;": "\u22a1", + "sdote;": "\u2a66", + "seArr;": "\u21d8", + "searhk;": "\u2925", + "searr;": "\u2198", + "searrow;": "\u2198", + "sect": 
"\xa7", + "sect;": "\xa7", + "semi;": ";", + "seswar;": "\u2929", + "setminus;": "\u2216", + "setmn;": "\u2216", + "sext;": "\u2736", + "sfr;": "\U0001d530", + "sfrown;": "\u2322", + "sharp;": "\u266f", + "shchcy;": "\u0449", + "shcy;": "\u0448", + "shortmid;": "\u2223", + "shortparallel;": "\u2225", + "shy": "\xad", + "shy;": "\xad", + "sigma;": "\u03c3", + "sigmaf;": "\u03c2", + "sigmav;": "\u03c2", + "sim;": "\u223c", + "simdot;": "\u2a6a", + "sime;": "\u2243", + "simeq;": "\u2243", + "simg;": "\u2a9e", + "simgE;": "\u2aa0", + "siml;": "\u2a9d", + "simlE;": "\u2a9f", + "simne;": "\u2246", + "simplus;": "\u2a24", + "simrarr;": "\u2972", + "slarr;": "\u2190", + "smallsetminus;": "\u2216", + "smashp;": "\u2a33", + "smeparsl;": "\u29e4", + "smid;": "\u2223", + "smile;": "\u2323", + "smt;": "\u2aaa", + "smte;": "\u2aac", + "smtes;": "\u2aac\ufe00", + "softcy;": "\u044c", + "sol;": "/", + "solb;": "\u29c4", + "solbar;": "\u233f", + "sopf;": "\U0001d564", + "spades;": "\u2660", + "spadesuit;": "\u2660", + "spar;": "\u2225", + "sqcap;": "\u2293", + "sqcaps;": "\u2293\ufe00", + "sqcup;": "\u2294", + "sqcups;": "\u2294\ufe00", + "sqsub;": "\u228f", + "sqsube;": "\u2291", + "sqsubset;": "\u228f", + "sqsubseteq;": "\u2291", + "sqsup;": "\u2290", + "sqsupe;": "\u2292", + "sqsupset;": "\u2290", + "sqsupseteq;": "\u2292", + "squ;": "\u25a1", + "square;": "\u25a1", + "squarf;": "\u25aa", + "squf;": "\u25aa", + "srarr;": "\u2192", + "sscr;": "\U0001d4c8", + "ssetmn;": "\u2216", + "ssmile;": "\u2323", + "sstarf;": "\u22c6", + "star;": "\u2606", + "starf;": "\u2605", + "straightepsilon;": "\u03f5", + "straightphi;": "\u03d5", + "strns;": "\xaf", + "sub;": "\u2282", + "subE;": "\u2ac5", + "subdot;": "\u2abd", + "sube;": "\u2286", + "subedot;": "\u2ac3", + "submult;": "\u2ac1", + "subnE;": "\u2acb", + "subne;": "\u228a", + "subplus;": "\u2abf", + "subrarr;": "\u2979", + "subset;": "\u2282", + "subseteq;": "\u2286", + "subseteqq;": "\u2ac5", + "subsetneq;": "\u228a", + "subsetneqq;": "\u2acb", + "subsim;": "\u2ac7", + "subsub;": "\u2ad5", + "subsup;": "\u2ad3", + "succ;": "\u227b", + "succapprox;": "\u2ab8", + "succcurlyeq;": "\u227d", + "succeq;": "\u2ab0", + "succnapprox;": "\u2aba", + "succneqq;": "\u2ab6", + "succnsim;": "\u22e9", + "succsim;": "\u227f", + "sum;": "\u2211", + "sung;": "\u266a", + "sup1": "\xb9", + "sup1;": "\xb9", + "sup2": "\xb2", + "sup2;": "\xb2", + "sup3": "\xb3", + "sup3;": "\xb3", + "sup;": "\u2283", + "supE;": "\u2ac6", + "supdot;": "\u2abe", + "supdsub;": "\u2ad8", + "supe;": "\u2287", + "supedot;": "\u2ac4", + "suphsol;": "\u27c9", + "suphsub;": "\u2ad7", + "suplarr;": "\u297b", + "supmult;": "\u2ac2", + "supnE;": "\u2acc", + "supne;": "\u228b", + "supplus;": "\u2ac0", + "supset;": "\u2283", + "supseteq;": "\u2287", + "supseteqq;": "\u2ac6", + "supsetneq;": "\u228b", + "supsetneqq;": "\u2acc", + "supsim;": "\u2ac8", + "supsub;": "\u2ad4", + "supsup;": "\u2ad6", + "swArr;": "\u21d9", + "swarhk;": "\u2926", + "swarr;": "\u2199", + "swarrow;": "\u2199", + "swnwar;": "\u292a", + "szlig": "\xdf", + "szlig;": "\xdf", + "target;": "\u2316", + "tau;": "\u03c4", + "tbrk;": "\u23b4", + "tcaron;": "\u0165", + "tcedil;": "\u0163", + "tcy;": "\u0442", + "tdot;": "\u20db", + "telrec;": "\u2315", + "tfr;": "\U0001d531", + "there4;": "\u2234", + "therefore;": "\u2234", + "theta;": "\u03b8", + "thetasym;": "\u03d1", + "thetav;": "\u03d1", + "thickapprox;": "\u2248", + "thicksim;": "\u223c", + "thinsp;": "\u2009", + "thkap;": "\u2248", + "thksim;": "\u223c", + "thorn": "\xfe", + "thorn;": "\xfe", 
+ "tilde;": "\u02dc", + "times": "\xd7", + "times;": "\xd7", + "timesb;": "\u22a0", + "timesbar;": "\u2a31", + "timesd;": "\u2a30", + "tint;": "\u222d", + "toea;": "\u2928", + "top;": "\u22a4", + "topbot;": "\u2336", + "topcir;": "\u2af1", + "topf;": "\U0001d565", + "topfork;": "\u2ada", + "tosa;": "\u2929", + "tprime;": "\u2034", + "trade;": "\u2122", + "triangle;": "\u25b5", + "triangledown;": "\u25bf", + "triangleleft;": "\u25c3", + "trianglelefteq;": "\u22b4", + "triangleq;": "\u225c", + "triangleright;": "\u25b9", + "trianglerighteq;": "\u22b5", + "tridot;": "\u25ec", + "trie;": "\u225c", + "triminus;": "\u2a3a", + "triplus;": "\u2a39", + "trisb;": "\u29cd", + "tritime;": "\u2a3b", + "trpezium;": "\u23e2", + "tscr;": "\U0001d4c9", + "tscy;": "\u0446", + "tshcy;": "\u045b", + "tstrok;": "\u0167", + "twixt;": "\u226c", + "twoheadleftarrow;": "\u219e", + "twoheadrightarrow;": "\u21a0", + "uArr;": "\u21d1", + "uHar;": "\u2963", + "uacute": "\xfa", + "uacute;": "\xfa", + "uarr;": "\u2191", + "ubrcy;": "\u045e", + "ubreve;": "\u016d", + "ucirc": "\xfb", + "ucirc;": "\xfb", + "ucy;": "\u0443", + "udarr;": "\u21c5", + "udblac;": "\u0171", + "udhar;": "\u296e", + "ufisht;": "\u297e", + "ufr;": "\U0001d532", + "ugrave": "\xf9", + "ugrave;": "\xf9", + "uharl;": "\u21bf", + "uharr;": "\u21be", + "uhblk;": "\u2580", + "ulcorn;": "\u231c", + "ulcorner;": "\u231c", + "ulcrop;": "\u230f", + "ultri;": "\u25f8", + "umacr;": "\u016b", + "uml": "\xa8", + "uml;": "\xa8", + "uogon;": "\u0173", + "uopf;": "\U0001d566", + "uparrow;": "\u2191", + "updownarrow;": "\u2195", + "upharpoonleft;": "\u21bf", + "upharpoonright;": "\u21be", + "uplus;": "\u228e", + "upsi;": "\u03c5", + "upsih;": "\u03d2", + "upsilon;": "\u03c5", + "upuparrows;": "\u21c8", + "urcorn;": "\u231d", + "urcorner;": "\u231d", + "urcrop;": "\u230e", + "uring;": "\u016f", + "urtri;": "\u25f9", + "uscr;": "\U0001d4ca", + "utdot;": "\u22f0", + "utilde;": "\u0169", + "utri;": "\u25b5", + "utrif;": "\u25b4", + "uuarr;": "\u21c8", + "uuml": "\xfc", + "uuml;": "\xfc", + "uwangle;": "\u29a7", + "vArr;": "\u21d5", + "vBar;": "\u2ae8", + "vBarv;": "\u2ae9", + "vDash;": "\u22a8", + "vangrt;": "\u299c", + "varepsilon;": "\u03f5", + "varkappa;": "\u03f0", + "varnothing;": "\u2205", + "varphi;": "\u03d5", + "varpi;": "\u03d6", + "varpropto;": "\u221d", + "varr;": "\u2195", + "varrho;": "\u03f1", + "varsigma;": "\u03c2", + "varsubsetneq;": "\u228a\ufe00", + "varsubsetneqq;": "\u2acb\ufe00", + "varsupsetneq;": "\u228b\ufe00", + "varsupsetneqq;": "\u2acc\ufe00", + "vartheta;": "\u03d1", + "vartriangleleft;": "\u22b2", + "vartriangleright;": "\u22b3", + "vcy;": "\u0432", + "vdash;": "\u22a2", + "vee;": "\u2228", + "veebar;": "\u22bb", + "veeeq;": "\u225a", + "vellip;": "\u22ee", + "verbar;": "|", + "vert;": "|", + "vfr;": "\U0001d533", + "vltri;": "\u22b2", + "vnsub;": "\u2282\u20d2", + "vnsup;": "\u2283\u20d2", + "vopf;": "\U0001d567", + "vprop;": "\u221d", + "vrtri;": "\u22b3", + "vscr;": "\U0001d4cb", + "vsubnE;": "\u2acb\ufe00", + "vsubne;": "\u228a\ufe00", + "vsupnE;": "\u2acc\ufe00", + "vsupne;": "\u228b\ufe00", + "vzigzag;": "\u299a", + "wcirc;": "\u0175", + "wedbar;": "\u2a5f", + "wedge;": "\u2227", + "wedgeq;": "\u2259", + "weierp;": "\u2118", + "wfr;": "\U0001d534", + "wopf;": "\U0001d568", + "wp;": "\u2118", + "wr;": "\u2240", + "wreath;": "\u2240", + "wscr;": "\U0001d4cc", + "xcap;": "\u22c2", + "xcirc;": "\u25ef", + "xcup;": "\u22c3", + "xdtri;": "\u25bd", + "xfr;": "\U0001d535", + "xhArr;": "\u27fa", + "xharr;": "\u27f7", + "xi;": "\u03be", + 
"xlArr;": "\u27f8", + "xlarr;": "\u27f5", + "xmap;": "\u27fc", + "xnis;": "\u22fb", + "xodot;": "\u2a00", + "xopf;": "\U0001d569", + "xoplus;": "\u2a01", + "xotime;": "\u2a02", + "xrArr;": "\u27f9", + "xrarr;": "\u27f6", + "xscr;": "\U0001d4cd", + "xsqcup;": "\u2a06", + "xuplus;": "\u2a04", + "xutri;": "\u25b3", + "xvee;": "\u22c1", + "xwedge;": "\u22c0", + "yacute": "\xfd", + "yacute;": "\xfd", + "yacy;": "\u044f", + "ycirc;": "\u0177", + "ycy;": "\u044b", + "yen": "\xa5", + "yen;": "\xa5", + "yfr;": "\U0001d536", + "yicy;": "\u0457", + "yopf;": "\U0001d56a", + "yscr;": "\U0001d4ce", + "yucy;": "\u044e", + "yuml": "\xff", + "yuml;": "\xff", + "zacute;": "\u017a", + "zcaron;": "\u017e", + "zcy;": "\u0437", + "zdot;": "\u017c", + "zeetrf;": "\u2128", + "zeta;": "\u03b6", + "zfr;": "\U0001d537", + "zhcy;": "\u0436", + "zigrarr;": "\u21dd", + "zopf;": "\U0001d56b", + "zscr;": "\U0001d4cf", + "zwj;": "\u200d", + "zwnj;": "\u200c", +} + +replacementCharacters = { + 0x0: "\uFFFD", + 0x0d: "\u000D", + 0x80: "\u20AC", + 0x81: "\u0081", + 0x81: "\u0081", + 0x82: "\u201A", + 0x83: "\u0192", + 0x84: "\u201E", + 0x85: "\u2026", + 0x86: "\u2020", + 0x87: "\u2021", + 0x88: "\u02C6", + 0x89: "\u2030", + 0x8A: "\u0160", + 0x8B: "\u2039", + 0x8C: "\u0152", + 0x8D: "\u008D", + 0x8E: "\u017D", + 0x8F: "\u008F", + 0x90: "\u0090", + 0x91: "\u2018", + 0x92: "\u2019", + 0x93: "\u201C", + 0x94: "\u201D", + 0x95: "\u2022", + 0x96: "\u2013", + 0x97: "\u2014", + 0x98: "\u02DC", + 0x99: "\u2122", + 0x9A: "\u0161", + 0x9B: "\u203A", + 0x9C: "\u0153", + 0x9D: "\u009D", + 0x9E: "\u017E", + 0x9F: "\u0178", +} + +encodings = { + '437': 'cp437', + '850': 'cp850', + '852': 'cp852', + '855': 'cp855', + '857': 'cp857', + '860': 'cp860', + '861': 'cp861', + '862': 'cp862', + '863': 'cp863', + '865': 'cp865', + '866': 'cp866', + '869': 'cp869', + 'ansix341968': 'ascii', + 'ansix341986': 'ascii', + 'arabic': 'iso8859-6', + 'ascii': 'ascii', + 'asmo708': 'iso8859-6', + 'big5': 'big5', + 'big5hkscs': 'big5hkscs', + 'chinese': 'gbk', + 'cp037': 'cp037', + 'cp1026': 'cp1026', + 'cp154': 'ptcp154', + 'cp367': 'ascii', + 'cp424': 'cp424', + 'cp437': 'cp437', + 'cp500': 'cp500', + 'cp775': 'cp775', + 'cp819': 'windows-1252', + 'cp850': 'cp850', + 'cp852': 'cp852', + 'cp855': 'cp855', + 'cp857': 'cp857', + 'cp860': 'cp860', + 'cp861': 'cp861', + 'cp862': 'cp862', + 'cp863': 'cp863', + 'cp864': 'cp864', + 'cp865': 'cp865', + 'cp866': 'cp866', + 'cp869': 'cp869', + 'cp936': 'gbk', + 'cpgr': 'cp869', + 'cpis': 'cp861', + 'csascii': 'ascii', + 'csbig5': 'big5', + 'cseuckr': 'cp949', + 'cseucpkdfmtjapanese': 'euc_jp', + 'csgb2312': 'gbk', + 'cshproman8': 'hp-roman8', + 'csibm037': 'cp037', + 'csibm1026': 'cp1026', + 'csibm424': 'cp424', + 'csibm500': 'cp500', + 'csibm855': 'cp855', + 'csibm857': 'cp857', + 'csibm860': 'cp860', + 'csibm861': 'cp861', + 'csibm863': 'cp863', + 'csibm864': 'cp864', + 'csibm865': 'cp865', + 'csibm866': 'cp866', + 'csibm869': 'cp869', + 'csiso2022jp': 'iso2022_jp', + 'csiso2022jp2': 'iso2022_jp_2', + 'csiso2022kr': 'iso2022_kr', + 'csiso58gb231280': 'gbk', + 'csisolatin1': 'windows-1252', + 'csisolatin2': 'iso8859-2', + 'csisolatin3': 'iso8859-3', + 'csisolatin4': 'iso8859-4', + 'csisolatin5': 'windows-1254', + 'csisolatin6': 'iso8859-10', + 'csisolatinarabic': 'iso8859-6', + 'csisolatincyrillic': 'iso8859-5', + 'csisolatingreek': 'iso8859-7', + 'csisolatinhebrew': 'iso8859-8', + 'cskoi8r': 'koi8-r', + 'csksc56011987': 'cp949', + 'cspc775baltic': 'cp775', + 'cspc850multilingual': 'cp850', + 'cspc862latinhebrew': 
'cp862', + 'cspc8codepage437': 'cp437', + 'cspcp852': 'cp852', + 'csptcp154': 'ptcp154', + 'csshiftjis': 'shift_jis', + 'csunicode11utf7': 'utf-7', + 'cyrillic': 'iso8859-5', + 'cyrillicasian': 'ptcp154', + 'ebcdiccpbe': 'cp500', + 'ebcdiccpca': 'cp037', + 'ebcdiccpch': 'cp500', + 'ebcdiccphe': 'cp424', + 'ebcdiccpnl': 'cp037', + 'ebcdiccpus': 'cp037', + 'ebcdiccpwt': 'cp037', + 'ecma114': 'iso8859-6', + 'ecma118': 'iso8859-7', + 'elot928': 'iso8859-7', + 'eucjp': 'euc_jp', + 'euckr': 'cp949', + 'extendedunixcodepackedformatforjapanese': 'euc_jp', + 'gb18030': 'gb18030', + 'gb2312': 'gbk', + 'gb231280': 'gbk', + 'gbk': 'gbk', + 'greek': 'iso8859-7', + 'greek8': 'iso8859-7', + 'hebrew': 'iso8859-8', + 'hproman8': 'hp-roman8', + 'hzgb2312': 'hz', + 'ibm037': 'cp037', + 'ibm1026': 'cp1026', + 'ibm367': 'ascii', + 'ibm424': 'cp424', + 'ibm437': 'cp437', + 'ibm500': 'cp500', + 'ibm775': 'cp775', + 'ibm819': 'windows-1252', + 'ibm850': 'cp850', + 'ibm852': 'cp852', + 'ibm855': 'cp855', + 'ibm857': 'cp857', + 'ibm860': 'cp860', + 'ibm861': 'cp861', + 'ibm862': 'cp862', + 'ibm863': 'cp863', + 'ibm864': 'cp864', + 'ibm865': 'cp865', + 'ibm866': 'cp866', + 'ibm869': 'cp869', + 'iso2022jp': 'iso2022_jp', + 'iso2022jp2': 'iso2022_jp_2', + 'iso2022kr': 'iso2022_kr', + 'iso646irv1991': 'ascii', + 'iso646us': 'ascii', + 'iso88591': 'windows-1252', + 'iso885910': 'iso8859-10', + 'iso8859101992': 'iso8859-10', + 'iso885911987': 'windows-1252', + 'iso885913': 'iso8859-13', + 'iso885914': 'iso8859-14', + 'iso8859141998': 'iso8859-14', + 'iso885915': 'iso8859-15', + 'iso885916': 'iso8859-16', + 'iso8859162001': 'iso8859-16', + 'iso88592': 'iso8859-2', + 'iso885921987': 'iso8859-2', + 'iso88593': 'iso8859-3', + 'iso885931988': 'iso8859-3', + 'iso88594': 'iso8859-4', + 'iso885941988': 'iso8859-4', + 'iso88595': 'iso8859-5', + 'iso885951988': 'iso8859-5', + 'iso88596': 'iso8859-6', + 'iso885961987': 'iso8859-6', + 'iso88597': 'iso8859-7', + 'iso885971987': 'iso8859-7', + 'iso88598': 'iso8859-8', + 'iso885981988': 'iso8859-8', + 'iso88599': 'windows-1254', + 'iso885991989': 'windows-1254', + 'isoceltic': 'iso8859-14', + 'isoir100': 'windows-1252', + 'isoir101': 'iso8859-2', + 'isoir109': 'iso8859-3', + 'isoir110': 'iso8859-4', + 'isoir126': 'iso8859-7', + 'isoir127': 'iso8859-6', + 'isoir138': 'iso8859-8', + 'isoir144': 'iso8859-5', + 'isoir148': 'windows-1254', + 'isoir149': 'cp949', + 'isoir157': 'iso8859-10', + 'isoir199': 'iso8859-14', + 'isoir226': 'iso8859-16', + 'isoir58': 'gbk', + 'isoir6': 'ascii', + 'koi8r': 'koi8-r', + 'koi8u': 'koi8-u', + 'korean': 'cp949', + 'ksc5601': 'cp949', + 'ksc56011987': 'cp949', + 'ksc56011989': 'cp949', + 'l1': 'windows-1252', + 'l10': 'iso8859-16', + 'l2': 'iso8859-2', + 'l3': 'iso8859-3', + 'l4': 'iso8859-4', + 'l5': 'windows-1254', + 'l6': 'iso8859-10', + 'l8': 'iso8859-14', + 'latin1': 'windows-1252', + 'latin10': 'iso8859-16', + 'latin2': 'iso8859-2', + 'latin3': 'iso8859-3', + 'latin4': 'iso8859-4', + 'latin5': 'windows-1254', + 'latin6': 'iso8859-10', + 'latin8': 'iso8859-14', + 'latin9': 'iso8859-15', + 'ms936': 'gbk', + 'mskanji': 'shift_jis', + 'pt154': 'ptcp154', + 'ptcp154': 'ptcp154', + 'r8': 'hp-roman8', + 'roman8': 'hp-roman8', + 'shiftjis': 'shift_jis', + 'tis620': 'cp874', + 'unicode11utf7': 'utf-7', + 'us': 'ascii', + 'usascii': 'ascii', + 'utf16': 'utf-16', + 'utf16be': 'utf-16-be', + 'utf16le': 'utf-16-le', + 'utf8': 'utf-8', + 'windows1250': 'cp1250', + 'windows1251': 'cp1251', + 'windows1252': 'cp1252', + 'windows1253': 'cp1253', + 'windows1254': 
'cp1254', + 'windows1255': 'cp1255', + 'windows1256': 'cp1256', + 'windows1257': 'cp1257', + 'windows1258': 'cp1258', + 'windows936': 'gbk', + 'x-x-big5': 'big5'} + +tokenTypes = { + "Doctype": 0, + "Characters": 1, + "SpaceCharacters": 2, + "StartTag": 3, + "EndTag": 4, + "EmptyTag": 5, + "Comment": 6, + "ParseError": 7 +} + +tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"], + tokenTypes["EmptyTag"])) + + +prefixes = dict([(v, k) for k, v in namespaces.items()]) +prefixes["http://www.w3.org/1998/Math/MathML"] = "math" + + +class DataLossWarning(UserWarning): + pass + + +class ReparseException(Exception): + pass diff --git a/awx/lib/site-packages/pip/vendor/html5lib/filters/__init__.py b/awx/lib/site-packages/pip/vendor/html5lib/filters/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/pip/vendor/html5lib/filters/_base.py b/awx/lib/site-packages/pip/vendor/html5lib/filters/_base.py new file mode 100644 index 0000000000..c7dbaed0fa --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/filters/_base.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, unicode_literals + + +class Filter(object): + def __init__(self, source): + self.source = source + + def __iter__(self): + return iter(self.source) + + def __getattr__(self, name): + return getattr(self.source, name) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/filters/alphabeticalattributes.py b/awx/lib/site-packages/pip/vendor/html5lib/filters/alphabeticalattributes.py new file mode 100644 index 0000000000..fed6996c1d --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/filters/alphabeticalattributes.py @@ -0,0 +1,20 @@ +from __future__ import absolute_import, division, unicode_literals + +from . import _base + +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict + + +class Filter(_base.Filter): + def __iter__(self): + for token in _base.Filter.__iter__(self): + if token["type"] in ("StartTag", "EmptyTag"): + attrs = OrderedDict() + for name, value in sorted(token["data"].items(), + key=lambda x: x[0]): + attrs[name] = value + token["data"] = attrs + yield token diff --git a/awx/lib/site-packages/pip/vendor/html5lib/filters/inject_meta_charset.py b/awx/lib/site-packages/pip/vendor/html5lib/filters/inject_meta_charset.py new file mode 100644 index 0000000000..ca33b70b53 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/filters/inject_meta_charset.py @@ -0,0 +1,65 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import _base + + +class Filter(_base.Filter): + def __init__(self, source, encoding): + _base.Filter.__init__(self, source) + self.encoding = encoding + + def __iter__(self): + state = "pre_head" + meta_found = (self.encoding is None) + pending = [] + + for token in _base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag": + if token["name"].lower() == "head": + state = "in_head" + + elif type == "EmptyTag": + if token["name"].lower() == "meta": + # replace charset with actual encoding + has_http_equiv_content_type = False + for (namespace, name), value in token["data"].items(): + if namespace is not None: + continue + elif name.lower() == 'charset': + token["data"][(namespace, name)] = self.encoding + meta_found = True + break + elif name == 'http-equiv' and value.lower() == 'content-type': + has_http_equiv_content_type = True + else: + if has_http_equiv_content_type and (None, "content") in token["data"]: + token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding + meta_found = True + + elif token["name"].lower() == "head" and not meta_found: + # insert meta into empty head + yield {"type": "StartTag", "name": "head", + "data": token["data"]} + yield {"type": "EmptyTag", "name": "meta", + "data": {(None, "charset"): self.encoding}} + yield {"type": "EndTag", "name": "head"} + meta_found = True + continue + + elif type == "EndTag": + if token["name"].lower() == "head" and pending: + # insert meta into head (if necessary) and flush pending queue + yield pending.pop(0) + if not meta_found: + yield {"type": "EmptyTag", "name": "meta", + "data": {(None, "charset"): self.encoding}} + while pending: + yield pending.pop(0) + meta_found = True + state = "post_head" + + if state == "in_head": + pending.append(token) + else: + yield token diff --git a/awx/lib/site-packages/pip/vendor/html5lib/filters/lint.py b/awx/lib/site-packages/pip/vendor/html5lib/filters/lint.py new file mode 100644 index 0000000000..83ad63971d --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/filters/lint.py @@ -0,0 +1,93 @@ +from __future__ import absolute_import, division, unicode_literals + +from gettext import gettext +_ = gettext + +from . 
import _base +from ..constants import cdataElements, rcdataElements, voidElements + +from ..constants import spaceCharacters +spaceCharacters = "".join(spaceCharacters) + + +class LintError(Exception): + pass + + +class Filter(_base.Filter): + def __iter__(self): + open_elements = [] + contentModelFlag = "PCDATA" + for token in _base.Filter.__iter__(self): + type = token["type"] + if type in ("StartTag", "EmptyTag"): + name = token["name"] + if contentModelFlag != "PCDATA": + raise LintError(_("StartTag not in PCDATA content model flag: %s") % name) + if not isinstance(name, str): + raise LintError(_("Tag name is not a string: %r") % name) + if not name: + raise LintError(_("Empty tag name")) + if type == "StartTag" and name in voidElements: + raise LintError(_("Void element reported as StartTag token: %s") % name) + elif type == "EmptyTag" and name not in voidElements: + raise LintError(_("Non-void element reported as EmptyTag token: %s") % token["name"]) + if type == "StartTag": + open_elements.append(name) + for name, value in token["data"]: + if not isinstance(name, str): + raise LintError(_("Attribute name is not a string: %r") % name) + if not name: + raise LintError(_("Empty attribute name")) + if not isinstance(value, str): + raise LintError(_("Attribute value is not a string: %r") % value) + if name in cdataElements: + contentModelFlag = "CDATA" + elif name in rcdataElements: + contentModelFlag = "RCDATA" + elif name == "plaintext": + contentModelFlag = "PLAINTEXT" + + elif type == "EndTag": + name = token["name"] + if not isinstance(name, str): + raise LintError(_("Tag name is not a string: %r") % name) + if not name: + raise LintError(_("Empty tag name")) + if name in voidElements: + raise LintError(_("Void element reported as EndTag token: %s") % name) + start_name = open_elements.pop() + if start_name != name: + raise LintError(_("EndTag (%s) does not match StartTag (%s)") % (name, start_name)) + contentModelFlag = "PCDATA" + + elif type == "Comment": + if contentModelFlag != "PCDATA": + raise LintError(_("Comment not in PCDATA content model flag")) + + elif type in ("Characters", "SpaceCharacters"): + data = token["data"] + if not isinstance(data, str): + raise LintError(_("Attribute name is not a string: %r") % data) + if not data: + raise LintError(_("%s token with empty data") % type) + if type == "SpaceCharacters": + data = data.strip(spaceCharacters) + if data: + raise LintError(_("Non-space character(s) found in SpaceCharacters token: ") % data) + + elif type == "Doctype": + name = token["name"] + if contentModelFlag != "PCDATA": + raise LintError(_("Doctype not in PCDATA content model flag: %s") % name) + if not isinstance(name, str): + raise LintError(_("Tag name is not a string: %r") % name) + # XXX: what to do with token["data"] ? + + elif type in ("ParseError", "SerializeError"): + pass + + else: + raise LintError(_("Unknown token type: %s") % type) + + yield token diff --git a/awx/lib/site-packages/pip/vendor/html5lib/filters/optionaltags.py b/awx/lib/site-packages/pip/vendor/html5lib/filters/optionaltags.py new file mode 100644 index 0000000000..fefe0b3097 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/filters/optionaltags.py @@ -0,0 +1,205 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import _base + + +class Filter(_base.Filter): + def slider(self): + previous1 = previous2 = None + for token in self.source: + if previous1 is not None: + yield previous2, previous1, token + previous2 = previous1 + previous1 = token + yield previous2, previous1, None + + def __iter__(self): + for previous, token, next in self.slider(): + type = token["type"] + if type == "StartTag": + if (token["data"] or + not self.is_optional_start(token["name"], previous, next)): + yield token + elif type == "EndTag": + if not self.is_optional_end(token["name"], next): + yield token + else: + yield token + + def is_optional_start(self, tagname, previous, next): + type = next and next["type"] or None + if tagname in 'html': + # An html element's start tag may be omitted if the first thing + # inside the html element is not a space character or a comment. + return type not in ("Comment", "SpaceCharacters") + elif tagname == 'head': + # A head element's start tag may be omitted if the first thing + # inside the head element is an element. + # XXX: we also omit the start tag if the head element is empty + if type in ("StartTag", "EmptyTag"): + return True + elif type == "EndTag": + return next["name"] == "head" + elif tagname == 'body': + # A body element's start tag may be omitted if the first thing + # inside the body element is not a space character or a comment, + # except if the first thing inside the body element is a script + # or style element and the node immediately preceding the body + # element is a head element whose end tag has been omitted. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we do not look at the preceding event, so we never omit + # the body element's start tag if it's followed by a script or + # a style element. + return next["name"] not in ('script', 'style') + else: + return True + elif tagname == 'colgroup': + # A colgroup element's start tag may be omitted if the first thing + # inside the colgroup element is a col element, and if the element + # is not immediately preceeded by another colgroup element whose + # end tag has been omitted. + if type in ("StartTag", "EmptyTag"): + # XXX: we do not look at the preceding event, so instead we never + # omit the colgroup element's end tag when it is immediately + # followed by another colgroup element. See is_optional_end. + return next["name"] == "col" + else: + return False + elif tagname == 'tbody': + # A tbody element's start tag may be omitted if the first thing + # inside the tbody element is a tr element, and if the element is + # not immediately preceeded by a tbody, thead, or tfoot element + # whose end tag has been omitted. + if type == "StartTag": + # omit the thead and tfoot elements' end tag when they are + # immediately followed by a tbody element. See is_optional_end. + if previous and previous['type'] == 'EndTag' and \ + previous['name'] in ('tbody', 'thead', 'tfoot'): + return False + return next["name"] == 'tr' + else: + return False + return False + + def is_optional_end(self, tagname, next): + type = next and next["type"] or None + if tagname in ('html', 'head', 'body'): + # An html element's end tag may be omitted if the html element + # is not immediately followed by a space character or a comment. + return type not in ("Comment", "SpaceCharacters") + elif tagname in ('li', 'optgroup', 'tr'): + # A li element's end tag may be omitted if the li element is + # immediately followed by another li element or if there is + # no more content in the parent element. 
+ # An optgroup element's end tag may be omitted if the optgroup + # element is immediately followed by another optgroup element, + # or if there is no more content in the parent element. + # A tr element's end tag may be omitted if the tr element is + # immediately followed by another tr element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] == tagname + else: + return type == "EndTag" or type is None + elif tagname in ('dt', 'dd'): + # A dt element's end tag may be omitted if the dt element is + # immediately followed by another dt element or a dd element. + # A dd element's end tag may be omitted if the dd element is + # immediately followed by another dd element or a dt element, + # or if there is no more content in the parent element. + if type == "StartTag": + return next["name"] in ('dt', 'dd') + elif tagname == 'dd': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'p': + # A p element's end tag may be omitted if the p element is + # immediately followed by an address, article, aside, + # blockquote, datagrid, dialog, dir, div, dl, fieldset, + # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu, + # nav, ol, p, pre, section, table, or ul element, or if + # there is no more content in the parent element. + if type in ("StartTag", "EmptyTag"): + return next["name"] in ('address', 'article', 'aside', + 'blockquote', 'datagrid', 'dialog', + 'dir', 'div', 'dl', 'fieldset', 'footer', + 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', + 'header', 'hr', 'menu', 'nav', 'ol', + 'p', 'pre', 'section', 'table', 'ul') + else: + return type == "EndTag" or type is None + elif tagname == 'option': + # An option element's end tag may be omitted if the option + # element is immediately followed by another option element, + # or if it is immediately followed by an optgroup + # element, or if there is no more content in the parent + # element. + if type == "StartTag": + return next["name"] in ('option', 'optgroup') + else: + return type == "EndTag" or type is None + elif tagname in ('rt', 'rp'): + # An rt element's end tag may be omitted if the rt element is + # immediately followed by an rt or rp element, or if there is + # no more content in the parent element. + # An rp element's end tag may be omitted if the rp element is + # immediately followed by an rt or rp element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] in ('rt', 'rp') + else: + return type == "EndTag" or type is None + elif tagname == 'colgroup': + # A colgroup element's end tag may be omitted if the colgroup + # element is not immediately followed by a space character or + # a comment. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we also look for an immediately following colgroup + # element. See is_optional_start. + return next["name"] != 'colgroup' + else: + return True + elif tagname in ('thead', 'tbody'): + # A thead element's end tag may be omitted if the thead element + # is immediately followed by a tbody or tfoot element. + # A tbody element's end tag may be omitted if the tbody element + # is immediately followed by a tbody or tfoot element, or if + # there is no more content in the parent element. + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. 
+ # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] in ['tbody', 'tfoot'] + elif tagname == 'tbody': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'tfoot': + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. + # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] == 'tbody' + else: + return type == "EndTag" or type is None + elif tagname in ('td', 'th'): + # A td element's end tag may be omitted if the td element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + # A th element's end tag may be omitted if the th element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] in ('td', 'th') + else: + return type == "EndTag" or type is None + return False diff --git a/awx/lib/site-packages/pip/vendor/html5lib/filters/sanitizer.py b/awx/lib/site-packages/pip/vendor/html5lib/filters/sanitizer.py new file mode 100644 index 0000000000..b206b54e7a --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/filters/sanitizer.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, unicode_literals + +from . import _base +from ..sanitizer import HTMLSanitizerMixin + + +class Filter(_base.Filter, HTMLSanitizerMixin): + def __iter__(self): + for token in _base.Filter.__iter__(self): + token = self.sanitize_token(token) + if token: + yield token diff --git a/awx/lib/site-packages/pip/vendor/html5lib/filters/whitespace.py b/awx/lib/site-packages/pip/vendor/html5lib/filters/whitespace.py new file mode 100644 index 0000000000..dfc60eebd3 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/filters/whitespace.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import, division, unicode_literals + +import re + +from . import _base +from ..constants import rcdataElements, spaceCharacters +spaceCharacters = "".join(spaceCharacters) + +SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) + + +class Filter(_base.Filter): + + spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) + + def __iter__(self): + preserve = 0 + for token in _base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag" \ + and (preserve or token["name"] in self.spacePreserveElements): + preserve += 1 + + elif type == "EndTag" and preserve: + preserve -= 1 + + elif not preserve and type == "SpaceCharacters" and token["data"]: + # Test on token["data"] above to not introduce spaces where there were not + token["data"] = " " + + elif not preserve and type == "Characters": + token["data"] = collapse_spaces(token["data"]) + + yield token + + +def collapse_spaces(text): + return SPACES_REGEX.sub(' ', text) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/html5parser.py b/awx/lib/site-packages/pip/vendor/html5lib/html5parser.py new file mode 100644 index 0000000000..9dfa4dd0a5 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/html5parser.py @@ -0,0 +1,2725 @@ +from __future__ import absolute_import, division, unicode_literals +from pip.vendor.six import with_metaclass + +import types + +from . import inputstream +from . import tokenizer + +from . 
import treebuilders +from .treebuilders._base import Marker + +from . import utils +from . import constants +from .constants import spaceCharacters, asciiUpper2Lower +from .constants import specialElements +from .constants import headingElements +from .constants import cdataElements, rcdataElements +from .constants import tokenTypes, ReparseException, namespaces +from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements + + +def parse(doc, treebuilder="etree", encoding=None, + namespaceHTMLElements=True): + """Parse a string or file-like object into a tree""" + tb = treebuilders.getTreeBuilder(treebuilder) + p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) + return p.parse(doc, encoding=encoding) + + +def parseFragment(doc, container="div", treebuilder="etree", encoding=None, + namespaceHTMLElements=True): + tb = treebuilders.getTreeBuilder(treebuilder) + p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) + return p.parseFragment(doc, container=container, encoding=encoding) + + +def method_decorator_metaclass(function): + class Decorated(type): + def __new__(meta, classname, bases, classDict): + for attributeName, attribute in classDict.items(): + if isinstance(attribute, types.FunctionType): + attribute = function(attribute) + + classDict[attributeName] = attribute + return type.__new__(meta, classname, bases, classDict) + return Decorated + + +class HTMLParser(object): + """HTML parser. Generates a tree structure from a stream of (possibly + malformed) HTML""" + + def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer, + strict=False, namespaceHTMLElements=True, debug=False): + """ + strict - raise an exception when a parse error is encountered + + tree - a treebuilder class controlling the type of tree that will be + returned. Built in treebuilders can be accessed through + html5lib.treebuilders.getTreeBuilder(treeType) + + tokenizer - a class that provides a stream of tokens to the treebuilder. + This may be replaced for e.g. 
a sanitizer which converts some tags to + text + """ + + # Raise an exception on the first error encountered + self.strict = strict + + if tree is None: + tree = treebuilders.getTreeBuilder("etree") + self.tree = tree(namespaceHTMLElements) + self.tokenizer_class = tokenizer + self.errors = [] + + self.phases = dict([(name, cls(self, self.tree)) for name, cls in + getPhases(debug).items()]) + + def _parse(self, stream, innerHTML=False, container="div", + encoding=None, parseMeta=True, useChardet=True, **kwargs): + + self.innerHTMLMode = innerHTML + self.container = container + self.tokenizer = self.tokenizer_class(stream, encoding=encoding, + parseMeta=parseMeta, + useChardet=useChardet, + parser=self, **kwargs) + self.reset() + + while True: + try: + self.mainLoop() + break + except ReparseException: + self.reset() + + def reset(self): + self.tree.reset() + self.firstStartTag = False + self.errors = [] + self.log = [] # only used with debug mode + # "quirks" / "limited quirks" / "no quirks" + self.compatMode = "no quirks" + + if self.innerHTMLMode: + self.innerHTML = self.container.lower() + + if self.innerHTML in cdataElements: + self.tokenizer.state = self.tokenizer.rcdataState + elif self.innerHTML in rcdataElements: + self.tokenizer.state = self.tokenizer.rawtextState + elif self.innerHTML == 'plaintext': + self.tokenizer.state = self.tokenizer.plaintextState + else: + # state already is data state + # self.tokenizer.state = self.tokenizer.dataState + pass + self.phase = self.phases["beforeHtml"] + self.phase.insertHtmlElement() + self.resetInsertionMode() + else: + self.innerHTML = False + self.phase = self.phases["initial"] + + self.lastPhase = None + + self.beforeRCDataPhase = None + + self.framesetOK = True + + def isHTMLIntegrationPoint(self, element): + if (element.name == "annotation-xml" and + element.namespace == namespaces["mathml"]): + return ("encoding" in element.attributes and + element.attributes["encoding"].translate( + asciiUpper2Lower) in + ("text/html", "application/xhtml+xml")) + else: + return (element.namespace, element.name) in htmlIntegrationPointElements + + def isMathMLTextIntegrationPoint(self, element): + return (element.namespace, element.name) in mathmlTextIntegrationPointElements + + def mainLoop(self): + CharactersToken = tokenTypes["Characters"] + SpaceCharactersToken = tokenTypes["SpaceCharacters"] + StartTagToken = tokenTypes["StartTag"] + EndTagToken = tokenTypes["EndTag"] + CommentToken = tokenTypes["Comment"] + DoctypeToken = tokenTypes["Doctype"] + ParseErrorToken = tokenTypes["ParseError"] + + for token in self.normalizedTokens(): + new_token = token + while new_token is not None: + currentNode = self.tree.openElements[-1] if self.tree.openElements else None + currentNodeNamespace = currentNode.namespace if currentNode else None + currentNodeName = currentNode.name if currentNode else None + + type = new_token["type"] + + if type == ParseErrorToken: + self.parseError(new_token["data"], new_token.get("datavars", {})) + new_token = None + else: + if (len(self.tree.openElements) == 0 or + currentNodeNamespace == self.tree.defaultNamespace or + (self.isMathMLTextIntegrationPoint(currentNode) and + ((type == StartTagToken and + token["name"] not in frozenset(["mglyph", "malignmark"])) or + type in (CharactersToken, SpaceCharactersToken))) or + (currentNodeNamespace == namespaces["mathml"] and + currentNodeName == "annotation-xml" and + token["name"] == "svg") or + (self.isHTMLIntegrationPoint(currentNode) and + type in (StartTagToken, 
CharactersToken, SpaceCharactersToken))): + phase = self.phase + else: + phase = self.phases["inForeignContent"] + + if type == CharactersToken: + new_token = phase.processCharacters(new_token) + elif type == SpaceCharactersToken: + new_token = phase.processSpaceCharacters(new_token) + elif type == StartTagToken: + new_token = phase.processStartTag(new_token) + elif type == EndTagToken: + new_token = phase.processEndTag(new_token) + elif type == CommentToken: + new_token = phase.processComment(new_token) + elif type == DoctypeToken: + new_token = phase.processDoctype(new_token) + + if (type == StartTagToken and token["selfClosing"] + and not token["selfClosingAcknowledged"]): + self.parseError("non-void-element-with-trailing-solidus", + {"name": token["name"]}) + + # When the loop finishes it's EOF + reprocess = True + phases = [] + while reprocess: + phases.append(self.phase) + reprocess = self.phase.processEOF() + if reprocess: + assert self.phase not in phases + + def normalizedTokens(self): + for token in self.tokenizer: + yield self.normalizeToken(token) + + def parse(self, stream, encoding=None, parseMeta=True, useChardet=True): + """Parse a HTML document into a well-formed tree + + stream - a filelike object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + """ + self._parse(stream, innerHTML=False, encoding=encoding, + parseMeta=parseMeta, useChardet=useChardet) + return self.tree.getDocument() + + def parseFragment(self, stream, container="div", encoding=None, + parseMeta=False, useChardet=True): + """Parse a HTML fragment into a well-formed tree fragment + + container - name of the element we're setting the innerHTML property + if set to None, default to 'div' + + stream - a filelike object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + """ + self._parse(stream, True, container=container, encoding=encoding) + return self.tree.getFragment() + + def parseError(self, errorcode="XXX-undefined-error", datavars={}): + # XXX The idea is to make errorcode mandatory. 
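The module-level parse() and parseFragment() helpers above, plus the strict flag on HTMLParser, cover the common entry points. A short usage sketch, assuming the package is importable as plain html5lib (here it ships vendored under pip.vendor):

    import html5lib

    # Full document: returns an xml.etree element tree by default.
    doc = html5lib.parse("<p>paragraph<table><td>cell")

    # Fragment: parsed as if it were the innerHTML of the given container.
    frag = html5lib.parseFragment("<li>one<li>two", container="ul")

    # strict=True raises on the first parse error instead of recovering;
    # the missing doctype alone is enough to trigger it here.
    parser = html5lib.HTMLParser(strict=True)
    try:
        parser.parse("<p>unclosed <b>tag")
    except Exception as err:  # html5lib.html5parser.ParseError
        print("parse error:", err)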
+ self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) + if self.strict: + raise ParseError + + def normalizeToken(self, token): + """ HTML5 specific normalizations to the token stream """ + + if token["type"] == tokenTypes["StartTag"]: + token["data"] = dict(token["data"][::-1]) + + return token + + def adjustMathMLAttributes(self, token): + replacements = {"definitionurl": "definitionURL"} + for k, v in replacements.items(): + if k in token["data"]: + token["data"][v] = token["data"][k] + del token["data"][k] + + def adjustSVGAttributes(self, token): + replacements = { + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan" + } + for originalName in list(token["data"].keys()): + if originalName in replacements: + svgName = replacements[originalName] + token["data"][svgName] = token["data"][originalName] + del token["data"][originalName] + + def adjustForeignAttributes(self, token): + replacements = { + "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), + "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), + "xlink:href": ("xlink", "href", namespaces["xlink"]), + "xlink:role": ("xlink", "role", namespaces["xlink"]), + "xlink:show": ("xlink", "show", namespaces["xlink"]), + "xlink:title": ("xlink", "title", namespaces["xlink"]), + "xlink:type": ("xlink", "type", namespaces["xlink"]), + "xml:base": ("xml", "base", namespaces["xml"]), + "xml:lang": ("xml", "lang", namespaces["xml"]), + "xml:space": ("xml", "space", 
namespaces["xml"]), + "xmlns": (None, "xmlns", namespaces["xmlns"]), + "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) + } + + for originalName in token["data"].keys(): + if originalName in replacements: + foreignName = replacements[originalName] + token["data"][foreignName] = token["data"][originalName] + del token["data"][originalName] + + def reparseTokenNormal(self, token): + self.parser.phase() + + def resetInsertionMode(self): + # The name of this method is mostly historical. (It's also used in the + # specification.) + last = False + newModes = { + "select": "inSelect", + "td": "inCell", + "th": "inCell", + "tr": "inRow", + "tbody": "inTableBody", + "thead": "inTableBody", + "tfoot": "inTableBody", + "caption": "inCaption", + "colgroup": "inColumnGroup", + "table": "inTable", + "head": "inBody", + "body": "inBody", + "frameset": "inFrameset", + "html": "beforeHead" + } + for node in self.tree.openElements[::-1]: + nodeName = node.name + new_phase = None + if node == self.tree.openElements[0]: + assert self.innerHTML + last = True + nodeName = self.innerHTML + # Check for conditions that should only happen in the innerHTML + # case + if nodeName in ("select", "colgroup", "head", "html"): + assert self.innerHTML + + if not last and node.namespace != self.tree.defaultNamespace: + continue + + if nodeName in newModes: + new_phase = self.phases[newModes[nodeName]] + break + elif last: + new_phase = self.phases["inBody"] + break + + self.phase = new_phase + + def parseRCDataRawtext(self, token, contentType): + """Generic RCDATA/RAWTEXT Parsing algorithm + contentType - RCDATA or RAWTEXT + """ + assert contentType in ("RAWTEXT", "RCDATA") + + self.tree.insertElement(token) + + if contentType == "RAWTEXT": + self.tokenizer.state = self.tokenizer.rawtextState + else: + self.tokenizer.state = self.tokenizer.rcdataState + + self.originalPhase = self.phase + + self.phase = self.phases["text"] + + +def getPhases(debug): + def log(function): + """Logger that records which phase processes each token""" + type_names = dict((value, key) for key, value in + constants.tokenTypes.items()) + + def wrapped(self, *args, **kwargs): + if function.__name__.startswith("process") and len(args) > 0: + token = args[0] + try: + info = {"type": type_names[token['type']]} + except: + raise + if token['type'] in constants.tagTokenTypes: + info["name"] = token['name'] + + self.parser.log.append((self.parser.tokenizer.state.__name__, + self.parser.phase.__class__.__name__, + self.__class__.__name__, + function.__name__, + info)) + return function(self, *args, **kwargs) + else: + return function(self, *args, **kwargs) + return wrapped + + def getMetaclass(use_metaclass, metaclass_func): + if use_metaclass: + return method_decorator_metaclass(metaclass_func) + else: + return type + + class Phase(with_metaclass(getMetaclass(debug, log))): + """Base class for helper object that implements each phase of processing + """ + + def __init__(self, parser, tree): + self.parser = parser + self.tree = tree + + def processEOF(self): + raise NotImplementedError + + def processComment(self, token): + # For most phases the following is correct. Where it's not it will be + # overridden. 
+ self.tree.insertComment(token, self.tree.openElements[-1]) + + def processDoctype(self, token): + self.parser.parseError("unexpected-doctype") + + def processCharacters(self, token): + self.tree.insertText(token["data"]) + + def processSpaceCharacters(self, token): + self.tree.insertText(token["data"]) + + def processStartTag(self, token): + return self.startTagHandler[token["name"]](token) + + def startTagHtml(self, token): + if not self.parser.firstStartTag and token["name"] == "html": + self.parser.parseError("non-html-root") + # XXX Need a check here to see if the first start tag token emitted is + # this token... If it's not, invoke self.parser.parseError(). + for attr, value in token["data"].items(): + if attr not in self.tree.openElements[0].attributes: + self.tree.openElements[0].attributes[attr] = value + self.parser.firstStartTag = False + + def processEndTag(self, token): + return self.endTagHandler[token["name"]](token) + + class InitialPhase(Phase): + def processSpaceCharacters(self, token): + pass + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + correct = token["correct"] + + if (name != "html" or publicId is not None or + systemId is not None and systemId != "about:legacy-compat"): + self.parser.parseError("unknown-doctype") + + if publicId is None: + publicId = "" + + self.tree.insertDoctype(token) + + if publicId != "": + publicId = publicId.translate(asciiUpper2Lower) + + if (not correct or token["name"] != "html" + or publicId.startswith( + ("+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//")) + or publicId in + ("-//w3o//dtd w3 html strict 3.0//en//", + "-/w3c/dtd html 4.0 transitional/en", + "html") + or publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is None + or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): + self.parser.compatMode = "quirks" + elif (publicId.startswith( + ("-//w3c//dtd xhtml 1.0 frameset//", + "-//w3c//dtd xhtml 1.0 transitional//")) + or publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is not None): + self.parser.compatMode = "limited quirks" + + self.parser.phase = self.parser.phases["beforeHtml"] + + def anythingElse(self): + self.parser.compatMode = "quirks" + self.parser.phase = self.parser.phases["beforeHtml"] + + def processCharacters(self, token): + self.parser.parseError("expected-doctype-but-got-chars") + self.anythingElse() + return token + + def processStartTag(self, token): + self.parser.parseError("expected-doctype-but-got-start-tag", + {"name": token["name"]}) + self.anythingElse() + return token + + def processEndTag(self, token): + self.parser.parseError("expected-doctype-but-got-end-tag", + {"name": token["name"]}) + self.anythingElse() + return token + + def processEOF(self): + self.parser.parseError("expected-doctype-but-got-eof") + self.anythingElse() + return True + + class BeforeHtmlPhase(Phase): + # helper methods + def insertHtmlElement(self): + self.tree.insertRoot(impliedTagToken("html", "StartTag")) + self.parser.phase = self.parser.phases["beforeHead"] + + # other + def processEOF(self): + self.insertHtmlElement() + return True + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processSpaceCharacters(self, token): + pass + + def processCharacters(self, token): + self.insertHtmlElement() + return token + + def processStartTag(self, token): + if token["name"] == "html": + self.parser.firstStartTag = True + self.insertHtmlElement() + return token + + def processEndTag(self, token): + if token["name"] not in ("head", "body", "html", "br"): + self.parser.parseError("unexpected-end-tag-before-html", + {"name": token["name"]}) + else: + self.insertHtmlElement() + return token + + class BeforeHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("head", self.startTagHead) + 
]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("head", "body", "html", "br"), self.endTagImplyHead) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.startTagHead(impliedTagToken("head", "StartTag")) + return True + + def processSpaceCharacters(self, token): + pass + + def processCharacters(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagHead(self, token): + self.tree.insertElement(token) + self.tree.headPointer = self.tree.openElements[-1] + self.parser.phase = self.parser.phases["inHead"] + + def startTagOther(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def endTagImplyHead(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def endTagOther(self, token): + self.parser.parseError("end-tag-after-implied-root", + {"name": token["name"]}) + + class InHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("title", self.startTagTitle), + (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle), + ("script", self.startTagScript), + (("base", "basefont", "bgsound", "command", "link"), + self.startTagBaseLinkCommand), + ("meta", self.startTagMeta), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + + self. endTagHandler = utils.MethodDispatcher([ + ("head", self.endTagHead), + (("br", "html", "body"), self.endTagHtmlBodyBr) + ]) + self.endTagHandler.default = self.endTagOther + + # the real thing + def processEOF(self): + self.anythingElse() + return True + + def processCharacters(self, token): + self.anythingElse() + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagHead(self, token): + self.parser.parseError("two-heads-are-not-better-than-one") + + def startTagBaseLinkCommand(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagMeta(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + attributes = token["data"] + if self.parser.tokenizer.stream.charEncoding[1] == "tentative": + if "charset" in attributes: + self.parser.tokenizer.stream.changeEncoding(attributes["charset"]) + elif ("content" in attributes and + "http-equiv" in attributes and + attributes["http-equiv"].lower() == "content-type"): + # Encoding it as UTF-8 here is a hack, as really we should pass + # the abstract Unicode string, and just use the + # ContentAttrParser on that, but using UTF-8 allows all chars + # to be encoded and as a ASCII-superset works. 
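The InitialPhase doctype handling above sorts documents into "no quirks", "limited quirks", or "quirks" based on the doctype's public and system identifiers, and the result is exposed as compatMode on the parser. Observable behaviour, assuming a standard html5lib install:

    import html5lib

    for markup in ("<!DOCTYPE html><p>x",   # modern doctype
                   "<p>x",                  # missing doctype
                   '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN"><p>x'):
        parser = html5lib.HTMLParser()
        parser.parse(markup)
        print(parser.compatMode)
    # no quirks / quirks / quirks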
+ data = inputstream.EncodingBytes(attributes["content"].encode("utf-8")) + parser = inputstream.ContentAttrParser(data) + codec = parser.parse() + self.parser.tokenizer.stream.changeEncoding(codec) + + def startTagTitle(self, token): + self.parser.parseRCDataRawtext(token, "RCDATA") + + def startTagNoScriptNoFramesStyle(self, token): + # Need to decide whether to implement the scripting-disabled case + self.parser.parseRCDataRawtext(token, "RAWTEXT") + + def startTagScript(self, token): + self.tree.insertElement(token) + self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState + self.parser.originalPhase = self.parser.phase + self.parser.phase = self.parser.phases["text"] + + def startTagOther(self, token): + self.anythingElse() + return token + + def endTagHead(self, token): + node = self.parser.tree.openElements.pop() + assert node.name == "head", "Expected head got %s" % node.name + self.parser.phase = self.parser.phases["afterHead"] + + def endTagHtmlBodyBr(self, token): + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + self.endTagHead(impliedTagToken("head")) + + # XXX If we implement a parser for which scripting is disabled we need to + # implement this phase. + # + # class InHeadNoScriptPhase(Phase): + class AfterHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("body", self.startTagBody), + ("frameset", self.startTagFrameset), + (("base", "basefont", "bgsound", "link", "meta", "noframes", "script", + "style", "title"), + self.startTagFromHead), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"), + self.endTagHtmlBodyBr)]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.anythingElse() + return True + + def processCharacters(self, token): + self.anythingElse() + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagBody(self, token): + self.parser.framesetOK = False + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inBody"] + + def startTagFrameset(self, token): + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inFrameset"] + + def startTagFromHead(self, token): + self.parser.parseError("unexpected-start-tag-out-of-my-head", + {"name": token["name"]}) + self.tree.openElements.append(self.tree.headPointer) + self.parser.phases["inHead"].processStartTag(token) + for node in self.tree.openElements[::-1]: + if node.name == "head": + self.tree.openElements.remove(node) + break + + def startTagHead(self, token): + self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) + + def startTagOther(self, token): + self.anythingElse() + return token + + def endTagHtmlBodyBr(self, token): + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + self.tree.insertElement(impliedTagToken("body", "StartTag")) + self.parser.phase = self.parser.phases["inBody"] + self.parser.framesetOK = True + + class InBodyPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody + # the really-really-really-very crazy mode + def __init__(self, 
parser, tree): + Phase.__init__(self, parser, tree) + + # Keep a ref to this for special handling of whitespace in <pre> + self.processSpaceCharactersNonPre = self.processSpaceCharacters + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("base", "basefont", "bgsound", "command", "link", "meta", + "noframes", "script", "style", "title"), + self.startTagProcessInHead), + ("body", self.startTagBody), + ("frameset", self.startTagFrameset), + (("address", "article", "aside", "blockquote", "center", "details", + "details", "dir", "div", "dl", "fieldset", "figcaption", "figure", + "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p", + "section", "summary", "ul"), + self.startTagCloseP), + (headingElements, self.startTagHeading), + (("pre", "listing"), self.startTagPreListing), + ("form", self.startTagForm), + (("li", "dd", "dt"), self.startTagListItem), + ("plaintext", self.startTagPlaintext), + ("a", self.startTagA), + (("b", "big", "code", "em", "font", "i", "s", "small", "strike", + "strong", "tt", "u"), self.startTagFormatting), + ("nobr", self.startTagNobr), + ("button", self.startTagButton), + (("applet", "marquee", "object"), self.startTagAppletMarqueeObject), + ("xmp", self.startTagXmp), + ("table", self.startTagTable), + (("area", "br", "embed", "img", "keygen", "wbr"), + self.startTagVoidFormatting), + (("param", "source", "track"), self.startTagParamSource), + ("input", self.startTagInput), + ("hr", self.startTagHr), + ("image", self.startTagImage), + ("isindex", self.startTagIsIndex), + ("textarea", self.startTagTextarea), + ("iframe", self.startTagIFrame), + (("noembed", "noframes", "noscript"), self.startTagRawtext), + ("select", self.startTagSelect), + (("rp", "rt"), self.startTagRpRt), + (("option", "optgroup"), self.startTagOpt), + (("math"), self.startTagMath), + (("svg"), self.startTagSvg), + (("caption", "col", "colgroup", "frame", "head", + "tbody", "td", "tfoot", "th", "thead", + "tr"), self.startTagMisplaced) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("body", self.endTagBody), + ("html", self.endTagHtml), + (("address", "article", "aside", "blockquote", "button", "center", + "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure", + "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre", + "section", "summary", "ul"), self.endTagBlock), + ("form", self.endTagForm), + ("p", self.endTagP), + (("dd", "dt", "li"), self.endTagListItem), + (headingElements, self.endTagHeading), + (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small", + "strike", "strong", "tt", "u"), self.endTagFormatting), + (("applet", "marquee", "object"), self.endTagAppletMarqueeObject), + ("br", self.endTagBr), + ]) + self.endTagHandler.default = self.endTagOther + + def isMatchingFormattingElement(self, node1, node2): + if node1.name != node2.name or node1.namespace != node2.namespace: + return False + elif len(node1.attributes) != len(node2.attributes): + return False + else: + attributes1 = sorted(node1.attributes.items()) + attributes2 = sorted(node2.attributes.items()) + for attr1, attr2 in zip(attributes1, attributes2): + if attr1 != attr2: + return False + return True + + # helper + def addFormattingElement(self, token): + self.tree.insertElement(token) + element = self.tree.openElements[-1] + + matchingElements = [] + for node in self.tree.activeFormattingElements[::-1]: + if node is Marker: + break + elif 
self.isMatchingFormattingElement(node, element): + matchingElements.append(node) + + assert len(matchingElements) <= 3 + if len(matchingElements) == 3: + self.tree.activeFormattingElements.remove(matchingElements[-1]) + self.tree.activeFormattingElements.append(element) + + # the real deal + def processEOF(self): + allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td", + "tfoot", "th", "thead", "tr", "body", + "html")) + for node in self.tree.openElements[::-1]: + if node.name not in allowed_elements: + self.parser.parseError("expected-closing-tag-but-got-eof") + break + # Stop parsing + + def processSpaceCharactersDropNewline(self, token): + # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we + # want to drop leading newlines + data = token["data"] + self.processSpaceCharacters = self.processSpaceCharactersNonPre + if (data.startswith("\n") and + self.tree.openElements[-1].name in ("pre", "listing", "textarea") + and not self.tree.openElements[-1].hasContent()): + data = data[1:] + if data: + self.tree.reconstructActiveFormattingElements() + self.tree.insertText(data) + + def processCharacters(self, token): + if token["data"] == "\u0000": + # The tokenizer should always emit null on its own + return + self.tree.reconstructActiveFormattingElements() + self.tree.insertText(token["data"]) + # This must be bad for performance + if (self.parser.framesetOK and + any([char not in spaceCharacters + for char in token["data"]])): + self.parser.framesetOK = False + + def processSpaceCharacters(self, token): + self.tree.reconstructActiveFormattingElements() + self.tree.insertText(token["data"]) + + def startTagProcessInHead(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagBody(self, token): + self.parser.parseError("unexpected-start-tag", {"name": "body"}) + if (len(self.tree.openElements) == 1 + or self.tree.openElements[1].name != "body"): + assert self.parser.innerHTML + else: + self.parser.framesetOK = False + for attr, value in token["data"].items(): + if attr not in self.tree.openElements[1].attributes: + self.tree.openElements[1].attributes[attr] = value + + def startTagFrameset(self, token): + self.parser.parseError("unexpected-start-tag", {"name": "frameset"}) + if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"): + assert self.parser.innerHTML + elif not self.parser.framesetOK: + pass + else: + if self.tree.openElements[1].parent: + self.tree.openElements[1].parent.removeChild(self.tree.openElements[1]) + while self.tree.openElements[-1].name != "html": + self.tree.openElements.pop() + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inFrameset"] + + def startTagCloseP(self, token): + if self.tree.elementInScope("p", variant="button"): + self.endTagP(impliedTagToken("p")) + self.tree.insertElement(token) + + def startTagPreListing(self, token): + if self.tree.elementInScope("p", variant="button"): + self.endTagP(impliedTagToken("p")) + self.tree.insertElement(token) + self.parser.framesetOK = False + self.processSpaceCharacters = self.processSpaceCharactersDropNewline + + def startTagForm(self, token): + if self.tree.formPointer: + self.parser.parseError("unexpected-start-tag", {"name": "form"}) + else: + if self.tree.elementInScope("p", variant="button"): + self.endTagP(impliedTagToken("p")) + self.tree.insertElement(token) + self.tree.formPointer = self.tree.openElements[-1] + + def startTagListItem(self, token): + self.parser.framesetOK = False + + stopNamesMap = 
{"li": ["li"], + "dt": ["dt", "dd"], + "dd": ["dt", "dd"]} + stopNames = stopNamesMap[token["name"]] + for node in reversed(self.tree.openElements): + if node.name in stopNames: + self.parser.phase.processEndTag( + impliedTagToken(node.name, "EndTag")) + break + if (node.nameTuple in specialElements and + node.name not in ("address", "div", "p")): + break + + if self.tree.elementInScope("p", variant="button"): + self.parser.phase.processEndTag( + impliedTagToken("p", "EndTag")) + + self.tree.insertElement(token) + + def startTagPlaintext(self, token): + if self.tree.elementInScope("p", variant="button"): + self.endTagP(impliedTagToken("p")) + self.tree.insertElement(token) + self.parser.tokenizer.state = self.parser.tokenizer.plaintextState + + def startTagHeading(self, token): + if self.tree.elementInScope("p", variant="button"): + self.endTagP(impliedTagToken("p")) + if self.tree.openElements[-1].name in headingElements: + self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) + self.tree.openElements.pop() + self.tree.insertElement(token) + + def startTagA(self, token): + afeAElement = self.tree.elementInActiveFormattingElements("a") + if afeAElement: + self.parser.parseError("unexpected-start-tag-implies-end-tag", + {"startName": "a", "endName": "a"}) + self.endTagFormatting(impliedTagToken("a")) + if afeAElement in self.tree.openElements: + self.tree.openElements.remove(afeAElement) + if afeAElement in self.tree.activeFormattingElements: + self.tree.activeFormattingElements.remove(afeAElement) + self.tree.reconstructActiveFormattingElements() + self.addFormattingElement(token) + + def startTagFormatting(self, token): + self.tree.reconstructActiveFormattingElements() + self.addFormattingElement(token) + + def startTagNobr(self, token): + self.tree.reconstructActiveFormattingElements() + if self.tree.elementInScope("nobr"): + self.parser.parseError("unexpected-start-tag-implies-end-tag", + {"startName": "nobr", "endName": "nobr"}) + self.processEndTag(impliedTagToken("nobr")) + # XXX Need tests that trigger the following + self.tree.reconstructActiveFormattingElements() + self.addFormattingElement(token) + + def startTagButton(self, token): + if self.tree.elementInScope("button"): + self.parser.parseError("unexpected-start-tag-implies-end-tag", + {"startName": "button", "endName": "button"}) + self.processEndTag(impliedTagToken("button")) + return token + else: + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(token) + self.parser.framesetOK = False + + def startTagAppletMarqueeObject(self, token): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(token) + self.tree.activeFormattingElements.append(Marker) + self.parser.framesetOK = False + + def startTagXmp(self, token): + if self.tree.elementInScope("p", variant="button"): + self.endTagP(impliedTagToken("p")) + self.tree.reconstructActiveFormattingElements() + self.parser.framesetOK = False + self.parser.parseRCDataRawtext(token, "RAWTEXT") + + def startTagTable(self, token): + if self.parser.compatMode != "quirks": + if self.tree.elementInScope("p", variant="button"): + self.processEndTag(impliedTagToken("p")) + self.tree.insertElement(token) + self.parser.framesetOK = False + self.parser.phase = self.parser.phases["inTable"] + + def startTagVoidFormatting(self, token): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + self.parser.framesetOK = False + + def 
startTagInput(self, token): + framesetOK = self.parser.framesetOK + self.startTagVoidFormatting(token) + if ("type" in token["data"] and + token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): + # input type=hidden doesn't change framesetOK + self.parser.framesetOK = framesetOK + + def startTagParamSource(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagHr(self, token): + if self.tree.elementInScope("p", variant="button"): + self.endTagP(impliedTagToken("p")) + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + self.parser.framesetOK = False + + def startTagImage(self, token): + # No really... + self.parser.parseError("unexpected-start-tag-treated-as", + {"originalName": "image", "newName": "img"}) + self.processStartTag(impliedTagToken("img", "StartTag", + attributes=token["data"], + selfClosing=token["selfClosing"])) + + def startTagIsIndex(self, token): + self.parser.parseError("deprecated-tag", {"name": "isindex"}) + if self.tree.formPointer: + return + form_attrs = {} + if "action" in token["data"]: + form_attrs["action"] = token["data"]["action"] + self.processStartTag(impliedTagToken("form", "StartTag", + attributes=form_attrs)) + self.processStartTag(impliedTagToken("hr", "StartTag")) + self.processStartTag(impliedTagToken("label", "StartTag")) + # XXX Localization ... + if "prompt" in token["data"]: + prompt = token["data"]["prompt"] + else: + prompt = "This is a searchable index. Enter search keywords: " + self.processCharacters( + {"type": tokenTypes["Characters"], "data": prompt}) + attributes = token["data"].copy() + if "action" in attributes: + del attributes["action"] + if "prompt" in attributes: + del attributes["prompt"] + attributes["name"] = "isindex" + self.processStartTag(impliedTagToken("input", "StartTag", + attributes=attributes, + selfClosing= + token["selfClosing"])) + self.processEndTag(impliedTagToken("label")) + self.processStartTag(impliedTagToken("hr", "StartTag")) + self.processEndTag(impliedTagToken("form")) + + def startTagTextarea(self, token): + self.tree.insertElement(token) + self.parser.tokenizer.state = self.parser.tokenizer.rcdataState + self.processSpaceCharacters = self.processSpaceCharactersDropNewline + self.parser.framesetOK = False + + def startTagIFrame(self, token): + self.parser.framesetOK = False + self.startTagRawtext(token) + + def startTagRawtext(self, token): + """iframe, noembed noframes, noscript(if scripting enabled)""" + self.parser.parseRCDataRawtext(token, "RAWTEXT") + + def startTagOpt(self, token): + if self.tree.openElements[-1].name == "option": + self.parser.phase.processEndTag(impliedTagToken("option")) + self.tree.reconstructActiveFormattingElements() + self.parser.tree.insertElement(token) + + def startTagSelect(self, token): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(token) + self.parser.framesetOK = False + if self.parser.phase in (self.parser.phases["inTable"], + self.parser.phases["inCaption"], + self.parser.phases["inColumnGroup"], + self.parser.phases["inTableBody"], + self.parser.phases["inRow"], + self.parser.phases["inCell"]): + self.parser.phase = self.parser.phases["inSelectInTable"] + else: + self.parser.phase = self.parser.phases["inSelect"] + + def startTagRpRt(self, token): + if self.tree.elementInScope("ruby"): + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != "ruby": + self.parser.parseError() + 
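startTagListItem above is the implied-end-tag machinery for li, dt, and dd: a new <li> walks back down the open-element stack and closes any open li first, stopping at special elements other than address, div, and p. The effect is easy to observe, assuming a standard html5lib install:

    import html5lib

    doc = html5lib.parse("<ul><li>one<li>two<li>three</ul>",
                         namespaceHTMLElements=False)
    ul = doc.find(".//ul")
    print([child.tag for child in ul])
    # ['li', 'li', 'li'] -- each new <li> implicitly closed the previous one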
self.tree.insertElement(token) + + def startTagMath(self, token): + self.tree.reconstructActiveFormattingElements() + self.parser.adjustMathMLAttributes(token) + self.parser.adjustForeignAttributes(token) + token["namespace"] = namespaces["mathml"] + self.tree.insertElement(token) + # Need to get the parse error right for the case where the token + # has a namespace not equal to the xmlns attribute + if token["selfClosing"]: + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagSvg(self, token): + self.tree.reconstructActiveFormattingElements() + self.parser.adjustSVGAttributes(token) + self.parser.adjustForeignAttributes(token) + token["namespace"] = namespaces["svg"] + self.tree.insertElement(token) + # Need to get the parse error right for the case where the token + # has a namespace not equal to the xmlns attribute + if token["selfClosing"]: + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagMisplaced(self, token): + """ Elements that should be children of other elements that have a + different insertion mode; here they are ignored + "caption", "col", "colgroup", "frame", "frameset", "head", + "option", "optgroup", "tbody", "td", "tfoot", "th", "thead", + "tr", "noscript" + """ + self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]}) + + def startTagOther(self, token): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(token) + + def endTagP(self, token): + if not self.tree.elementInScope("p", variant="button"): + self.startTagCloseP(impliedTagToken("p", "StartTag")) + self.parser.parseError("unexpected-end-tag", {"name": "p"}) + self.endTagP(impliedTagToken("p", "EndTag")) + else: + self.tree.generateImpliedEndTags("p") + if self.tree.openElements[-1].name != "p": + self.parser.parseError("unexpected-end-tag", {"name": "p"}) + node = self.tree.openElements.pop() + while node.name != "p": + node = self.tree.openElements.pop() + + def endTagBody(self, token): + if not self.tree.elementInScope("body"): + self.parser.parseError() + return + elif self.tree.openElements[-1].name != "body": + for node in self.tree.openElements[2:]: + if node.name not in frozenset(("dd", "dt", "li", "optgroup", + "option", "p", "rp", "rt", + "tbody", "td", "tfoot", + "th", "thead", "tr", "body", + "html")): + # Not sure this is the correct name for the parse error + self.parser.parseError( + "expected-one-end-tag-but-got-another", + {"expectedName": "body", "gotName": node.name}) + break + self.parser.phase = self.parser.phases["afterBody"] + + def endTagHtml(self, token): + # We repeat the test for the body end tag token being ignored here + if self.tree.elementInScope("body"): + self.endTagBody(impliedTagToken("body")) + return token + + def endTagBlock(self, token): + # Put us back in the right whitespace handling mode + if token["name"] == "pre": + self.processSpaceCharacters = self.processSpaceCharactersNonPre + inScope = self.tree.elementInScope(token["name"]) + if inScope: + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError("end-tag-too-early", {"name": token["name"]}) + if inScope: + node = self.tree.openElements.pop() + while node.name != token["name"]: + node = self.tree.openElements.pop() + + def endTagForm(self, token): + node = self.tree.formPointer + self.tree.formPointer = None + if node is None or not self.tree.elementInScope(node): + self.parser.parseError("unexpected-end-tag", + {"name": "form"}) + else: + 
self.tree.generateImpliedEndTags() + if self.tree.openElements[-1] != node: + self.parser.parseError("end-tag-too-early-ignored", + {"name": "form"}) + self.tree.openElements.remove(node) + + def endTagListItem(self, token): + if token["name"] == "li": + variant = "list" + else: + variant = None + if not self.tree.elementInScope(token["name"], variant=variant): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + else: + self.tree.generateImpliedEndTags(exclude=token["name"]) + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError( + "end-tag-too-early", + {"name": token["name"]}) + node = self.tree.openElements.pop() + while node.name != token["name"]: + node = self.tree.openElements.pop() + + def endTagHeading(self, token): + for item in headingElements: + if self.tree.elementInScope(item): + self.tree.generateImpliedEndTags() + break + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError("end-tag-too-early", {"name": token["name"]}) + + for item in headingElements: + if self.tree.elementInScope(item): + item = self.tree.openElements.pop() + while item.name not in headingElements: + item = self.tree.openElements.pop() + break + + def endTagFormatting(self, token): + """The much-feared adoption agency algorithm""" + # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867 + # XXX Better parseError messages appreciated. + + # Step 1 + outerLoopCounter = 0 + + # Step 2 + while outerLoopCounter < 8: + + # Step 3 + outerLoopCounter += 1 + + # Step 4: + + # Let the formatting element be the last element in + # the list of active formatting elements that: + # - is between the end of the list and the last scope + # marker in the list, if any, or the start of the list + # otherwise, and + # - has the same tag name as the token. + formattingElement = self.tree.elementInActiveFormattingElements( + token["name"]) + if (not formattingElement or + (formattingElement in self.tree.openElements and + not self.tree.elementInScope(formattingElement.name))): + # If there is no such node, then abort these steps + # and instead act as described in the "any other + # end tag" entry below. + self.endTagOther(token) + return + + # Otherwise, if there is such a node, but that node is + # not in the stack of open elements, then this is a + # parse error; remove the element from the list, and + # abort these steps. + elif formattingElement not in self.tree.openElements: + self.parser.parseError("adoption-agency-1.2", {"name": token["name"]}) + self.tree.activeFormattingElements.remove(formattingElement) + return + + # Otherwise, if there is such a node, and that node is + # also in the stack of open elements, but the element + # is not in scope, then this is a parse error; ignore + # the token, and abort these steps. + elif not self.tree.elementInScope(formattingElement.name): + self.parser.parseError("adoption-agency-4.4", {"name": token["name"]}) + return + + # Otherwise, there is a formatting element and that + # element is in the stack and is in scope. If the + # element is not the current node, this is a parse + # error. In any case, proceed with the algorithm as + # written in the following steps. 
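The steps of the adoption agency algorithm continue below; its observable job is repairing misnested formatting elements, which is easiest to see on the classic <b>/<i> overlap. A quick check, assuming a standard html5lib install:

    import html5lib
    from html5lib import treewalkers
    from html5lib.serializer import HTMLSerializer

    doc = html5lib.parse("<p>1<b>2<i>3</b>4</i>5</p>")
    walker = treewalkers.getTreeWalker("etree")
    print(HTMLSerializer().render(walker(doc)))
    # The overlap is repaired into proper nesting, along the lines of
    # <p>1<b>2<i>3</i></b><i>4</i>5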
+ else: + if formattingElement != self.tree.openElements[-1]: + self.parser.parseError("adoption-agency-1.3", {"name": token["name"]}) + + # Step 5: + + # Let the furthest block be the topmost node in the + # stack of open elements that is lower in the stack + # than the formatting element, and is an element in + # the special category. There might not be one. + afeIndex = self.tree.openElements.index(formattingElement) + furthestBlock = None + for element in self.tree.openElements[afeIndex:]: + if element.nameTuple in specialElements: + furthestBlock = element + break + + # Step 6: + + # If there is no furthest block, then the UA must + # first pop all the nodes from the bottom of the stack + # of open elements, from the current node up to and + # including the formatting element, then remove the + # formatting element from the list of active + # formatting elements, and finally abort these steps. + if furthestBlock is None: + element = self.tree.openElements.pop() + while element != formattingElement: + element = self.tree.openElements.pop() + self.tree.activeFormattingElements.remove(element) + return + + # Step 7 + commonAncestor = self.tree.openElements[afeIndex - 1] + + # Step 8: + # The bookmark is supposed to help us identify where to reinsert + # nodes in step 15. We have to ensure that we reinsert nodes after + # the node before the active formatting element. Note the bookmark + # can move in step 9.7 + bookmark = self.tree.activeFormattingElements.index(formattingElement) + + # Step 9 + lastNode = node = furthestBlock + innerLoopCounter = 0 + + index = self.tree.openElements.index(node) + while innerLoopCounter < 3: + innerLoopCounter += 1 + # Node is element before node in open elements + index -= 1 + node = self.tree.openElements[index] + if node not in self.tree.activeFormattingElements: + self.tree.openElements.remove(node) + continue + # Step 9.6 + if node == formattingElement: + break + # Step 9.7 + if lastNode == furthestBlock: + bookmark = self.tree.activeFormattingElements.index(node) + 1 + # Step 9.8 + clone = node.cloneNode() + # Replace node with clone + self.tree.activeFormattingElements[ + self.tree.activeFormattingElements.index(node)] = clone + self.tree.openElements[ + self.tree.openElements.index(node)] = clone + node = clone + # Step 9.9 + # Remove lastNode from its parents, if any + if lastNode.parent: + lastNode.parent.removeChild(lastNode) + node.appendChild(lastNode) + # Step 9.10 + lastNode = node + + # Step 10 + # Foster parent lastNode if commonAncestor is a + # table, tbody, tfoot, thead, or tr we need to foster + # parent the lastNode + if lastNode.parent: + lastNode.parent.removeChild(lastNode) + + if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")): + parent, insertBefore = self.tree.getTableMisnestedNodePosition() + parent.insertBefore(lastNode, insertBefore) + else: + commonAncestor.appendChild(lastNode) + + # Step 11 + clone = formattingElement.cloneNode() + + # Step 12 + furthestBlock.reparentChildren(clone) + + # Step 13 + furthestBlock.appendChild(clone) + + # Step 14 + self.tree.activeFormattingElements.remove(formattingElement) + self.tree.activeFormattingElements.insert(bookmark, clone) + + # Step 15 + self.tree.openElements.remove(formattingElement) + self.tree.openElements.insert( + self.tree.openElements.index(furthestBlock) + 1, clone) + + def endTagAppletMarqueeObject(self, token): + if self.tree.elementInScope(token["name"]): + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != 
token["name"]: + self.parser.parseError("end-tag-too-early", {"name": token["name"]}) + + if self.tree.elementInScope(token["name"]): + element = self.tree.openElements.pop() + while element.name != token["name"]: + element = self.tree.openElements.pop() + self.tree.clearActiveFormattingElements() + + def endTagBr(self, token): + self.parser.parseError("unexpected-end-tag-treated-as", + {"originalName": "br", "newName": "br element"}) + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(impliedTagToken("br", "StartTag")) + self.tree.openElements.pop() + + def endTagOther(self, token): + for node in self.tree.openElements[::-1]: + if node.name == token["name"]: + self.tree.generateImpliedEndTags(exclude=token["name"]) + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + while self.tree.openElements.pop() != node: + pass + break + else: + if node.nameTuple in specialElements: + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + break + + class TextPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([]) + self.startTagHandler.default = self.startTagOther + self.endTagHandler = utils.MethodDispatcher([ + ("script", self.endTagScript)]) + self.endTagHandler.default = self.endTagOther + + def processCharacters(self, token): + self.tree.insertText(token["data"]) + + def processEOF(self): + self.parser.parseError("expected-named-closing-tag-but-got-eof", + {"name": self.tree.openElements[-1].name}) + self.tree.openElements.pop() + self.parser.phase = self.parser.originalPhase + return True + + def startTagOther(self, token): + assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name'] + + def endTagScript(self, token): + node = self.tree.openElements.pop() + assert node.name == "script" + self.parser.phase = self.parser.originalPhase + # The rest of this method is all stuff that only happens if + # document.write works + + def endTagOther(self, token): + self.tree.openElements.pop() + self.parser.phase = self.parser.originalPhase + + class InTablePhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-table + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("caption", self.startTagCaption), + ("colgroup", self.startTagColgroup), + ("col", self.startTagCol), + (("tbody", "tfoot", "thead"), self.startTagRowGroup), + (("td", "th", "tr"), self.startTagImplyTbody), + ("table", self.startTagTable), + (("style", "script"), self.startTagStyleScript), + ("input", self.startTagInput), + ("form", self.startTagForm) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("table", self.endTagTable), + (("body", "caption", "col", "colgroup", "html", "tbody", "td", + "tfoot", "th", "thead", "tr"), self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + # helper methods + def clearStackToTableContext(self): + # "clear the stack back to a table context" + while self.tree.openElements[-1].name not in ("table", "html"): + # self.parser.parseError("unexpected-implied-end-tag-in-table", + # {"name": self.tree.openElements[-1].name}) + self.tree.openElements.pop() + # When the current node is <html> it's an innerHTML case + + # processing methods + def processEOF(self): + if 
self.tree.openElements[-1].name != "html": + self.parser.parseError("eof-in-table") + else: + assert self.parser.innerHTML + # Stop parsing + + def processSpaceCharacters(self, token): + originalPhase = self.parser.phase + self.parser.phase = self.parser.phases["inTableText"] + self.parser.phase.originalPhase = originalPhase + self.parser.phase.processSpaceCharacters(token) + + def processCharacters(self, token): + originalPhase = self.parser.phase + self.parser.phase = self.parser.phases["inTableText"] + self.parser.phase.originalPhase = originalPhase + self.parser.phase.processCharacters(token) + + def insertText(self, token): + # If we get here there must be at least one non-whitespace character + # Do the table magic! + self.tree.insertFromTable = True + self.parser.phases["inBody"].processCharacters(token) + self.tree.insertFromTable = False + + def startTagCaption(self, token): + self.clearStackToTableContext() + self.tree.activeFormattingElements.append(Marker) + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inCaption"] + + def startTagColgroup(self, token): + self.clearStackToTableContext() + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inColumnGroup"] + + def startTagCol(self, token): + self.startTagColgroup(impliedTagToken("colgroup", "StartTag")) + return token + + def startTagRowGroup(self, token): + self.clearStackToTableContext() + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inTableBody"] + + def startTagImplyTbody(self, token): + self.startTagRowGroup(impliedTagToken("tbody", "StartTag")) + return token + + def startTagTable(self, token): + self.parser.parseError("unexpected-start-tag-implies-end-tag", + {"startName": "table", "endName": "table"}) + self.parser.phase.processEndTag(impliedTagToken("table")) + if not self.parser.innerHTML: + return token + + def startTagStyleScript(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagInput(self, token): + if ("type" in token["data"] and + token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): + self.parser.parseError("unexpected-hidden-input-in-table") + self.tree.insertElement(token) + # XXX associate with form + self.tree.openElements.pop() + else: + self.startTagOther(token) + + def startTagForm(self, token): + self.parser.parseError("unexpected-form-in-table") + if self.tree.formPointer is None: + self.tree.insertElement(token) + self.tree.formPointer = self.tree.openElements[-1] + self.tree.openElements.pop() + + def startTagOther(self, token): + self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]}) + # Do the table magic! 
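+            # Editor's note (illustrative, not part of the upstream
+            # html5lib source): setting insertFromTable makes the tree
+            # builder "foster parent" misplaced content. For example,
+            # parsing "<table><div>x</div></table>" hoists the <div>
+            # out of the table, so the document serializes roughly as
+            # "<div>x</div><table></table>".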
+ self.tree.insertFromTable = True + self.parser.phases["inBody"].processStartTag(token) + self.tree.insertFromTable = False + + def endTagTable(self, token): + if self.tree.elementInScope("table", variant="table"): + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != "table": + self.parser.parseError("end-tag-too-early-named", + {"gotName": "table", + "expectedName": self.tree.openElements[-1].name}) + while self.tree.openElements[-1].name != "table": + self.tree.openElements.pop() + self.tree.openElements.pop() + self.parser.resetInsertionMode() + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def endTagIgnore(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]}) + # Do the table magic! + self.tree.insertFromTable = True + self.parser.phases["inBody"].processEndTag(token) + self.tree.insertFromTable = False + + class InTableTextPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.originalPhase = None + self.characterTokens = [] + + def flushCharacters(self): + data = "".join([item["data"] for item in self.characterTokens]) + if any([item not in spaceCharacters for item in data]): + token = {"type": tokenTypes["Characters"], "data": data} + self.parser.phases["inTable"].insertText(token) + elif data: + self.tree.insertText(data) + self.characterTokens = [] + + def processComment(self, token): + self.flushCharacters() + self.parser.phase = self.originalPhase + return token + + def processEOF(self): + self.flushCharacters() + self.parser.phase = self.originalPhase + return True + + def processCharacters(self, token): + if token["data"] == "\u0000": + return + self.characterTokens.append(token) + + def processSpaceCharacters(self, token): + # pretty sure we should never reach here + self.characterTokens.append(token) + # assert False + + def processStartTag(self, token): + self.flushCharacters() + self.parser.phase = self.originalPhase + return token + + def processEndTag(self, token): + self.flushCharacters() + self.parser.phase = self.originalPhase + return token + + class InCaptionPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-caption + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", + "thead", "tr"), self.startTagTableElement) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("caption", self.endTagCaption), + ("table", self.endTagTable), + (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th", + "thead", "tr"), self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + def ignoreEndTagCaption(self): + return not self.tree.elementInScope("caption", variant="table") + + def processEOF(self): + self.parser.phases["inBody"].processEOF() + + def processCharacters(self, token): + return self.parser.phases["inBody"].processCharacters(token) + + def startTagTableElement(self, token): + self.parser.parseError() + # XXX Have to duplicate logic here to find out if the tag is ignored + ignoreEndTag = self.ignoreEndTagCaption() + self.parser.phase.processEndTag(impliedTagToken("caption")) + if not ignoreEndTag: + return token + + def startTagOther(self, token): + 
return self.parser.phases["inBody"].processStartTag(token) + + def endTagCaption(self, token): + if not self.ignoreEndTagCaption(): + # AT this code is quite similar to endTagTable in "InTable" + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != "caption": + self.parser.parseError("expected-one-end-tag-but-got-another", + {"gotName": "caption", + "expectedName": self.tree.openElements[-1].name}) + while self.tree.openElements[-1].name != "caption": + self.tree.openElements.pop() + self.tree.openElements.pop() + self.tree.clearActiveFormattingElements() + self.parser.phase = self.parser.phases["inTable"] + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def endTagTable(self, token): + self.parser.parseError() + ignoreEndTag = self.ignoreEndTagCaption() + self.parser.phase.processEndTag(impliedTagToken("caption")) + if not ignoreEndTag: + return token + + def endTagIgnore(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def endTagOther(self, token): + return self.parser.phases["inBody"].processEndTag(token) + + class InColumnGroupPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-column + + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("col", self.startTagCol) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("colgroup", self.endTagColgroup), + ("col", self.endTagCol) + ]) + self.endTagHandler.default = self.endTagOther + + def ignoreEndTagColgroup(self): + return self.tree.openElements[-1].name == "html" + + def processEOF(self): + if self.tree.openElements[-1].name == "html": + assert self.parser.innerHTML + return + else: + ignoreEndTag = self.ignoreEndTagColgroup() + self.endTagColgroup(impliedTagToken("colgroup")) + if not ignoreEndTag: + return True + + def processCharacters(self, token): + ignoreEndTag = self.ignoreEndTagColgroup() + self.endTagColgroup(impliedTagToken("colgroup")) + if not ignoreEndTag: + return token + + def startTagCol(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + + def startTagOther(self, token): + ignoreEndTag = self.ignoreEndTagColgroup() + self.endTagColgroup(impliedTagToken("colgroup")) + if not ignoreEndTag: + return token + + def endTagColgroup(self, token): + if self.ignoreEndTagColgroup(): + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + else: + self.tree.openElements.pop() + self.parser.phase = self.parser.phases["inTable"] + + def endTagCol(self, token): + self.parser.parseError("no-end-tag", {"name": "col"}) + + def endTagOther(self, token): + ignoreEndTag = self.ignoreEndTagColgroup() + self.endTagColgroup(impliedTagToken("colgroup")) + if not ignoreEndTag: + return token + + class InTableBodyPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-table0 + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("tr", self.startTagTr), + (("td", "th"), self.startTagTableCell), + (("caption", "col", "colgroup", "tbody", "tfoot", "thead"), + self.startTagTableOther) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("tbody", "tfoot", "thead"), self.endTagTableRowGroup), + ("table", self.endTagTable), + (("body", "caption", "col", 
"colgroup", "html", "td", "th", + "tr"), self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + # helper methods + def clearStackToTableBodyContext(self): + while self.tree.openElements[-1].name not in ("tbody", "tfoot", + "thead", "html"): + # self.parser.parseError("unexpected-implied-end-tag-in-table", + # {"name": self.tree.openElements[-1].name}) + self.tree.openElements.pop() + if self.tree.openElements[-1].name == "html": + assert self.parser.innerHTML + + # the rest + def processEOF(self): + self.parser.phases["inTable"].processEOF() + + def processSpaceCharacters(self, token): + return self.parser.phases["inTable"].processSpaceCharacters(token) + + def processCharacters(self, token): + return self.parser.phases["inTable"].processCharacters(token) + + def startTagTr(self, token): + self.clearStackToTableBodyContext() + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inRow"] + + def startTagTableCell(self, token): + self.parser.parseError("unexpected-cell-in-table-body", + {"name": token["name"]}) + self.startTagTr(impliedTagToken("tr", "StartTag")) + return token + + def startTagTableOther(self, token): + # XXX AT Any ideas on how to share this with endTagTable? + if (self.tree.elementInScope("tbody", variant="table") or + self.tree.elementInScope("thead", variant="table") or + self.tree.elementInScope("tfoot", variant="table")): + self.clearStackToTableBodyContext() + self.endTagTableRowGroup( + impliedTagToken(self.tree.openElements[-1].name)) + return token + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def startTagOther(self, token): + return self.parser.phases["inTable"].processStartTag(token) + + def endTagTableRowGroup(self, token): + if self.tree.elementInScope(token["name"], variant="table"): + self.clearStackToTableBodyContext() + self.tree.openElements.pop() + self.parser.phase = self.parser.phases["inTable"] + else: + self.parser.parseError("unexpected-end-tag-in-table-body", + {"name": token["name"]}) + + def endTagTable(self, token): + if (self.tree.elementInScope("tbody", variant="table") or + self.tree.elementInScope("thead", variant="table") or + self.tree.elementInScope("tfoot", variant="table")): + self.clearStackToTableBodyContext() + self.endTagTableRowGroup( + impliedTagToken(self.tree.openElements[-1].name)) + return token + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def endTagIgnore(self, token): + self.parser.parseError("unexpected-end-tag-in-table-body", + {"name": token["name"]}) + + def endTagOther(self, token): + return self.parser.phases["inTable"].processEndTag(token) + + class InRowPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-row + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("td", "th"), self.startTagTableCell), + (("caption", "col", "colgroup", "tbody", "tfoot", "thead", + "tr"), self.startTagTableOther) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("tr", self.endTagTr), + ("table", self.endTagTable), + (("tbody", "tfoot", "thead"), self.endTagTableRowGroup), + (("body", "caption", "col", "colgroup", "html", "td", "th"), + self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + # helper methods (XXX unify this with other table helper methods) + def clearStackToTableRowContext(self): + while 
self.tree.openElements[-1].name not in ("tr", "html"): + self.parser.parseError("unexpected-implied-end-tag-in-table-row", + {"name": self.tree.openElements[-1].name}) + self.tree.openElements.pop() + + def ignoreEndTagTr(self): + return not self.tree.elementInScope("tr", variant="table") + + # the rest + def processEOF(self): + self.parser.phases["inTable"].processEOF() + + def processSpaceCharacters(self, token): + return self.parser.phases["inTable"].processSpaceCharacters(token) + + def processCharacters(self, token): + return self.parser.phases["inTable"].processCharacters(token) + + def startTagTableCell(self, token): + self.clearStackToTableRowContext() + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inCell"] + self.tree.activeFormattingElements.append(Marker) + + def startTagTableOther(self, token): + ignoreEndTag = self.ignoreEndTagTr() + self.endTagTr(impliedTagToken("tr")) + # XXX how are we sure it's always ignored in the innerHTML case? + if not ignoreEndTag: + return token + + def startTagOther(self, token): + return self.parser.phases["inTable"].processStartTag(token) + + def endTagTr(self, token): + if not self.ignoreEndTagTr(): + self.clearStackToTableRowContext() + self.tree.openElements.pop() + self.parser.phase = self.parser.phases["inTableBody"] + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def endTagTable(self, token): + ignoreEndTag = self.ignoreEndTagTr() + self.endTagTr(impliedTagToken("tr")) + # Reprocess the current tag if the tr end tag was not ignored + # XXX how are we sure it's always ignored in the innerHTML case? + if not ignoreEndTag: + return token + + def endTagTableRowGroup(self, token): + if self.tree.elementInScope(token["name"], variant="table"): + self.endTagTr(impliedTagToken("tr")) + return token + else: + self.parser.parseError() + + def endTagIgnore(self, token): + self.parser.parseError("unexpected-end-tag-in-table-row", + {"name": token["name"]}) + + def endTagOther(self, token): + return self.parser.phases["inTable"].processEndTag(token) + + class InCellPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-cell + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", + "thead", "tr"), self.startTagTableOther) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("td", "th"), self.endTagTableCell), + (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore), + (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply) + ]) + self.endTagHandler.default = self.endTagOther + + # helper + def closeCell(self): + if self.tree.elementInScope("td", variant="table"): + self.endTagTableCell(impliedTagToken("td")) + elif self.tree.elementInScope("th", variant="table"): + self.endTagTableCell(impliedTagToken("th")) + + # the rest + def processEOF(self): + self.parser.phases["inBody"].processEOF() + + def processCharacters(self, token): + return self.parser.phases["inBody"].processCharacters(token) + + def startTagTableOther(self, token): + if (self.tree.elementInScope("td", variant="table") or + self.tree.elementInScope("th", variant="table")): + self.closeCell() + return token + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def startTagOther(self, token): + return 
self.parser.phases["inBody"].processStartTag(token) + + def endTagTableCell(self, token): + if self.tree.elementInScope(token["name"], variant="table"): + self.tree.generateImpliedEndTags(token["name"]) + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError("unexpected-cell-end-tag", + {"name": token["name"]}) + while True: + node = self.tree.openElements.pop() + if node.name == token["name"]: + break + else: + self.tree.openElements.pop() + self.tree.clearActiveFormattingElements() + self.parser.phase = self.parser.phases["inRow"] + else: + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def endTagIgnore(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def endTagImply(self, token): + if self.tree.elementInScope(token["name"], variant="table"): + self.closeCell() + return token + else: + # sometimes innerHTML case + self.parser.parseError() + + def endTagOther(self, token): + return self.parser.phases["inBody"].processEndTag(token) + + class InSelectPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("option", self.startTagOption), + ("optgroup", self.startTagOptgroup), + ("select", self.startTagSelect), + (("input", "keygen", "textarea"), self.startTagInput), + ("script", self.startTagScript) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("option", self.endTagOption), + ("optgroup", self.endTagOptgroup), + ("select", self.endTagSelect) + ]) + self.endTagHandler.default = self.endTagOther + + # http://www.whatwg.org/specs/web-apps/current-work/#in-select + def processEOF(self): + if self.tree.openElements[-1].name != "html": + self.parser.parseError("eof-in-select") + else: + assert self.parser.innerHTML + + def processCharacters(self, token): + if token["data"] == "\u0000": + return + self.tree.insertText(token["data"]) + + def startTagOption(self, token): + # We need to imply </option> if <option> is the current node. 
+ if self.tree.openElements[-1].name == "option": + self.tree.openElements.pop() + self.tree.insertElement(token) + + def startTagOptgroup(self, token): + if self.tree.openElements[-1].name == "option": + self.tree.openElements.pop() + if self.tree.openElements[-1].name == "optgroup": + self.tree.openElements.pop() + self.tree.insertElement(token) + + def startTagSelect(self, token): + self.parser.parseError("unexpected-select-in-select") + self.endTagSelect(impliedTagToken("select")) + + def startTagInput(self, token): + self.parser.parseError("unexpected-input-in-select") + if self.tree.elementInScope("select", variant="select"): + self.endTagSelect(impliedTagToken("select")) + return token + else: + assert self.parser.innerHTML + + def startTagScript(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("unexpected-start-tag-in-select", + {"name": token["name"]}) + + def endTagOption(self, token): + if self.tree.openElements[-1].name == "option": + self.tree.openElements.pop() + else: + self.parser.parseError("unexpected-end-tag-in-select", + {"name": "option"}) + + def endTagOptgroup(self, token): + # </optgroup> implicitly closes <option> + if (self.tree.openElements[-1].name == "option" and + self.tree.openElements[-2].name == "optgroup"): + self.tree.openElements.pop() + # It also closes </optgroup> + if self.tree.openElements[-1].name == "optgroup": + self.tree.openElements.pop() + # But nothing else + else: + self.parser.parseError("unexpected-end-tag-in-select", + {"name": "optgroup"}) + + def endTagSelect(self, token): + if self.tree.elementInScope("select", variant="select"): + node = self.tree.openElements.pop() + while node.name != "select": + node = self.tree.openElements.pop() + self.parser.resetInsertionMode() + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag-in-select", + {"name": token["name"]}) + + class InSelectInTablePhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), + self.startTagTable) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), + self.endTagTable) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.parser.phases["inSelect"].processEOF() + + def processCharacters(self, token): + return self.parser.phases["inSelect"].processCharacters(token) + + def startTagTable(self, token): + self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]}) + self.endTagOther(impliedTagToken("select")) + return token + + def startTagOther(self, token): + return self.parser.phases["inSelect"].processStartTag(token) + + def endTagTable(self, token): + self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]}) + if self.tree.elementInScope(token["name"], variant="table"): + self.endTagOther(impliedTagToken("select")) + return token + + def endTagOther(self, token): + return self.parser.phases["inSelect"].processEndTag(token) + + class InForeignContentPhase(Phase): + breakoutElements = frozenset(["b", "big", "blockquote", "body", "br", + "center", "code", "dd", "div", "dl", "dt", + "em", 
"embed", "h1", "h2", "h3", + "h4", "h5", "h6", "head", "hr", "i", "img", + "li", "listing", "menu", "meta", "nobr", + "ol", "p", "pre", "ruby", "s", "small", + "span", "strong", "strike", "sub", "sup", + "table", "tt", "u", "ul", "var"]) + + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + def adjustSVGTagNames(self, token): + replacements = {"altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath"} + + if token["name"] in replacements: + token["name"] = replacements[token["name"]] + + def processCharacters(self, token): + if token["data"] == "\u0000": + token["data"] = "\uFFFD" + elif (self.parser.framesetOK and + any(char not in spaceCharacters for char in token["data"])): + self.parser.framesetOK = False + Phase.processCharacters(self, token) + + def processStartTag(self, token): + currentNode = self.tree.openElements[-1] + if (token["name"] in self.breakoutElements or + (token["name"] == "font" and + set(token["data"].keys()) & set(["color", "face", "size"]))): + self.parser.parseError("unexpected-html-element-in-foreign-content", + {"name": token["name"]}) + while (self.tree.openElements[-1].namespace != + self.tree.defaultNamespace and + not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and + not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])): + self.tree.openElements.pop() + return token + + else: + if currentNode.namespace == namespaces["mathml"]: + self.parser.adjustMathMLAttributes(token) + elif currentNode.namespace == namespaces["svg"]: + self.adjustSVGTagNames(token) + self.parser.adjustSVGAttributes(token) + self.parser.adjustForeignAttributes(token) + token["namespace"] = currentNode.namespace + self.tree.insertElement(token) + if token["selfClosing"]: + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def processEndTag(self, token): + nodeIndex = len(self.tree.openElements) - 1 + node = self.tree.openElements[-1] + if node.name != token["name"]: + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + while True: + if node.name.translate(asciiUpper2Lower) == token["name"]: + # XXX this isn't in the spec but it seems necessary + if self.parser.phase == self.parser.phases["inTableText"]: + self.parser.phase.flushCharacters() + self.parser.phase = self.parser.phase.originalPhase + while self.tree.openElements.pop() != node: + assert self.tree.openElements + new_token = None + break 
+ nodeIndex -= 1 + + node = self.tree.openElements[nodeIndex] + if node.namespace != self.tree.defaultNamespace: + continue + else: + new_token = self.parser.phase.processEndTag(token) + break + return new_token + + class AfterBodyPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + # Stop parsing + pass + + def processComment(self, token): + # This is needed because data is to be appended to the <html> element + # here and not to whatever is currently open. + self.tree.insertComment(token, self.tree.openElements[0]) + + def processCharacters(self, token): + self.parser.parseError("unexpected-char-after-body") + self.parser.phase = self.parser.phases["inBody"] + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("unexpected-start-tag-after-body", + {"name": token["name"]}) + self.parser.phase = self.parser.phases["inBody"] + return token + + def endTagHtml(self, name): + if self.parser.innerHTML: + self.parser.parseError("unexpected-end-tag-after-body-innerhtml") + else: + self.parser.phase = self.parser.phases["afterAfterBody"] + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag-after-body", + {"name": token["name"]}) + self.parser.phase = self.parser.phases["inBody"] + return token + + class InFramesetPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("frameset", self.startTagFrameset), + ("frame", self.startTagFrame), + ("noframes", self.startTagNoframes) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("frameset", self.endTagFrameset) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + if self.tree.openElements[-1].name != "html": + self.parser.parseError("eof-in-frameset") + else: + assert self.parser.innerHTML + + def processCharacters(self, token): + self.parser.parseError("unexpected-char-in-frameset") + + def startTagFrameset(self, token): + self.tree.insertElement(token) + + def startTagFrame(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + + def startTagNoframes(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("unexpected-start-tag-in-frameset", + {"name": token["name"]}) + + def endTagFrameset(self, token): + if self.tree.openElements[-1].name == "html": + # innerHTML case + self.parser.parseError("unexpected-frameset-in-frameset-innerhtml") + else: + self.tree.openElements.pop() + if (not self.parser.innerHTML and + self.tree.openElements[-1].name != "frameset"): + # If we're not in innerHTML mode and the current node is not a + # "frameset" element (anymore) then switch. 
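+                # Editor's illustration (not upstream html5lib): with nested
+                # framesets such as "<frameset><frameset></frameset></frameset>",
+                # popping the inner element still leaves a "frameset" on top
+                # of the stack, so only the outermost </frameset> triggers
+                # the switch to the "afterFrameset" phase.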
+ self.parser.phase = self.parser.phases["afterFrameset"] + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag-in-frameset", + {"name": token["name"]}) + + class AfterFramesetPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#after3 + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("noframes", self.startTagNoframes) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("html", self.endTagHtml) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + # Stop parsing + pass + + def processCharacters(self, token): + self.parser.parseError("unexpected-char-after-frameset") + + def startTagNoframes(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("unexpected-start-tag-after-frameset", + {"name": token["name"]}) + + def endTagHtml(self, token): + self.parser.phase = self.parser.phases["afterAfterFrameset"] + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag-after-frameset", + {"name": token["name"]}) + + class AfterAfterBodyPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml) + ]) + self.startTagHandler.default = self.startTagOther + + def processEOF(self): + pass + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processSpaceCharacters(self, token): + return self.parser.phases["inBody"].processSpaceCharacters(token) + + def processCharacters(self, token): + self.parser.parseError("expected-eof-but-got-char") + self.parser.phase = self.parser.phases["inBody"] + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("expected-eof-but-got-start-tag", + {"name": token["name"]}) + self.parser.phase = self.parser.phases["inBody"] + return token + + def processEndTag(self, token): + self.parser.parseError("expected-eof-but-got-end-tag", + {"name": token["name"]}) + self.parser.phase = self.parser.phases["inBody"] + return token + + class AfterAfterFramesetPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("noframes", self.startTagNoFrames) + ]) + self.startTagHandler.default = self.startTagOther + + def processEOF(self): + pass + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processSpaceCharacters(self, token): + return self.parser.phases["inBody"].processSpaceCharacters(token) + + def processCharacters(self, token): + self.parser.parseError("expected-eof-but-got-char") + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagNoFrames(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("expected-eof-but-got-start-tag", + {"name": token["name"]}) + + def processEndTag(self, token): + self.parser.parseError("expected-eof-but-got-end-tag", + {"name": token["name"]}) + + return { + "initial": InitialPhase, + "beforeHtml": BeforeHtmlPhase, + "beforeHead": BeforeHeadPhase, + "inHead": 
InHeadPhase, + # XXX "inHeadNoscript": InHeadNoScriptPhase, + "afterHead": AfterHeadPhase, + "inBody": InBodyPhase, + "text": TextPhase, + "inTable": InTablePhase, + "inTableText": InTableTextPhase, + "inCaption": InCaptionPhase, + "inColumnGroup": InColumnGroupPhase, + "inTableBody": InTableBodyPhase, + "inRow": InRowPhase, + "inCell": InCellPhase, + "inSelect": InSelectPhase, + "inSelectInTable": InSelectInTablePhase, + "inForeignContent": InForeignContentPhase, + "afterBody": AfterBodyPhase, + "inFrameset": InFramesetPhase, + "afterFrameset": AfterFramesetPhase, + "afterAfterBody": AfterAfterBodyPhase, + "afterAfterFrameset": AfterAfterFramesetPhase, + # XXX after after frameset + } + + +def impliedTagToken(name, type="EndTag", attributes=None, + selfClosing=False): + if attributes is None: + attributes = {} + return {"type": tokenTypes[type], "name": name, "data": attributes, + "selfClosing": selfClosing} + + +class ParseError(Exception): + """Error in parsed document""" + pass diff --git a/awx/lib/site-packages/pip/vendor/html5lib/ihatexml.py b/awx/lib/site-packages/pip/vendor/html5lib/ihatexml.py new file mode 100644 index 0000000000..0fc79308ef --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/ihatexml.py @@ -0,0 +1,285 @@ +from __future__ import absolute_import, division, unicode_literals + +import re +import warnings + +from .constants import DataLossWarning + +baseChar = """ +[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | +[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | +[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | +[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | +[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | +[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | +[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | +[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | +[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | +[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | +[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | +[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | +[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | +[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | +[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | +[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | +[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | +[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | +[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | +[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | +[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | +[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | +[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | +[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | +[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | +[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | +[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | +[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | +[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | +[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | +#x0E8D | [#x0E94-#x0E97] | 
[#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | +#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | +#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | +[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | +[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | +#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | +[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | +[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | +[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | +[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | +[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | +#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | +[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | +[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | +[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | +[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]""" + +ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]""" + +combiningCharacter = """ +[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | +[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | +[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | +[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | +#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | +[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | +[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | +#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | +[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | +[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | +#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | +[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | +[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | +[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | +[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | +[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | +#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | +[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | +#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | +[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | +[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | +#x3099 | #x309A""" + +digit = """ +[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | +[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | +[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | +[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]""" + +extender = """ +#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | +#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]""" + +letter = " | ".join([baseChar, ideographic]) + +# Without the +name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter, + extender]) +nameFirst = " | ".join([letter, "_"]) + +reChar = re.compile(r"#x([\d|A-F]{4,4})") +reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]") + + +def charStringToList(chars): + charRanges = [item.strip() for item in chars.split(" | ")] + rv = [] + for item in charRanges: + foundMatch = False + for 
regexp in (reChar, reCharRange): + match = regexp.match(item) + if match is not None: + rv.append([hexToInt(item) for item in match.groups()]) + if len(rv[-1]) == 1: + rv[-1] = rv[-1] * 2 + foundMatch = True + break + if not foundMatch: + assert len(item) == 1 + + rv.append([ord(item)] * 2) + rv = normaliseCharList(rv) + return rv + + +def normaliseCharList(charList): + charList = sorted(charList) + for item in charList: + assert item[1] >= item[0] + rv = [] + i = 0 + while i < len(charList): + j = 1 + rv.append(charList[i]) + while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1: + rv[-1][1] = charList[i + j][1] + j += 1 + i += j + return rv + +# We don't really support characters above the BMP :( +max_unicode = int("FFFF", 16) + + +def missingRanges(charList): + rv = [] + if charList[0] != 0: + rv.append([0, charList[0][0] - 1]) + for i, item in enumerate(charList[:-1]): + rv.append([item[1] + 1, charList[i + 1][0] - 1]) + if charList[-1][1] != max_unicode: + rv.append([charList[-1][1] + 1, max_unicode]) + return rv + + +def listToRegexpStr(charList): + rv = [] + for item in charList: + if item[0] == item[1]: + rv.append(escapeRegexp(chr(item[0]))) + else: + rv.append(escapeRegexp(chr(item[0])) + "-" + + escapeRegexp(chr(item[1]))) + return "[%s]" % "".join(rv) + + +def hexToInt(hex_str): + return int(hex_str, 16) + + +def escapeRegexp(string): + specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}", + "[", "]", "|", "(", ")", "-") + for char in specialCharacters: + string = string.replace(char, "\\" + char) + + return string + +# output from the above +nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\
u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') + +nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') + +# Simpler things +nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]") + + +class InfosetFilter(object): + replacementRegexp = re.compile(r"U[\dA-F]{5,5}") + + def __init__(self, replaceChars=None, + dropXmlnsLocalName=False, + dropXmlnsAttrNs=False, + preventDoubleDashComments=False, + preventDashAtCommentEnd=False, + replaceFormFeedCharacters=True, + preventSingleQuotePubid=False): + + self.dropXmlnsLocalName = dropXmlnsLocalName + self.dropXmlnsAttrNs = dropXmlnsAttrNs + + self.preventDoubleDashComments = preventDoubleDashComments 
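+        # Editor's illustration (not in upstream html5lib): with this flag
+        # enabled, coerceComment("a--b") below returns "a- -b", because the
+        # XML infoset forbids "--" inside comment data.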
+ self.preventDashAtCommentEnd = preventDashAtCommentEnd + + self.replaceFormFeedCharacters = replaceFormFeedCharacters + + self.preventSingleQuotePubid = preventSingleQuotePubid + + self.replaceCache = {} + + def coerceAttribute(self, name, namespace=None): + if self.dropXmlnsLocalName and name.startswith("xmlns:"): + warnings.warn("Attributes cannot begin with xmlns", DataLossWarning) + return None + elif (self.dropXmlnsAttrNs and + namespace == "http://www.w3.org/2000/xmlns/"): + warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning) + return None + else: + return self.toXmlName(name) + + def coerceElement(self, name, namespace=None): + return self.toXmlName(name) + + def coerceComment(self, data): + if self.preventDoubleDashComments: + while "--" in data: + warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning) + data = data.replace("--", "- -") + return data + + def coerceCharacters(self, data): + if self.replaceFormFeedCharacters: + for i in range(data.count("\x0C")): + warnings.warn("Text cannot contain U+000C", DataLossWarning) + data = data.replace("\x0C", " ") + # Other non-xml characters + return data + + def coercePubid(self, data): + dataOutput = data + for char in nonPubidCharRegexp.findall(data): + warnings.warn("Coercing non-XML pubid", DataLossWarning) + replacement = self.getReplacementCharacter(char) + dataOutput = dataOutput.replace(char, replacement) + if self.preventSingleQuotePubid and dataOutput.find("'") >= 0: + warnings.warn("Pubid cannot contain single quote", DataLossWarning) + dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'")) + return dataOutput + + def toXmlName(self, name): + nameFirst = name[0] + nameRest = name[1:] + m = nonXmlNameFirstBMPRegexp.match(nameFirst) + if m: + warnings.warn("Coercing non-XML name", DataLossWarning) + nameFirstOutput = self.getReplacementCharacter(nameFirst) + else: + nameFirstOutput = nameFirst + + nameRestOutput = nameRest + replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest)) + for char in replaceChars: + warnings.warn("Coercing non-XML name", DataLossWarning) + replacement = self.getReplacementCharacter(char) + nameRestOutput = nameRestOutput.replace(char, replacement) + return nameFirstOutput + nameRestOutput + + def getReplacementCharacter(self, char): + if char in self.replaceCache: + replacement = self.replaceCache[char] + else: + replacement = self.escapeChar(char) + return replacement + + def fromXmlName(self, name): + for item in set(self.replacementRegexp.findall(name)): + name = name.replace(item, self.unescapeChar(item)) + return name + + def escapeChar(self, char): + replacement = "U%05X" % ord(char) + self.replaceCache[char] = replacement + return replacement + + def unescapeChar(self, charcode): + return chr(int(charcode[1:], 16)) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/inputstream.py b/awx/lib/site-packages/pip/vendor/html5lib/inputstream.py new file mode 100644 index 0000000000..00d837fa51 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/inputstream.py @@ -0,0 +1,881 @@ +from __future__ import absolute_import, division, unicode_literals +from pip.vendor.six import text_type + +import codecs +import re + +from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase +from .constants import encodings, ReparseException +from . 
import utils + +from io import StringIO + +try: + from io import BytesIO +except ImportError: + BytesIO = StringIO + +try: + from io import BufferedIOBase +except ImportError: + class BufferedIOBase(object): + pass + +# Non-unicode versions of constants for use in the pre-parser +spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters]) +asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters]) +asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase]) +spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"]) + +invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]") + +non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, + 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, + 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, + 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, + 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, + 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, + 0x10FFFE, 0x10FFFF]) + +ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]") + +# Cache for charsUntil() +charsUntilRegEx = {} + + +class BufferedStream: + """Buffering for streams that do not have buffering of their own + + The buffer is implemented as a list of chunks on the assumption that + joining many strings will be slow since it is O(n**2) + """ + + def __init__(self, stream): + self.stream = stream + self.buffer = [] + self.position = [-1, 0] # chunk number, offset + + def tell(self): + pos = 0 + for chunk in self.buffer[:self.position[0]]: + pos += len(chunk) + pos += self.position[1] + return pos + + def seek(self, pos): + assert pos < self._bufferedBytes() + offset = pos + i = 0 + while len(self.buffer[i]) < offset: + offset -= pos + i += 1 + self.position = [i, offset] + + def read(self, bytes): + if not self.buffer: + return self._readStream(bytes) + elif (self.position[0] == len(self.buffer) and + self.position[1] == len(self.buffer[-1])): + return self._readStream(bytes) + else: + return self._readFromBuffer(bytes) + + def _bufferedBytes(self): + return sum([len(item) for item in self.buffer]) + + def _readStream(self, bytes): + data = self.stream.read(bytes) + self.buffer.append(data) + self.position[0] += 1 + self.position[1] = len(data) + return data + + def _readFromBuffer(self, bytes): + remainingBytes = bytes + rv = [] + bufferIndex = self.position[0] + bufferOffset = self.position[1] + while bufferIndex < len(self.buffer) and remainingBytes != 0: + assert remainingBytes > 0 + bufferedData = self.buffer[bufferIndex] + + if remainingBytes <= len(bufferedData) - bufferOffset: + bytesToRead = remainingBytes + self.position = [bufferIndex, bufferOffset + bytesToRead] + else: + bytesToRead = len(bufferedData) - bufferOffset + self.position = [bufferIndex, len(bufferedData)] + bufferIndex += 1 + rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead]) + remainingBytes -= bytesToRead + + bufferOffset = 0 + + if remainingBytes: + rv.append(self._readStream(remainingBytes)) + + return "".join(rv) + + +def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True): + if 
hasattr(source, "read"): + isUnicode = isinstance(source.read(0), text_type) + else: + isUnicode = isinstance(source, text_type) + + if isUnicode: + if encoding is not None: + raise TypeError("Cannot explicitly set an encoding with a unicode string") + + return HTMLUnicodeInputStream(source) + else: + return HTMLBinaryInputStream(source, encoding, parseMeta, chardet) + + +class HTMLUnicodeInputStream: + """Provides a unicode stream of characters to the HTMLTokenizer. + + This class takes care of character encoding and removing or replacing + incorrect byte-sequences and also provides column and line tracking. + + """ + + _defaultChunkSize = 10240 + + def __init__(self, source): + """Initialises the HTMLInputStream. + + HTMLInputStream(source, [encoding]) -> Normalized stream from source + for use by html5lib. + + source can be either a file-object, local filename or a string. + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + + parseMeta - Look for a <meta> element containing encoding information + + """ + + # Craziness + if len("\U0010FFFF") == 1: + self.reportCharacterErrors = self.characterErrorsUCS4 + self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]") + else: + self.reportCharacterErrors = self.characterErrorsUCS2 + self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])") + + # List of where new lines occur + self.newLines = [0] + + self.charEncoding = ("utf-8", "certain") + self.dataStream = self.openStream(source) + + self.reset() + + def reset(self): + self.chunk = "" + self.chunkSize = 0 + self.chunkOffset = 0 + self.errors = [] + + # number of (complete) lines in previous chunks + self.prevNumLines = 0 + # number of columns in the last line of the previous chunk + self.prevNumCols = 0 + + # Deal with CR LF and surrogates split over chunk boundaries + self._bufferedCharacter = None + + def openStream(self, source): + """Produces a file object from source. + + source can be either a file object, local filename or a string. + + """ + # Already a file object + if hasattr(source, 'read'): + stream = source + else: + stream = StringIO(source) + + return stream + + def _position(self, offset): + chunk = self.chunk + nLines = chunk.count('\n', 0, offset) + positionLine = self.prevNumLines + nLines + lastLinePos = chunk.rfind('\n', 0, offset) + if lastLinePos == -1: + positionColumn = self.prevNumCols + offset + else: + positionColumn = offset - (lastLinePos + 1) + return (positionLine, positionColumn) + + def position(self): + """Returns (line, col) of the current position in the stream.""" + line, col = self._position(self.chunkOffset) + return (line + 1, col) + + def char(self): + """ Read one character from the stream or queue if available. Return + EOF when EOF is reached. 
+ """ + # Read a new chunk from the input stream if necessary + if self.chunkOffset >= self.chunkSize: + if not self.readChunk(): + return EOF + + chunkOffset = self.chunkOffset + char = self.chunk[chunkOffset] + self.chunkOffset = chunkOffset + 1 + + return char + + def readChunk(self, chunkSize=None): + if chunkSize is None: + chunkSize = self._defaultChunkSize + + self.prevNumLines, self.prevNumCols = self._position(self.chunkSize) + + self.chunk = "" + self.chunkSize = 0 + self.chunkOffset = 0 + + data = self.dataStream.read(chunkSize) + + # Deal with CR LF and surrogates broken across chunks + if self._bufferedCharacter: + data = self._bufferedCharacter + data + self._bufferedCharacter = None + elif not data: + # We have no more data, bye-bye stream + return False + + if len(data) > 1: + lastv = ord(data[-1]) + if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF: + self._bufferedCharacter = data[-1] + data = data[:-1] + + self.reportCharacterErrors(data) + + # Replace invalid characters + # Note U+0000 is dealt with in the tokenizer + data = self.replaceCharactersRegexp.sub("\ufffd", data) + + data = data.replace("\r\n", "\n") + data = data.replace("\r", "\n") + + self.chunk = data + self.chunkSize = len(data) + + return True + + def characterErrorsUCS4(self, data): + for i in range(len(invalid_unicode_re.findall(data))): + self.errors.append("invalid-codepoint") + + def characterErrorsUCS2(self, data): + # Someone picked the wrong compile option + # You lose + skip = False + for match in invalid_unicode_re.finditer(data): + if skip: + continue + codepoint = ord(match.group()) + pos = match.start() + # Pretty sure there should be endianness issues here + if utils.isSurrogatePair(data[pos:pos + 2]): + # We have a surrogate pair! + char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2]) + if char_val in non_bmp_invalid_codepoints: + self.errors.append("invalid-codepoint") + skip = True + elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and + pos == len(data) - 1): + self.errors.append("invalid-codepoint") + else: + skip = False + self.errors.append("invalid-codepoint") + + def charsUntil(self, characters, opposite=False): + """ Returns a string of characters from the stream up to but not + including any character in 'characters' or EOF. 'characters' must be + a container that supports the 'in' method and iteration over its + characters. 
+ """ + + # Use a cache of regexps to find the required characters + try: + chars = charsUntilRegEx[(characters, opposite)] + except KeyError: + if __debug__: + for c in characters: + assert(ord(c) < 128) + regex = "".join(["\\x%02x" % ord(c) for c in characters]) + if not opposite: + regex = "^%s" % regex + chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex) + + rv = [] + + while True: + # Find the longest matching prefix + m = chars.match(self.chunk, self.chunkOffset) + if m is None: + # If nothing matched, and it wasn't because we ran out of chunk, + # then stop + if self.chunkOffset != self.chunkSize: + break + else: + end = m.end() + # If not the whole chunk matched, return everything + # up to the part that didn't match + if end != self.chunkSize: + rv.append(self.chunk[self.chunkOffset:end]) + self.chunkOffset = end + break + # If the whole remainder of the chunk matched, + # use it all and read the next chunk + rv.append(self.chunk[self.chunkOffset:]) + if not self.readChunk(): + # Reached EOF + break + + r = "".join(rv) + return r + + def unget(self, char): + # Only one character is allowed to be ungotten at once - it must + # be consumed again before any further call to unget + if char is not None: + if self.chunkOffset == 0: + # unget is called quite rarely, so it's a good idea to do + # more work here if it saves a bit of work in the frequently + # called char and charsUntil. + # So, just prepend the ungotten character onto the current + # chunk: + self.chunk = char + self.chunk + self.chunkSize += 1 + else: + self.chunkOffset -= 1 + assert self.chunk[self.chunkOffset] == char + + +class HTMLBinaryInputStream(HTMLUnicodeInputStream): + """Provides a unicode stream of characters to the HTMLTokenizer. + + This class takes care of character encoding and removing or replacing + incorrect byte-sequences and also provides column and line tracking. + + """ + + def __init__(self, source, encoding=None, parseMeta=True, chardet=True): + """Initialises the HTMLInputStream. + + HTMLInputStream(source, [encoding]) -> Normalized stream from source + for use by html5lib. + + source can be either a file-object, local filename or a string. + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + + parseMeta - Look for a <meta> element containing encoding information + + """ + # Raw Stream - for unicode objects this will encode to utf-8 and set + # self.charEncoding as appropriate + self.rawStream = self.openStream(source) + + HTMLUnicodeInputStream.__init__(self, self.rawStream) + + self.charEncoding = (codecName(encoding), "certain") + + # Encoding Information + # Number of bytes to use when looking for a meta element with + # encoding information + self.numBytesMeta = 512 + # Number of bytes to use when using detecting encoding using chardet + self.numBytesChardet = 100 + # Encoding to use if no other information can be found + self.defaultEncoding = "windows-1252" + + # Detect encoding iff no explicit "transport level" encoding is supplied + if (self.charEncoding[0] is None): + self.charEncoding = self.detectEncoding(parseMeta, chardet) + + # Call superclass + self.reset() + + def reset(self): + self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream, + 'replace') + HTMLUnicodeInputStream.reset(self) + + def openStream(self, source): + """Produces a file object from source. 
+ + source can be either a file object, local filename or a string. + + """ + # Already a file object + if hasattr(source, 'read'): + stream = source + else: + stream = BytesIO(source) + + try: + stream.seek(stream.tell()) + except: + stream = BufferedStream(stream) + + return stream + + def detectEncoding(self, parseMeta=True, chardet=True): + # First look for a BOM + # This will also read past the BOM if present + encoding = self.detectBOM() + confidence = "certain" + # If there is no BOM need to look for meta elements with encoding + # information + if encoding is None and parseMeta: + encoding = self.detectEncodingMeta() + confidence = "tentative" + # Guess with chardet, if available + if encoding is None and chardet: + confidence = "tentative" + try: + try: + from charade.universaldetector import UniversalDetector + except ImportError: + from chardet.universaldetector import UniversalDetector + buffers = [] + detector = UniversalDetector() + while not detector.done: + buffer = self.rawStream.read(self.numBytesChardet) + assert isinstance(buffer, bytes) + if not buffer: + break + buffers.append(buffer) + detector.feed(buffer) + detector.close() + encoding = detector.result['encoding'] + self.rawStream.seek(0) + except ImportError: + pass + # If all else fails use the default encoding + if encoding is None: + confidence = "tentative" + encoding = self.defaultEncoding + + # Substitute for equivalent encodings: + encodingSub = {"iso-8859-1": "windows-1252"} + + if encoding.lower() in encodingSub: + encoding = encodingSub[encoding.lower()] + + return encoding, confidence + + def changeEncoding(self, newEncoding): + assert self.charEncoding[1] != "certain" + newEncoding = codecName(newEncoding) + if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"): + newEncoding = "utf-8" + if newEncoding is None: + return + elif newEncoding == self.charEncoding[0]: + self.charEncoding = (self.charEncoding[0], "certain") + else: + oldEncoding = self.charEncoding[0] + self.rawStream.seek(0) + self.reset() + self.charEncoding = (newEncoding, "certain") + raise ReparseException("Encoding changed from %s to %s" % (oldEncoding, newEncoding)) + + def detectBOM(self): + """Attempts to detect a BOM at the start of the stream.
If + an encoding can be determined from the BOM return the name of the + encoding otherwise return None""" + bomDict = { + codecs.BOM_UTF8: 'utf-8', + codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be', + codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be' + } + + # Go to beginning of file and read in 4 bytes + string = self.rawStream.read(4) + assert isinstance(string, bytes) + + # Try detecting the BOM using bytes from the string + encoding = bomDict.get(string[:3]) # UTF-8 + seek = 3 + if not encoding: + # Need to detect UTF-32 before UTF-16 + encoding = bomDict.get(string) # UTF-32 + seek = 4 + if not encoding: + encoding = bomDict.get(string[:2]) # UTF-16 + seek = 2 + + # Set the read position past the BOM if one was found, otherwise + # set it to the start of the stream + self.rawStream.seek(encoding and seek or 0) + + return encoding + + def detectEncodingMeta(self): + """Report the encoding declared by the meta element + """ + buffer = self.rawStream.read(self.numBytesMeta) + assert isinstance(buffer, bytes) + parser = EncodingParser(buffer) + self.rawStream.seek(0) + encoding = parser.getEncoding() + + if encoding in ("utf-16", "utf-16-be", "utf-16-le"): + encoding = "utf-8" + + return encoding + + +class EncodingBytes(bytes): + """String-like object with an associated position and various extra methods + If the position is ever greater than the string length then an exception is + raised""" + def __new__(self, value): + assert isinstance(value, bytes) + return bytes.__new__(self, value.lower()) + + def __init__(self, value): + self._position = -1 + + def __iter__(self): + return self + + def __next__(self): + p = self._position = self._position + 1 + if p >= len(self): + raise StopIteration + elif p < 0: + raise TypeError + return self[p:p + 1] + + def next(self): + # Py2 compat + return self.__next__() + + def previous(self): + p = self._position + if p >= len(self): + raise StopIteration + elif p < 0: + raise TypeError + self._position = p = p - 1 + return self[p:p + 1] + + def setPosition(self, position): + if self._position >= len(self): + raise StopIteration + self._position = position + + def getPosition(self): + if self._position >= len(self): + raise StopIteration + if self._position >= 0: + return self._position + else: + return None + + position = property(getPosition, setPosition) + + def getCurrentByte(self): + return self[self.position:self.position + 1] + + currentByte = property(getCurrentByte) + + def skip(self, chars=spaceCharactersBytes): + """Skip past a list of characters""" + p = self.position # use property for the error-checking + while p < len(self): + c = self[p:p + 1] + if c not in chars: + self._position = p + return c + p += 1 + self._position = p + return None + + def skipUntil(self, chars): + p = self.position + while p < len(self): + c = self[p:p + 1] + if c in chars: + self._position = p + return c + p += 1 + self._position = p + return None + + def matchBytes(self, bytes): + """Look for a sequence of bytes at the start of a string. If the bytes + are found return True and advance the position to the byte after the + match. Otherwise return False and leave the position alone""" + p = self.position + data = self[p:p + len(bytes)] + rv = data.startswith(bytes) + if rv: + self.position += len(bytes) + return rv + + def jumpTo(self, bytes): + """Look for the next sequence of bytes matching a given sequence. 
If + a match is found advance the position to the last byte of the match""" + newPosition = self[self.position:].find(bytes) + if newPosition > -1: + # XXX: This is ugly, but I can't see a nicer way to fix this. + if self._position == -1: + self._position = 0 + self._position += (newPosition + len(bytes) - 1) + return True + else: + raise StopIteration + + +class EncodingParser(object): + """Mini parser for detecting character encoding from meta elements""" + + def __init__(self, data): + """string - the data to work on for encoding detection""" + self.data = EncodingBytes(data) + self.encoding = None + + def getEncoding(self): + methodDispatch = ( + (b"<!--", self.handleComment), + (b"<meta", self.handleMeta), + (b"</", self.handlePossibleEndTag), + (b"<!", self.handleOther), + (b"<?", self.handleOther), + (b"<", self.handlePossibleStartTag)) + for byte in self.data: + keepParsing = True + for key, method in methodDispatch: + if self.data.matchBytes(key): + try: + keepParsing = method() + break + except StopIteration: + keepParsing = False + break + if not keepParsing: + break + + return self.encoding + + def handleComment(self): + """Skip over comments""" + return self.data.jumpTo(b"-->") + + def handleMeta(self): + if self.data.currentByte not in spaceCharactersBytes: + # if we have <meta not followed by a space so just keep going + return True + # We have a valid meta element we want to search for attributes + hasPragma = False + pendingEncoding = None + while True: + # Try to find the next attribute after the current position + attr = self.getAttribute() + if attr is None: + return True + else: + if attr[0] == b"http-equiv": + hasPragma = attr[1] == b"content-type" + if hasPragma and pendingEncoding is not None: + self.encoding = pendingEncoding + return False + elif attr[0] == b"charset": + tentativeEncoding = attr[1] + codec = codecName(tentativeEncoding) + if codec is not None: + self.encoding = codec + return False + elif attr[0] == b"content": + contentParser = ContentAttrParser(EncodingBytes(attr[1])) + tentativeEncoding = contentParser.parse() + if tentativeEncoding is not None: + codec = codecName(tentativeEncoding) + if codec is not None: + if hasPragma: + self.encoding = codec + return False + else: + pendingEncoding = codec + + def handlePossibleStartTag(self): + return self.handlePossibleTag(False) + + def handlePossibleEndTag(self): + next(self.data) + return self.handlePossibleTag(True) + + def handlePossibleTag(self, endTag): + data = self.data + if data.currentByte not in asciiLettersBytes: + # If the next byte is not an ascii letter either ignore this + # fragment (possible start tag case) or treat it according to + # handleOther + if endTag: + data.previous() + self.handleOther() + return True + + c = data.skipUntil(spacesAngleBrackets) + if c == b"<": + # return to the first step in the overall "two step" algorithm + # reprocessing the < byte + data.previous() + else: + # Read all attributes + attr = self.getAttribute() + while attr is not None: + attr = self.getAttribute() + return True + + def handleOther(self): + return self.data.jumpTo(b">") + + def getAttribute(self): + """Return a name,value pair for the next attribute in the stream, + if one is found, or None""" + data = self.data + # Step 1 (skip chars) + c = data.skip(spaceCharactersBytes | frozenset([b"/"])) + assert c is None or len(c) == 1 + # Step 2 + if c in (b">", None): + return None + # Step 3 + attrName = [] + attrValue = [] + # Step 4 attribute name + while True: + if c == b"=" and attrName: + 
break + elif c in spaceCharactersBytes: + # Step 6! + c = data.skip() + break + elif c in (b"/", b">"): + return b"".join(attrName), b"" + elif c in asciiUppercaseBytes: + attrName.append(c.lower()) + elif c is None: + return None + else: + attrName.append(c) + # Step 5 + c = next(data) + # Step 7 + if c != b"=": + data.previous() + return b"".join(attrName), b"" + # Step 8 + next(data) + # Step 9 + c = data.skip() + # Step 10 + if c in (b"'", b'"'): + # 10.1 + quoteChar = c + while True: + # 10.2 + c = next(data) + # 10.3 + if c == quoteChar: + next(data) + return b"".join(attrName), b"".join(attrValue) + # 10.4 + elif c in asciiUppercaseBytes: + attrValue.append(c.lower()) + # 10.5 + else: + attrValue.append(c) + elif c == b">": + return b"".join(attrName), b"" + elif c in asciiUppercaseBytes: + attrValue.append(c.lower()) + elif c is None: + return None + else: + attrValue.append(c) + # Step 11 + while True: + c = next(data) + if c in spacesAngleBrackets: + return b"".join(attrName), b"".join(attrValue) + elif c in asciiUppercaseBytes: + attrValue.append(c.lower()) + elif c is None: + return None + else: + attrValue.append(c) + + +class ContentAttrParser(object): + def __init__(self, data): + assert isinstance(data, bytes) + self.data = data + + def parse(self): + try: + # Check if the attr name is charset + # otherwise return + self.data.jumpTo(b"charset") + self.data.position += 1 + self.data.skip() + if not self.data.currentByte == b"=": + # If there is no = sign keep looking for attrs + return None + self.data.position += 1 + self.data.skip() + # Look for an encoding between matching quote marks + if self.data.currentByte in (b'"', b"'"): + quoteMark = self.data.currentByte + self.data.position += 1 + oldPosition = self.data.position + if self.data.jumpTo(quoteMark): + return self.data[oldPosition:self.data.position] + else: + return None + else: + # Unquoted value + oldPosition = self.data.position + try: + self.data.skipUntil(spaceCharactersBytes) + return self.data[oldPosition:self.data.position] + except StopIteration: + # Return the whole remaining value + return self.data[oldPosition:] + except StopIteration: + return None + + +def codecName(encoding): + """Return the python codec name corresponding to an encoding or None if the + string doesn't correspond to a valid encoding.""" + if isinstance(encoding, bytes): + try: + encoding = encoding.decode("ascii") + except UnicodeDecodeError: + return None + if encoding: + canonicalName = ascii_punctuation_re.sub("", encoding).lower() + return encodings.get(canonicalName, None) + else: + return None diff --git a/awx/lib/site-packages/pip/vendor/html5lib/sanitizer.py b/awx/lib/site-packages/pip/vendor/html5lib/sanitizer.py new file mode 100644 index 0000000000..71dc5212c1 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/sanitizer.py @@ -0,0 +1,271 @@ +from __future__ import absolute_import, division, unicode_literals + +import re +from xml.sax.saxutils import escape, unescape + +from .tokenizer import HTMLTokenizer +from .constants import tokenTypes + + +class HTMLSanitizerMixin(object): + """ sanitization of XHTML+MathML+SVG and of inline style attributes.""" + + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', + 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', + 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', + 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', + 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', + 'figcaption', 
'figure', 'footer', 'font', 'form', 'header', 'h1', + 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', + 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', + 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', + 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', + 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', + 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', + 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video'] + + mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi', + 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom', + 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub', + 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', + 'munderover', 'none'] + + svg_elements = ['a', 'animate', 'animateColor', 'animateMotion', + 'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse', + 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', + 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', + 'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect', + 'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis', + 'background', 'balance', 'bgcolor', 'bgproperties', 'border', + 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', + 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', + 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', + 'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords', + 'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', + 'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', + 'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers', + 'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace', + 'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing', + 'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend', + 'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method', + 'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open', + 'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload', + 'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min', + 'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan', + 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', + 'step', 'style', 'summary', 'suppress', 'tabindex', 'target', + 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap', + 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml', + 'width', 'wrap', 'xml:lang'] + + mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign', + 'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth', + 'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence', + 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace', + 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize', + 'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines', + 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', + 'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show', + 'xlink:type', 'xmlns', 'xmlns:xlink'] + + svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic', + 'arabic-form', 'ascent', 'attributeName', 'attributeType', + 
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', + 'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx', + 'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill', + 'fill-opacity', 'fill-rule', 'font-family', 'font-size', + 'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from', + 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging', + 'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k', + 'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end', + 'marker-mid', 'marker-start', 'markerHeight', 'markerUnits', + 'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset', + 'opacity', 'orient', 'origin', 'overline-position', + 'overline-thickness', 'panose-1', 'path', 'pathLength', 'points', + 'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount', + 'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart', + 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color', + 'stop-opacity', 'strikethrough-position', 'strikethrough-thickness', + 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', + 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', + 'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to', + 'transform', 'type', 'u1', 'u2', 'underline-position', + 'underline-thickness', 'unicode', 'unicode-range', 'units-per-em', + 'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x', + 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', + 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', + 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', + 'y1', 'y2', 'zoomAndPan'] + + attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster', + 'xlink:href', 'xml:base'] + + svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill', + 'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end', + 'mask', 'stroke'] + + svg_allow_local_href = ['altGlyph', 'animate', 'animateColor', + 'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter', + 'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref', + 'set', 'use'] + + acceptable_css_properties = ['azimuth', 'background-color', + 'border-bottom-color', 'border-collapse', 'border-color', + 'border-left-color', 'border-right-color', 'border-top-color', 'clear', + 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', + 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', + 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', + 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', + 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', + 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', + 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', + 'white-space', 'width'] + + acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue', + 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', + 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', + 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', + 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', + 'transparent', 'underline', 'white', 'yellow'] + + acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule', + 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', + 'stroke-opacity'] + + acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc', + 'mailto', 'news', 'gopher', 'nntp', 
'telnet', 'webcal', + 'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag', + 'ssh', 'sftp', 'rtsp', 'afs'] + + # subclasses may define their own versions of these constants + allowed_elements = acceptable_elements + mathml_elements + svg_elements + allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes + allowed_css_properties = acceptable_css_properties + allowed_css_keywords = acceptable_css_keywords + allowed_svg_properties = acceptable_svg_properties + allowed_protocols = acceptable_protocols + + # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and + # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style + # attributes are parsed, and a restricted set, specified by + # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through. + # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified + # in ALLOWED_PROTOCOLS are allowed. + # + # sanitize_html('<script> do_nasty_stuff() </script>') + # => &lt;script> do_nasty_stuff() &lt;/script> + # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') + # => <a>Click here for $100</a> + def sanitize_token(self, token): + + # accommodate filters which use token_type differently + token_type = token["type"] + if token_type in list(tokenTypes.keys()): + token_type = tokenTypes[token_type] + + if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"], + tokenTypes["EmptyTag"]): + if token["name"] in self.allowed_elements: + return self.allowed_token(token, token_type) + else: + return self.disallowed_token(token, token_type) + elif token_type == tokenTypes["Comment"]: + pass + else: + return token + + def allowed_token(self, token, token_type): + if "data" in token: + attrs = dict([(name, val) for name, val in + token["data"][::-1] + if name in self.allowed_attributes]) + for attr in self.attr_val_is_uri: + if attr not in attrs: + continue + val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '', + unescape(attrs[attr])).lower() + # remove replacement characters from unescaped characters + val_unescaped = val_unescaped.replace("\ufffd", "") + if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and + (val_unescaped.split(':')[0] not in + self.allowed_protocols)): + del attrs[attr] + for attr in self.svg_attr_val_allows_ref: + if attr in attrs: + attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', + ' ', + unescape(attrs[attr])) + if (token["name"] in self.svg_allow_local_href and + 'xlink:href' in attrs and re.search('^\s*[^#\s].*', + attrs['xlink:href'])): + del attrs['xlink:href'] + if 'style' in attrs: + attrs['style'] = self.sanitize_css(attrs['style']) + token["data"] = [[name, val] for name, val in list(attrs.items())] + return token + + def disallowed_token(self, token, token_type): + if token_type == tokenTypes["EndTag"]: + token["data"] = "</%s>" % token["name"] + elif token["data"]: + attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]]) + token["data"] = "<%s%s>" % (token["name"], attrs) + else: + token["data"] = "<%s>" % token["name"] + if token.get("selfClosing"): + token["data"] = token["data"][:-1] + "/>" + + if token["type"] in list(tokenTypes.keys()): + token["type"] = "Characters" + else: + token["type"] = tokenTypes["Characters"] + + del token["name"] + return token + + def sanitize_css(self, style): + # disallow urls + style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) + + # gauntlet + if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): + return '' + 
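+ # Editor's illustration (not upstream code): the url() substitution and + # "gauntlet" checks above, plus the property whitelist below, combine so + # that, e.g.: + # sanitize_css('color: red; width: 50px') -> 'color: red; width: 50px;' + # sanitize_css('background: url(evil.js)') -> ''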
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): + return '' + + clean = [] + for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style): + if not value: + continue + if prop.lower() in self.allowed_css_properties: + clean.append(prop + ': ' + value + ';') + elif prop.split('-')[0].lower() in ['background', 'border', 'margin', + 'padding']: + for keyword in value.split(): + if not keyword in self.acceptable_css_keywords and \ + not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): + break + else: + clean.append(prop + ': ' + value + ';') + elif prop.lower() in self.allowed_svg_properties: + clean.append(prop + ': ' + value + ';') + + return ' '.join(clean) + + +class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin): + def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, + lowercaseElementName=False, lowercaseAttrName=False, parser=None): + # Change case matching defaults as we only output lowercase html anyway + # This solution doesn't seem ideal... + HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet, + lowercaseElementName, lowercaseAttrName, parser=parser) + + def __iter__(self): + for token in HTMLTokenizer.__iter__(self): + token = self.sanitize_token(token) + if token: + yield token diff --git a/awx/lib/site-packages/pip/vendor/html5lib/serializer/__init__.py b/awx/lib/site-packages/pip/vendor/html5lib/serializer/__init__.py new file mode 100644 index 0000000000..8380839a6d --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/serializer/__init__.py @@ -0,0 +1,16 @@ +from __future__ import absolute_import, division, unicode_literals + +from .. import treewalkers + +from .htmlserializer import HTMLSerializer + + +def serialize(input, tree="etree", format="html", encoding=None, + **serializer_opts): + # XXX: Should we cache this? + walker = treewalkers.getTreeWalker(tree) + if format == "html": + s = HTMLSerializer(**serializer_opts) + else: + raise ValueError("type must be html") + return s.render(walker(input), encoding) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/serializer/htmlserializer.py b/awx/lib/site-packages/pip/vendor/html5lib/serializer/htmlserializer.py new file mode 100644 index 0000000000..11a932703b --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/serializer/htmlserializer.py @@ -0,0 +1,309 @@ +from __future__ import absolute_import, division, unicode_literals +from pip.vendor.six import text_type + +import gettext +_ = gettext.gettext + +try: + from functools import reduce +except ImportError: + pass + +from ..constants import voidElements, booleanAttributes, spaceCharacters +from ..constants import rcdataElements, entities, xmlEntities +from .. import utils +from xml.sax.saxutils import escape + +spaceCharacters = "".join(spaceCharacters) + +try: + from codecs import register_error, xmlcharrefreplace_errors +except ImportError: + unicode_encode_errors = "strict" +else: + unicode_encode_errors = "htmlentityreplace" + + encode_entity_map = {} + is_ucs4 = len("\U0010FFFF") == 1 + for k, v in list(entities.items()): + # skip multi-character entities + if ((is_ucs4 and len(v) > 1) or + (not is_ucs4 and len(v) > 2)): + continue + if v != "&": + if len(v) == 2: + v = utils.surrogatePairToCodepoint(v) + else: + v = ord(v) + if not v in encode_entity_map or k.islower(): + # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc. 
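+ # (Editor's note: because lowercase entity names overwrite earlier + # entries here, 0x3C ends up mapped to a lowercase name such as "lt;", + # so encoding later emits "&lt;" rather than "&LT;".)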
+ encode_entity_map[v] = k + + def htmlentityreplace_errors(exc): + if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): + res = [] + codepoints = [] + skip = False + for i, c in enumerate(exc.object[exc.start:exc.end]): + if skip: + skip = False + continue + index = i + exc.start + if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]): + codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2]) + skip = True + else: + codepoint = ord(c) + codepoints.append(codepoint) + for cp in codepoints: + e = encode_entity_map.get(cp) + if e: + res.append("&") + res.append(e) + if not e.endswith(";"): + res.append(";") + else: + res.append("&#x%s;" % (hex(cp)[2:])) + return ("".join(res), exc.end) + else: + return xmlcharrefreplace_errors(exc) + + register_error(unicode_encode_errors, htmlentityreplace_errors) + + del register_error + + +class HTMLSerializer(object): + + # attribute quoting options + quote_attr_values = False + quote_char = '"' + use_best_quote_char = True + + # tag syntax options + omit_optional_tags = True + minimize_boolean_attributes = True + use_trailing_solidus = False + space_before_trailing_solidus = True + + # escaping options + escape_lt_in_attrs = False + escape_rcdata = False + resolve_entities = True + + # miscellaneous options + inject_meta_charset = True + strip_whitespace = False + sanitize = False + + options = ("quote_attr_values", "quote_char", "use_best_quote_char", + "minimize_boolean_attributes", "use_trailing_solidus", + "space_before_trailing_solidus", "omit_optional_tags", + "strip_whitespace", "inject_meta_charset", "escape_lt_in_attrs", + "escape_rcdata", "resolve_entities", "sanitize") + + def __init__(self, **kwargs): + """Initialize HTMLSerializer. + + Keyword options (default given first unless specified) include: + + inject_meta_charset=True|False + Whether to insert a meta element to define the character set of the + document. + quote_attr_values=True|False + Whether to quote attribute values that don't require quoting + per HTML5 parsing rules. + quote_char=u'"'|u"'" + Use given quote character for attribute quoting. Default is to + use double quote unless attribute value contains a double quote, + in which case single quotes are used instead. + escape_lt_in_attrs=False|True + Whether to escape < in attribute values. + escape_rcdata=False|True + Whether to escape characters that need to be escaped within normal + elements within rcdata elements such as style. + resolve_entities=True|False + Whether to resolve named character entities that appear in the + source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos; + are unaffected by this setting. + strip_whitespace=False|True + Whether to remove semantically meaningless whitespace. (This + compresses all whitespace to a single space except within pre.) + minimize_boolean_attributes=True|False + Shortens boolean attributes to give just the attribute value, + for example <input disabled="disabled"> becomes <input disabled>. + use_trailing_solidus=False|True + Includes a close-tag slash at the end of the start tag of void + elements (empty elements whose end tag is forbidden). E.g. <hr/>. + space_before_trailing_solidus=True|False + Places a space immediately before the closing slash in a tag + using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus. + sanitize=False|True + Strip all unsafe or unknown constructs from output. + See `html5lib user documentation`_ + omit_optional_tags=True|False + Omit start/end tags that are optional. + + ..
_html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation + """ + if 'quote_char' in kwargs: + self.use_best_quote_char = False + for attr in self.options: + setattr(self, attr, kwargs.get(attr, getattr(self, attr))) + self.errors = [] + self.strict = False + + def encode(self, string): + assert(isinstance(string, text_type)) + if self.encoding: + return string.encode(self.encoding, unicode_encode_errors) + else: + return string + + def encodeStrict(self, string): + assert(isinstance(string, text_type)) + if self.encoding: + return string.encode(self.encoding, "strict") + else: + return string + + def serialize(self, treewalker, encoding=None): + self.encoding = encoding + in_cdata = False + self.errors = [] + if encoding and self.inject_meta_charset: + from ..filters.inject_meta_charset import Filter + treewalker = Filter(treewalker, encoding) + # XXX: WhitespaceFilter should be used before OptionalTagFilter + # for maximum efficiency of this latter filter + if self.strip_whitespace: + from ..filters.whitespace import Filter + treewalker = Filter(treewalker) + if self.sanitize: + from ..filters.sanitizer import Filter + treewalker = Filter(treewalker) + if self.omit_optional_tags: + from ..filters.optionaltags import Filter + treewalker = Filter(treewalker) + for token in treewalker: + type = token["type"] + if type == "Doctype": + doctype = "<!DOCTYPE %s" % token["name"] + + if token["publicId"]: + doctype += ' PUBLIC "%s"' % token["publicId"] + elif token["systemId"]: + doctype += " SYSTEM" + if token["systemId"]: + if token["systemId"].find('"') >= 0: + if token["systemId"].find("'") >= 0: + self.serializeError(_("System identifier contains both single and double quote characters")) + quote_char = "'" + else: + quote_char = '"' + doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) + + doctype += ">" + yield self.encodeStrict(doctype) + + elif type in ("Characters", "SpaceCharacters"): + if type == "SpaceCharacters" or in_cdata: + if in_cdata and token["data"].find("</") >= 0: + self.serializeError(_("Unexpected </ in CDATA")) + yield self.encode(token["data"]) + else: + yield self.encode(escape(token["data"])) + + elif type in ("StartTag", "EmptyTag"): + name = token["name"] + yield self.encodeStrict("<%s" % name) + if name in rcdataElements and not self.escape_rcdata: + in_cdata = True + elif in_cdata: + self.serializeError(_("Unexpected child element of a CDATA element")) + for (attr_namespace, attr_name), attr_value in token["data"].items(): + # TODO: Add namespace support here + k = attr_name + v = attr_value + yield self.encodeStrict(' ') + + yield self.encodeStrict(k) + if not self.minimize_boolean_attributes or \ + (k not in booleanAttributes.get(name, tuple()) + and k not in booleanAttributes.get("", tuple())): + yield self.encodeStrict("=") + if self.quote_attr_values or not v: + quote_attr = True + else: + quote_attr = reduce(lambda x, y: x or (y in v), + spaceCharacters + ">\"'=", False) + v = v.replace("&", "&amp;") + if self.escape_lt_in_attrs: + v = v.replace("<", "&lt;") + if quote_attr: + quote_char = self.quote_char + if self.use_best_quote_char: + if "'" in v and '"' not in v: + quote_char = '"' + elif '"' in v and "'" not in v: + quote_char = "'" + if quote_char == "'": + v = v.replace("'", "&#39;") + else: + v = v.replace('"', "&quot;") + yield self.encodeStrict(quote_char) + yield self.encode(v) + yield self.encodeStrict(quote_char) + else: + yield self.encode(v) + if name in voidElements and self.use_trailing_solidus: + if
self.space_before_trailing_solidus: + yield self.encodeStrict(" /") + else: + yield self.encodeStrict("/") + yield self.encode(">") + + elif type == "EndTag": + name = token["name"] + if name in rcdataElements: + in_cdata = False + elif in_cdata: + self.serializeError(_("Unexpected child element of a CDATA element")) + yield self.encodeStrict("</%s>" % name) + + elif type == "Comment": + data = token["data"] + if data.find("--") >= 0: + self.serializeError(_("Comment contains --")) + yield self.encodeStrict("<!--%s-->" % token["data"]) + + elif type == "Entity": + name = token["name"] + key = name + ";" + if not key in entities: + self.serializeError(_("Entity %s not recognized" % name)) + if self.resolve_entities and key not in xmlEntities: + data = entities[key] + else: + data = "&%s;" % name + yield self.encodeStrict(data) + + else: + self.serializeError(token["data"]) + + def render(self, treewalker, encoding=None): + if encoding: + return b"".join(list(self.serialize(treewalker, encoding))) + else: + return "".join(list(self.serialize(treewalker))) + + def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): + # XXX The idea is to make data mandatory. + self.errors.append(data) + if self.strict: + raise SerializeError + + +class SerializeError(Exception): + """Error in serialized tree""" + pass diff --git a/awx/lib/site-packages/pip/vendor/html5lib/tokenizer.py b/awx/lib/site-packages/pip/vendor/html5lib/tokenizer.py new file mode 100644 index 0000000000..797745787a --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/tokenizer.py @@ -0,0 +1,1731 @@ +from __future__ import absolute_import, division, unicode_literals + +try: + chr = unichr # flake8: noqa +except NameError: + pass + +from collections import deque + +from .constants import spaceCharacters +from .constants import entities +from .constants import asciiLetters, asciiUpper2Lower +from .constants import digits, hexDigits, EOF +from .constants import tokenTypes, tagTokenTypes +from .constants import replacementCharacters + +from .inputstream import HTMLInputStream + +from .trie import Trie + +entitiesTrie = Trie(entities) + + +class HTMLTokenizer(object): + """ This class takes care of tokenizing HTML. + + * self.currentToken + Holds the token that is currently being processed. + + * self.state + Holds a reference to the method to be invoked... XXX + + * self.stream + Points to HTMLInputStream object. + """ + + def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, + lowercaseElementName=True, lowercaseAttrName=True, parser=None): + + self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet) + self.parser = parser + + # Perform case conversions? + self.lowercaseElementName = lowercaseElementName + self.lowercaseAttrName = lowercaseAttrName + + # Setup the initial tokenizer state + self.escapeFlag = False + self.lastFourChars = [] + self.state = self.dataState + self.escape = False + + # The current token being created + self.currentToken = None + super(HTMLTokenizer, self).__init__() + + def __iter__(self): + """ This is where the magic happens. + + We do our usual processing through the states and when we have a token + to return we yield the token which pauses processing until the next token + is requested. + """ + self.tokenQueue = deque([]) + # Start processing. When EOF is reached self.state will return False + # instead of True and the loop will terminate.
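+ # Editor's sketch of typical use (illustrative, assumes bytes input): + # >>> tok = HTMLTokenizer(b"<p id=x>hi</p>") + # >>> [(t["type"], t.get("name")) for t in tok] + # yields StartTag/Characters/EndTag token dicts for "p", with any + # stream errors surfaced first as ParseError tokens.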
+ while self.state(): + while self.stream.errors: + yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} + while self.tokenQueue: + yield self.tokenQueue.popleft() + + def consumeNumberEntity(self, isHex): + """This function returns either U+FFFD or the character based on the + decimal or hexadecimal representation. It also discards ";" if present. + If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. + """ + + allowed = digits + radix = 10 + if isHex: + allowed = hexDigits + radix = 16 + + charStack = [] + + # Consume all the characters that are in range while making sure we + # don't hit an EOF. + c = self.stream.char() + while c in allowed and c is not EOF: + charStack.append(c) + c = self.stream.char() + + # Convert the set of characters consumed to an int. + charAsInt = int("".join(charStack), radix) + + # Certain characters get replaced with others + if charAsInt in replacementCharacters: + char = replacementCharacters[charAsInt] + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "illegal-codepoint-for-numeric-entity", + "datavars": {"charAsInt": charAsInt}}) + elif ((0xD800 <= charAsInt <= 0xDFFF) or + (charAsInt > 0x10FFFF)): + char = "\uFFFD" + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "illegal-codepoint-for-numeric-entity", + "datavars": {"charAsInt": charAsInt}}) + else: + # Should speed up this check somehow (e.g. move the set to a constant) + if ((0x0001 <= charAsInt <= 0x0008) or + (0x000E <= charAsInt <= 0x001F) or + (0x007F <= charAsInt <= 0x009F) or + (0xFDD0 <= charAsInt <= 0xFDEF) or + charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, + 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, + 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, + 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, + 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, + 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, + 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, + 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, + 0xFFFFF, 0x10FFFE, 0x10FFFF])): + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": + "illegal-codepoint-for-numeric-entity", + "datavars": {"charAsInt": charAsInt}}) + try: + # Try/except needed as UCS-2 Python builds' unichar only works + # within the BMP. + char = chr(charAsInt) + except ValueError: + v = charAsInt - 0x10000 + char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) + + # Discard the ; if present. Otherwise, put it back on the queue and + # invoke parseError on parser. 
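+ # Editor's illustration: after "&#x41;" the loop above consumed "41", + # so charAsInt is 0x41 and char is "A"; for an out-of-range reference + # such as "&#x110000;" char is "\uFFFD" and a parse error was queued.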
+ if c != ";": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "numeric-entity-without-semicolon"}) + self.stream.unget(c) + + return char + + def consumeEntity(self, allowedChar=None, fromAttribute=False): + # Initialise to the default output for when no entity is matched + output = "&" + + charStack = [self.stream.char()] + if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") + or (allowedChar is not None and allowedChar == charStack[0])): + self.stream.unget(charStack[0]) + + elif charStack[0] == "#": + # Read the next character to see if it's hex or decimal + hex = False + charStack.append(self.stream.char()) + if charStack[-1] in ("x", "X"): + hex = True + charStack.append(self.stream.char()) + + # charStack[-1] should be the first digit + if (hex and charStack[-1] in hexDigits) \ + or (not hex and charStack[-1] in digits): + # At least one digit found, so consume the whole number + self.stream.unget(charStack[-1]) + output = self.consumeNumberEntity(hex) + else: + # No digits found + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "expected-numeric-entity"}) + self.stream.unget(charStack.pop()) + output = "&" + "".join(charStack) + + else: + # At this point in the process might have named entity. Entities + # are stored in the global variable "entities". + # + # Consume characters and compare to these to a substring of the + # entity names in the list until the substring no longer matches. + while (charStack[-1] is not EOF): + if not entitiesTrie.has_keys_with_prefix("".join(charStack)): + break + charStack.append(self.stream.char()) + + # At this point we have a string that starts with some characters + # that may match an entity + # Try to find the longest entity the string will match to take care + # of ¬i for instance. + try: + entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) + entityLength = len(entityName) + except KeyError: + entityName = None + + if entityName is not None: + if entityName[-1] != ";": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "named-entity-without-semicolon"}) + if (entityName[-1] != ";" and fromAttribute and + (charStack[entityLength] in asciiLetters or + charStack[entityLength] in digits or + charStack[entityLength] == "=")): + self.stream.unget(charStack.pop()) + output = "&" + "".join(charStack) + else: + output = entities[entityName] + self.stream.unget(charStack.pop()) + output += "".join(charStack[entityLength:]) + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-named-entity"}) + self.stream.unget(charStack.pop()) + output = "&" + "".join(charStack) + + if fromAttribute: + self.currentToken["data"][-1][1] += output + else: + if output in spaceCharacters: + tokenType = "SpaceCharacters" + else: + tokenType = "Characters" + self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) + + def processEntityInAttribute(self, allowedChar): + """This method replaces the need for "entityInAttributeValueState". + """ + self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) + + def emitCurrentToken(self): + """This method is a generic handler for emitting the tags. It also sets + the state to "data" because that's what's needed after a token has been + emitted. 
+ """ + token = self.currentToken + # Add token to the queue to be yielded + if (token["type"] in tagTokenTypes): + if self.lowercaseElementName: + token["name"] = token["name"].translate(asciiUpper2Lower) + if token["type"] == tokenTypes["EndTag"]: + if token["data"]: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "attributes-in-end-tag"}) + if token["selfClosing"]: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "self-closing-flag-on-end-tag"}) + self.tokenQueue.append(token) + self.state = self.dataState + + # Below are the various tokenizer states worked out. + def dataState(self): + data = self.stream.char() + if data == "&": + self.state = self.entityDataState + elif data == "<": + self.state = self.tagOpenState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\u0000"}) + elif data is EOF: + # Tokenization ends. + return False + elif data in spaceCharacters: + # Directly after emitting a token you switch back to the "data + # state". At that point spaceCharacters are important so they are + # emitted separately. + self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": + data + self.stream.charsUntil(spaceCharacters, True)}) + # No need to update lastFourChars here, since the first space will + # have already been appended to lastFourChars and will have broken + # any <!-- or --> sequences + else: + chars = self.stream.charsUntil(("&", "<", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def entityDataState(self): + self.consumeEntity() + self.state = self.dataState + return True + + def rcdataState(self): + data = self.stream.char() + if data == "&": + self.state = self.characterReferenceInRcdata + elif data == "<": + self.state = self.rcdataLessThanSignState + elif data == EOF: + # Tokenization ends. + return False + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data in spaceCharacters: + # Directly after emitting a token you switch back to the "data + # state". At that point spaceCharacters are important so they are + # emitted separately. + self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": + data + self.stream.charsUntil(spaceCharacters, True)}) + # No need to update lastFourChars here, since the first space will + # have already been appended to lastFourChars and will have broken + # any <!-- or --> sequences + else: + chars = self.stream.charsUntil(("&", "<", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def characterReferenceInRcdata(self): + self.consumeEntity() + self.state = self.rcdataState + return True + + def rawtextState(self): + data = self.stream.char() + if data == "<": + self.state = self.rawtextLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data == EOF: + # Tokenization ends. 
+ return False + else: + chars = self.stream.charsUntil(("<", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def scriptDataState(self): + data = self.stream.char() + if data == "<": + self.state = self.scriptDataLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data == EOF: + # Tokenization ends. + return False + else: + chars = self.stream.charsUntil(("<", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def plaintextState(self): + data = self.stream.char() + if data == EOF: + # Tokenization ends. + return False + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + self.stream.charsUntil("\u0000")}) + return True + + def tagOpenState(self): + data = self.stream.char() + if data == "!": + self.state = self.markupDeclarationOpenState + elif data == "/": + self.state = self.closeTagOpenState + elif data in asciiLetters: + self.currentToken = {"type": tokenTypes["StartTag"], + "name": data, "data": [], + "selfClosing": False, + "selfClosingAcknowledged": False} + self.state = self.tagNameState + elif data == ">": + # XXX In theory it could be something besides a tag name. But + # do we really care? + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-tag-name-but-got-right-bracket"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) + self.state = self.dataState + elif data == "?": + # XXX In theory it could be something besides a tag name. But + # do we really care? + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-tag-name-but-got-question-mark"}) + self.stream.unget(data) + self.state = self.bogusCommentState + else: + # XXX + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-tag-name"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.dataState + return True + + def closeTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.currentToken = {"type": tokenTypes["EndTag"], "name": data, + "data": [], "selfClosing": False} + self.state = self.tagNameState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-closing-tag-but-got-right-bracket"}) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-closing-tag-but-got-eof"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.state = self.dataState + else: + # XXX data can be _'_... 
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-closing-tag-but-got-char", + "datavars": {"data": data}}) + self.stream.unget(data) + self.state = self.bogusCommentState + return True + + def tagNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeAttributeNameState + elif data == ">": + self.emitCurrentToken() + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-tag-name"}) + self.state = self.dataState + elif data == "/": + self.state = self.selfClosingStartTagState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["name"] += "\uFFFD" + else: + self.currentToken["name"] += data + # (Don't use charsUntil here, because tag names are + # very short and it's faster to not do anything fancy) + return True + + def rcdataLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.temporaryBuffer = "" + self.state = self.rcdataEndTagOpenState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.rcdataState + return True + + def rcdataEndTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.temporaryBuffer += data + self.state = self.rcdataEndTagNameState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.stream.unget(data) + self.state = self.rcdataState + return True + + def rcdataEndTagNameState(self): + appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() + data = self.stream.char() + if data in spaceCharacters and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.beforeAttributeNameState + elif data == "/" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.selfClosingStartTagState + elif data == ">" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.emitCurrentToken() + self.state = self.dataState + elif data in asciiLetters: + self.temporaryBuffer += data + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "</" + self.temporaryBuffer}) + self.stream.unget(data) + self.state = self.rcdataState + return True + + def rawtextLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.temporaryBuffer = "" + self.state = self.rawtextEndTagOpenState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.rawtextState + return True + + def rawtextEndTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.temporaryBuffer += data + self.state = self.rawtextEndTagNameState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.stream.unget(data) + self.state = self.rawtextState + return True + + def rawtextEndTagNameState(self): + appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() + data = self.stream.char() + if data in spaceCharacters and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": 
False} + self.state = self.beforeAttributeNameState + elif data == "/" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.selfClosingStartTagState + elif data == ">" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.emitCurrentToken() + self.state = self.dataState + elif data in asciiLetters: + self.temporaryBuffer += data + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "</" + self.temporaryBuffer}) + self.stream.unget(data) + self.state = self.rawtextState + return True + + def scriptDataLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.temporaryBuffer = "" + self.state = self.scriptDataEndTagOpenState + elif data == "!": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"}) + self.state = self.scriptDataEscapeStartState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEndTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.temporaryBuffer += data + self.state = self.scriptDataEndTagNameState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEndTagNameState(self): + appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() + data = self.stream.char() + if data in spaceCharacters and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.beforeAttributeNameState + elif data == "/" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.selfClosingStartTagState + elif data == ">" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.emitCurrentToken() + self.state = self.dataState + elif data in asciiLetters: + self.temporaryBuffer += data + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "</" + self.temporaryBuffer}) + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEscapeStartState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataEscapeStartDashState + else: + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEscapeStartDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataEscapedDashDashState + else: + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEscapedState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataEscapedDashState + elif data == "<": + self.state = self.scriptDataEscapedLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + 
self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data == EOF: + self.state = self.dataState + else: + chars = self.stream.charsUntil(("<", "-", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def scriptDataEscapedDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataEscapedDashDashState + elif data == "<": + self.state = self.scriptDataEscapedLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + self.state = self.scriptDataEscapedState + elif data == EOF: + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.state = self.scriptDataEscapedState + return True + + def scriptDataEscapedDashDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + elif data == "<": + self.state = self.scriptDataEscapedLessThanSignState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) + self.state = self.scriptDataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + self.state = self.scriptDataEscapedState + elif data == EOF: + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.state = self.scriptDataEscapedState + return True + + def scriptDataEscapedLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.temporaryBuffer = "" + self.state = self.scriptDataEscapedEndTagOpenState + elif data in asciiLetters: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) + self.temporaryBuffer = data + self.state = self.scriptDataDoubleEscapeStartState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.scriptDataEscapedState + return True + + def scriptDataEscapedEndTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.temporaryBuffer = data + self.state = self.scriptDataEscapedEndTagNameState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.stream.unget(data) + self.state = self.scriptDataEscapedState + return True + + def scriptDataEscapedEndTagNameState(self): + appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() + data = self.stream.char() + if data in spaceCharacters and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.beforeAttributeNameState + elif data == "/" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.selfClosingStartTagState + elif data == ">" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.emitCurrentToken() + self.state = self.dataState + elif data in asciiLetters: + 
self.temporaryBuffer += data + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "</" + self.temporaryBuffer}) + self.stream.unget(data) + self.state = self.scriptDataEscapedState + return True + + def scriptDataDoubleEscapeStartState(self): + data = self.stream.char() + if data in (spaceCharacters | frozenset(("/", ">"))): + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + if self.temporaryBuffer.lower() == "script": + self.state = self.scriptDataDoubleEscapedState + else: + self.state = self.scriptDataEscapedState + elif data in asciiLetters: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.temporaryBuffer += data + else: + self.stream.unget(data) + self.state = self.scriptDataEscapedState + return True + + def scriptDataDoubleEscapedState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataDoubleEscapedDashState + elif data == "<": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.state = self.scriptDataDoubleEscapedLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data == EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-script-in-script"}) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + return True + + def scriptDataDoubleEscapedDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataDoubleEscapedDashDashState + elif data == "<": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.state = self.scriptDataDoubleEscapedLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + self.state = self.scriptDataDoubleEscapedState + elif data == EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-script-in-script"}) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.state = self.scriptDataDoubleEscapedState + return True + + def scriptDataDoubleEscapedDashDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + elif data == "<": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.state = self.scriptDataDoubleEscapedLessThanSignState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) + self.state = self.scriptDataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + self.state = self.scriptDataDoubleEscapedState + elif data == EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-script-in-script"}) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.state = self.scriptDataDoubleEscapedState + return True + + def 
scriptDataDoubleEscapedLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) + self.temporaryBuffer = "" + self.state = self.scriptDataDoubleEscapeEndState + else: + self.stream.unget(data) + self.state = self.scriptDataDoubleEscapedState + return True + + def scriptDataDoubleEscapeEndState(self): + data = self.stream.char() + if data in (spaceCharacters | frozenset(("/", ">"))): + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + if self.temporaryBuffer.lower() == "script": + self.state = self.scriptDataEscapedState + else: + self.state = self.scriptDataDoubleEscapedState + elif data in asciiLetters: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.temporaryBuffer += data + else: + self.stream.unget(data) + self.state = self.scriptDataDoubleEscapedState + return True + + def beforeAttributeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data in asciiLetters: + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + elif data == ">": + self.emitCurrentToken() + elif data == "/": + self.state = self.selfClosingStartTagState + elif data in ("'", '"', "=", "<"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "invalid-character-in-attribute-name"}) + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"].append(["\uFFFD", ""]) + self.state = self.attributeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-attribute-name-but-got-eof"}) + self.state = self.dataState + else: + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + return True + + def attributeNameState(self): + data = self.stream.char() + leavingThisState = True + emitToken = False + if data == "=": + self.state = self.beforeAttributeValueState + elif data in asciiLetters: + self.currentToken["data"][-1][0] += data +\ + self.stream.charsUntil(asciiLetters, True) + leavingThisState = False + elif data == ">": + # XXX If we emit here the attributes are converted to a dict + # without being checked and when the code below runs we error + # because data is a dict not a list + emitToken = True + elif data in spaceCharacters: + self.state = self.afterAttributeNameState + elif data == "/": + self.state = self.selfClosingStartTagState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][0] += "\uFFFD" + leavingThisState = False + elif data in ("'", '"', "<"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": + "invalid-character-in-attribute-name"}) + self.currentToken["data"][-1][0] += data + leavingThisState = False + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "eof-in-attribute-name"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][0] += data + leavingThisState = False + + if leavingThisState: + # Attributes are not dropped at this stage. That happens when the + # start tag token is emitted so values can still be safely appended + # to attributes, but we do want to report the parse error in time. 
+ if self.lowercaseAttrName: + self.currentToken["data"][-1][0] = ( + self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) + for name, value in self.currentToken["data"][:-1]: + if self.currentToken["data"][-1][0] == name: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "duplicate-attribute"}) + break + # XXX Fix for above XXX + if emitToken: + self.emitCurrentToken() + return True + + def afterAttributeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data == "=": + self.state = self.beforeAttributeValueState + elif data == ">": + self.emitCurrentToken() + elif data in asciiLetters: + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + elif data == "/": + self.state = self.selfClosingStartTagState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"].append(["\uFFFD", ""]) + self.state = self.attributeNameState + elif data in ("'", '"', "<"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "invalid-character-after-attribute-name"}) + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-end-of-tag-but-got-eof"}) + self.state = self.dataState + else: + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + return True + + def beforeAttributeValueState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data == "\"": + self.state = self.attributeValueDoubleQuotedState + elif data == "&": + self.state = self.attributeValueUnQuotedState + self.stream.unget(data) + elif data == "'": + self.state = self.attributeValueSingleQuotedState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-attribute-value-but-got-right-bracket"}) + self.emitCurrentToken() + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][1] += "\uFFFD" + self.state = self.attributeValueUnQuotedState + elif data in ("=", "<", "`"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "equals-in-unquoted-attribute-value"}) + self.currentToken["data"][-1][1] += data + self.state = self.attributeValueUnQuotedState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-attribute-value-but-got-eof"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][1] += data + self.state = self.attributeValueUnQuotedState + return True + + def attributeValueDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.afterAttributeValueState + elif data == "&": + self.processEntityInAttribute('"') + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][1] += "\uFFFD" + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-attribute-value-double-quote"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][1] += data +\ + self.stream.charsUntil(("\"", "&", "\u0000")) + return True + + def attributeValueSingleQuotedState(self): + data = self.stream.char() + if data == "'": 
+ self.state = self.afterAttributeValueState + elif data == "&": + self.processEntityInAttribute("'") + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][1] += "\uFFFD" + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-attribute-value-single-quote"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][1] += data +\ + self.stream.charsUntil(("'", "&", "\u0000")) + return True + + def attributeValueUnQuotedState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeAttributeNameState + elif data == "&": + self.processEntityInAttribute(">") + elif data == ">": + self.emitCurrentToken() + elif data in ('"', "'", "=", "<", "`"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-character-in-unquoted-attribute-value"}) + self.currentToken["data"][-1][1] += data + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][1] += "\uFFFD" + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-attribute-value-no-quotes"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][1] += data + self.stream.charsUntil( + frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) + return True + + def afterAttributeValueState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeAttributeNameState + elif data == ">": + self.emitCurrentToken() + elif data == "/": + self.state = self.selfClosingStartTagState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-EOF-after-attribute-value"}) + self.stream.unget(data) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-character-after-attribute-value"}) + self.stream.unget(data) + self.state = self.beforeAttributeNameState + return True + + def selfClosingStartTagState(self): + data = self.stream.char() + if data == ">": + self.currentToken["selfClosing"] = True + self.emitCurrentToken() + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": + "unexpected-EOF-after-solidus-in-tag"}) + self.stream.unget(data) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-character-after-solidus-in-tag"}) + self.stream.unget(data) + self.state = self.beforeAttributeNameState + return True + + def bogusCommentState(self): + # Make a new comment token and give it as value all the characters + # until the first > or EOF (charsUntil checks for EOF automatically) + # and emit it. + data = self.stream.charsUntil(">") + data = data.replace("\u0000", "\uFFFD") + self.tokenQueue.append( + {"type": tokenTypes["Comment"], "data": data}) + + # Eat the character directly after the bogus comment which is either a + # ">" or an EOF. 
+ self.stream.char() + self.state = self.dataState + return True + + def markupDeclarationOpenState(self): + charStack = [self.stream.char()] + if charStack[-1] == "-": + charStack.append(self.stream.char()) + if charStack[-1] == "-": + self.currentToken = {"type": tokenTypes["Comment"], "data": ""} + self.state = self.commentStartState + return True + elif charStack[-1] in ('d', 'D'): + matched = True + for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), + ('y', 'Y'), ('p', 'P'), ('e', 'E')): + charStack.append(self.stream.char()) + if charStack[-1] not in expected: + matched = False + break + if matched: + self.currentToken = {"type": tokenTypes["Doctype"], + "name": "", + "publicId": None, "systemId": None, + "correct": True} + self.state = self.doctypeState + return True + elif (charStack[-1] == "[" and + self.parser is not None and + self.parser.tree.openElements and + self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): + matched = True + for expected in ["C", "D", "A", "T", "A", "["]: + charStack.append(self.stream.char()) + if charStack[-1] != expected: + matched = False + break + if matched: + self.state = self.cdataSectionState + return True + + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-dashes-or-doctype"}) + + while charStack: + self.stream.unget(charStack.pop()) + self.state = self.bogusCommentState + return True + + def commentStartState(self): + data = self.stream.char() + if data == "-": + self.state = self.commentStartDashState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "incorrect-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += data + self.state = self.commentState + return True + + def commentStartDashState(self): + data = self.stream.char() + if data == "-": + self.state = self.commentEndState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "-\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "incorrect-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += "-" + data + self.state = self.commentState + return True + + def commentState(self): + data = self.stream.char() + if data == "-": + self.state = self.commentEndDashState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "\uFFFD" + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "eof-in-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += data + \ + self.stream.charsUntil(("-", "\u0000")) + return True + + def commentEndDashState(self): + data = self.stream.char() + if data == "-": + self.state = 
self.commentEndState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "-\uFFFD" + self.state = self.commentState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment-end-dash"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += "-" + data + self.state = self.commentState + return True + + def commentEndState(self): + data = self.stream.char() + if data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "--\uFFFD" + self.state = self.commentState + elif data == "!": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-bang-after-double-dash-in-comment"}) + self.state = self.commentEndBangState + elif data == "-": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-dash-after-double-dash-in-comment"}) + self.currentToken["data"] += data + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment-double-dash"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + # XXX + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-comment"}) + self.currentToken["data"] += "--" + data + self.state = self.commentState + return True + + def commentEndBangState(self): + data = self.stream.char() + if data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == "-": + self.currentToken["data"] += "--!" + self.state = self.commentEndDashState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "--!\uFFFD" + self.state = self.commentState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment-end-bang-state"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += "--!" 
+ data + self.state = self.commentState + return True + + def doctypeState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeDoctypeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-doctype-name-but-got-eof"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "need-space-after-doctype"}) + self.stream.unget(data) + self.state = self.beforeDoctypeNameState + return True + + def beforeDoctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-doctype-name-but-got-right-bracket"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["name"] = "\uFFFD" + self.state = self.doctypeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-doctype-name-but-got-eof"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["name"] = data + self.state = self.doctypeNameState + return True + + def doctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) + self.state = self.afterDoctypeNameState + elif data == ">": + self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["name"] += "\uFFFD" + self.state = self.doctypeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype-name"}) + self.currentToken["correct"] = False + self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["name"] += data + return True + + def afterDoctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.currentToken["correct"] = False + self.stream.unget(data) + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + if data in ("p", "P"): + matched = True + for expected in (("u", "U"), ("b", "B"), ("l", "L"), + ("i", "I"), ("c", "C")): + data = self.stream.char() + if data not in expected: + matched = False + break + if matched: + self.state = self.afterDoctypePublicKeywordState + return True + elif data in ("s", "S"): + matched = True + for expected in (("y", "Y"), ("s", "S"), ("t", "T"), + ("e", "E"), ("m", "M")): + data = self.stream.char() + if data not in expected: + matched = False + break + if matched: + self.state = self.afterDoctypeSystemKeywordState + return True + + # All the characters read before the current 'data' will be + # [a-zA-Z], 
so they're garbage in the bogus doctype and can be + # discarded; only the latest character might be '>' or EOF + # and needs to be ungetted + self.stream.unget(data) + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-space-or-right-bracket-in-doctype", "datavars": + {"data": data}}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + + return True + + def afterDoctypePublicKeywordState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeDoctypePublicIdentifierState + elif data in ("'", '"'): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.stream.unget(data) + self.state = self.beforeDoctypePublicIdentifierState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.stream.unget(data) + self.state = self.beforeDoctypePublicIdentifierState + return True + + def beforeDoctypePublicIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == "\"": + self.currentToken["publicId"] = "" + self.state = self.doctypePublicIdentifierDoubleQuotedState + elif data == "'": + self.currentToken["publicId"] = "" + self.state = self.doctypePublicIdentifierSingleQuotedState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + return True + + def doctypePublicIdentifierDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.afterDoctypePublicIdentifierState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["publicId"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["publicId"] += data + return True + + def doctypePublicIdentifierSingleQuotedState(self): + data = self.stream.char() + if data == "'": + self.state = self.afterDoctypePublicIdentifierState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["publicId"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": 
tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["publicId"] += data + return True + + def afterDoctypePublicIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.betweenDoctypePublicAndSystemIdentifiersState + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == '"': + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierDoubleQuotedState + elif data == "'": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierSingleQuotedState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + return True + + def betweenDoctypePublicAndSystemIdentifiersState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == '"': + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierDoubleQuotedState + elif data == "'": + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierSingleQuotedState + elif data == EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + return True + + def afterDoctypeSystemKeywordState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeDoctypeSystemIdentifierState + elif data in ("'", '"'): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.stream.unget(data) + self.state = self.beforeDoctypeSystemIdentifierState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.stream.unget(data) + self.state = self.beforeDoctypeSystemIdentifierState + return True + + def beforeDoctypeSystemIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == "\"": + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierDoubleQuotedState + elif data == "'": + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierSingleQuotedState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + return True + + def doctypeSystemIdentifierDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.afterDoctypeSystemIdentifierState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["systemId"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["systemId"] += data + return True + + def doctypeSystemIdentifierSingleQuotedState(self): + data = self.stream.char() + if data == "'": + self.state = self.afterDoctypeSystemIdentifierState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["systemId"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["systemId"] += data + return True + + def afterDoctypeSystemIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.state = self.bogusDoctypeState + return True + + def bogusDoctypeState(self): + data = self.stream.char() + if data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + # XXX EMIT + self.stream.unget(data) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + pass + return True + + def cdataSectionState(self): + data = [] + while True: + data.append(self.stream.charsUntil("]")) + data.append(self.stream.charsUntil(">")) + char = self.stream.char() + if char == EOF: + break + else: + assert char == ">" + if data[-1][-2:] == "]]": + data[-1] = data[-1][:-2] + break + else: + data.append(char) + + data = "".join(data) + # Deal with null here rather than in the parser + nullCount = data.count("\u0000") + if nullCount > 0: + for i in range(nullCount): + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + data = 
data.replace("\u0000", "\uFFFD")
+        if data:
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": data})
+        self.state = self.dataState
+        return True
diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/__init__.py b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/__init__.py
new file mode 100644
index 0000000000..6a6b2a4c45
--- /dev/null
+++ b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/__init__.py
@@ -0,0 +1,76 @@
+"""A collection of modules for building different kinds of tree from
+HTML documents.
+
+To create a treebuilder for a new type of tree, you need to
+implement several things:
+
+1) A set of classes for various types of elements: Document, Doctype,
+Comment, Element. These must implement the interface of
+treebuilders._base.Node (although comment nodes have a different
+signature for their constructor, see treebuilders.etree.Comment).
+Textual content may also be implemented as another node type, or not, as
+your tree implementation requires.
+
+2) A treebuilder object (called TreeBuilder by convention) that
+inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
+documentClass - the class to use for the bottommost node of a document
+elementClass - the class to use for HTML Elements
+commentClass - the class to use for comments
+doctypeClass - the class to use for doctypes
+It also has one required method:
+getDocument - Returns the root node of the complete document tree
+
+3) If you wish to run the unit tests, you must also create a
+testSerializer method on your treebuilder which accepts a node and
+returns a string containing the node and its children serialized according
+to the format used in the unittests
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+from ..utils import default_etree
+
+treeBuilderCache = {}
+
+
+def getTreeBuilder(treeType, implementation=None, **kwargs):
+    """Get a TreeBuilder class for various types of tree with built-in support
+
+    treeType - the name of the tree type required (case-insensitive). Supported
+               values are:
+
+               "dom" - A generic builder for DOM implementations, defaulting to
+                       an xml.dom.minidom based implementation.
+               "etree" - A generic builder for tree implementations exposing an
+                         ElementTree-like interface, defaulting to
+                         xml.etree.cElementTree if available and
+                         xml.etree.ElementTree if not.
+               "lxml" - An etree-based builder for lxml.etree, handling
+                        limitations of lxml's implementation.
+
+    implementation - (Currently applies to the "etree" and "dom" tree types). A
+                      module implementing the tree type e.g.
+                      xml.etree.ElementTree or xml.etree.cElementTree."""
+
+    treeType = treeType.lower()
+    if treeType not in treeBuilderCache:
+        if treeType == "dom":
+            from . import dom
+            # Come up with a sane default (pref. from the stdlib)
+            if implementation is None:
+                from xml.dom import minidom
+                implementation = minidom
+            # NEVER cache here, caching is done in the dom submodule
+            return dom.getDomModule(implementation, **kwargs).TreeBuilder
+        elif treeType == "lxml":
+            from . import etree_lxml
+            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
+        elif treeType == "etree":
+            from . import etree
+            if implementation is None:
+                implementation = default_etree
+            # NEVER cache here, caching is done in the etree submodule
+            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
+        else:
+            raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
+    return treeBuilderCache.get(treeType)
diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/_base.py b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/_base.py
new file mode 100644
index 0000000000..8d5bc7e8d4
--- /dev/null
+++ b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/_base.py
@@ -0,0 +1,377 @@
+from __future__ import absolute_import, division, unicode_literals
+from pip.vendor.six import text_type
+
+from ..constants import scopingElements, tableInsertModeElements, namespaces
+
+# The scope markers are inserted when entering object elements,
+# marquees, table cells, and table captions, and are used to prevent formatting
+# from "leaking" into tables, object elements, and marquees.
+Marker = None
+
+listElementsMap = {
+    None: (frozenset(scopingElements), False),
+    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
+    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
+                                              (namespaces["html"], "ul")])), False),
+    "table": (frozenset([(namespaces["html"], "html"),
+                         (namespaces["html"], "table")]), False),
+    "select": (frozenset([(namespaces["html"], "optgroup"),
+                          (namespaces["html"], "option")]), True)
+}
+
+
+class Node(object):
+    def __init__(self, name):
+        """Node representing an item in the tree.
+        name - The tag name associated with the node
+        parent - The parent of the current node (or None for the document node)
+        value - The value of the current node (applies to text nodes and
+        comments)
+        attributes - a dict holding name, value pairs for attributes of the node
+        childNodes - a list of child nodes of the current node. This must
+        include all elements but not necessarily other node types
+        _flags - A list of miscellaneous flags that can be set on the node
+        """
+        self.name = name
+        self.parent = None
+        self.value = None
+        self.attributes = {}
+        self.childNodes = []
+        self._flags = []
+
+    def __str__(self):
+        attributesStr = " ".join(["%s=\"%s\"" % (name, value)
+                                  for name, value in
+                                  self.attributes.items()])
+        if attributesStr:
+            return "<%s %s>" % (self.name, attributesStr)
+        else:
+            return "<%s>" % (self.name)
+
+    def __repr__(self):
+        return "<%s>" % (self.name)
+
+    def appendChild(self, node):
+        """Insert node as a child of the current node
+        """
+        raise NotImplementedError
+
+    def insertText(self, data, insertBefore=None):
+        """Insert data as text in the current node, positioned before the
+        start of node insertBefore or to the end of the node's text.
+        """
+        raise NotImplementedError
+
+    def insertBefore(self, node, refNode):
+        """Insert node as a child of the current node, before refNode in the
+        list of child nodes. Raises ValueError if refNode is not a child of
+        the current node"""
+        raise NotImplementedError
+
+    def removeChild(self, node):
+        """Remove node from the children of the current node
+        """
+        raise NotImplementedError
+
+    def reparentChildren(self, newParent):
+        """Move all the children of the current node to newParent.
+        This is needed so that trees that don't store text as nodes move the
+        text in the correct way
+        """
+        # XXX - should this method be made more general?
+        for child in self.childNodes:
+            newParent.appendChild(child)
+        self.childNodes = []
+
+    def cloneNode(self):
+        """Return a shallow copy of the current node i.e. a node with the same
+        name and attributes but with no parent or child nodes
+        """
+        raise NotImplementedError
+
+    def hasContent(self):
+        """Return true if the node has children or text, false otherwise
+        """
+        raise NotImplementedError
+
+
+class ActiveFormattingElements(list):
+    def append(self, node):
+        equalCount = 0
+        if node != Marker:
+            for element in self[::-1]:
+                if element == Marker:
+                    break
+                if self.nodesEqual(element, node):
+                    equalCount += 1
+                if equalCount == 3:
+                    self.remove(element)
+                    break
+        list.append(self, node)
+
+    def nodesEqual(self, node1, node2):
+        if not node1.nameTuple == node2.nameTuple:
+            return False
+
+        if not node1.attributes == node2.attributes:
+            return False
+
+        return True
+
+
+class TreeBuilder(object):
+    """Base treebuilder implementation
+    documentClass - the class to use for the bottommost node of a document
+    elementClass - the class to use for HTML Elements
+    commentClass - the class to use for comments
+    doctypeClass - the class to use for doctypes
+    """
+
+    # Document class
+    documentClass = None
+
+    # The class to use for creating a node
+    elementClass = None
+
+    # The class to use for creating comments
+    commentClass = None
+
+    # The class to use for creating doctypes
+    doctypeClass = None
+
+    # Fragment class
+    fragmentClass = None
+
+    def __init__(self, namespaceHTMLElements):
+        if namespaceHTMLElements:
+            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
+        else:
+            self.defaultNamespace = None
+        self.reset()
+
+    def reset(self):
+        self.openElements = []
+        self.activeFormattingElements = ActiveFormattingElements()
+
+        # XXX - rename these to headElement, formElement
+        self.headPointer = None
+        self.formPointer = None
+
+        self.insertFromTable = False
+
+        self.document = self.documentClass()
+
+    def elementInScope(self, target, variant=None):
+
+        # If we pass a node in we match that. if we pass a string
+        # match any node with that name
+        exactNode = hasattr(target, "nameTuple")
+
+        listElements, invert = listElementsMap[variant]
+
+        for node in reversed(self.openElements):
+            if (node.name == target and not exactNode or
+                    node == target and exactNode):
+                return True
+            elif (invert ^ (node.nameTuple in listElements)):
+                return False
+
+        assert False  # We should never reach this point
+
+    def reconstructActiveFormattingElements(self):
+        # Within this algorithm the order of steps described in the
+        # specification is not quite the same as the order of steps in the
+        # code. It should still do the same though.
+
+        # Step 1: stop the algorithm when there's nothing to do.
+        if not self.activeFormattingElements:
+            return
+
+        # Step 2 and step 3: we start with the last element. So i is -1.
+        i = len(self.activeFormattingElements) - 1
+        entry = self.activeFormattingElements[i]
+        if entry == Marker or entry in self.openElements:
+            return
+
+        # Step 6
+        while entry != Marker and entry not in self.openElements:
+            if i == 0:
+                # This will be reset to 0 below
+                i = -1
+                break
+            i -= 1
+            # Step 5: let entry be one earlier in the list.
+ entry = self.activeFormattingElements[i] + + while True: + # Step 7 + i += 1 + + # Step 8 + entry = self.activeFormattingElements[i] + clone = entry.cloneNode() # Mainly to get a new copy of the attributes + + # Step 9 + element = self.insertElement({"type": "StartTag", + "name": clone.name, + "namespace": clone.namespace, + "data": clone.attributes}) + + # Step 10 + self.activeFormattingElements[i] = element + + # Step 11 + if element == self.activeFormattingElements[-1]: + break + + def clearActiveFormattingElements(self): + entry = self.activeFormattingElements.pop() + while self.activeFormattingElements and entry != Marker: + entry = self.activeFormattingElements.pop() + + def elementInActiveFormattingElements(self, name): + """Check if an element exists between the end of the active + formatting elements and the last marker. If it does, return it, else + return false""" + + for item in self.activeFormattingElements[::-1]: + # Check for Marker first because if it's a Marker it doesn't have a + # name attribute. + if item == Marker: + break + elif item.name == name: + return item + return False + + def insertRoot(self, token): + element = self.createElement(token) + self.openElements.append(element) + self.document.appendChild(element) + + def insertDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + + doctype = self.doctypeClass(name, publicId, systemId) + self.document.appendChild(doctype) + + def insertComment(self, token, parent=None): + if parent is None: + parent = self.openElements[-1] + parent.appendChild(self.commentClass(token["data"])) + + def createElement(self, token): + """Create an element but don't insert it anywhere""" + name = token["name"] + namespace = token.get("namespace", self.defaultNamespace) + element = self.elementClass(name, namespace) + element.attributes = token["data"] + return element + + def _getInsertFromTable(self): + return self._insertFromTable + + def _setInsertFromTable(self, value): + """Switch the function used to insert an element from the + normal one to the misnested table one and back again""" + self._insertFromTable = value + if value: + self.insertElement = self.insertElementTable + else: + self.insertElement = self.insertElementNormal + + insertFromTable = property(_getInsertFromTable, _setInsertFromTable) + + def insertElementNormal(self, token): + name = token["name"] + assert isinstance(name, text_type), "Element %s not unicode" % name + namespace = token.get("namespace", self.defaultNamespace) + element = self.elementClass(name, namespace) + element.attributes = token["data"] + self.openElements[-1].appendChild(element) + self.openElements.append(element) + return element + + def insertElementTable(self, token): + """Create an element and insert it into the tree""" + element = self.createElement(token) + if self.openElements[-1].name not in tableInsertModeElements: + return self.insertElementNormal(token) + else: + # We should be in the InTable mode. 
This means we want to do + # special magic element rearranging + parent, insertBefore = self.getTableMisnestedNodePosition() + if insertBefore is None: + parent.appendChild(element) + else: + parent.insertBefore(element, insertBefore) + self.openElements.append(element) + return element + + def insertText(self, data, parent=None): + """Insert text data.""" + if parent is None: + parent = self.openElements[-1] + + if (not self.insertFromTable or (self.insertFromTable and + self.openElements[-1].name + not in tableInsertModeElements)): + parent.insertText(data) + else: + # We should be in the InTable mode. This means we want to do + # special magic element rearranging + parent, insertBefore = self.getTableMisnestedNodePosition() + parent.insertText(data, insertBefore) + + def getTableMisnestedNodePosition(self): + """Get the foster parent element, and sibling to insert before + (or None) when inserting a misnested table node""" + # The foster parent element is the one which comes before the most + # recently opened table element + # XXX - this is really inelegant + lastTable = None + fosterParent = None + insertBefore = None + for elm in self.openElements[::-1]: + if elm.name == "table": + lastTable = elm + break + if lastTable: + # XXX - we should really check that this parent is actually a + # node here + if lastTable.parent: + fosterParent = lastTable.parent + insertBefore = lastTable + else: + fosterParent = self.openElements[ + self.openElements.index(lastTable) - 1] + else: + fosterParent = self.openElements[0] + return fosterParent, insertBefore + + def generateImpliedEndTags(self, exclude=None): + name = self.openElements[-1].name + # XXX td, th and tr are not actually needed + if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) + and name != exclude): + self.openElements.pop() + # XXX This is not entirely what the specification says. We should + # investigate it more closely. + self.generateImpliedEndTags(exclude) + + def getDocument(self): + "Return the final tree" + return self.document + + def getFragment(self): + "Return the final fragment" + # assert self.innerHTML + fragment = self.fragmentClass() + self.openElements[0].reparentChildren(fragment) + return fragment + + def testSerializer(self, node): + """Serialize the subtree of node in the format required by unit tests + node - the node from which to start serializing""" + raise NotImplementedError diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/dom.py b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/dom.py new file mode 100644 index 0000000000..f9e0d76e76 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/dom.py @@ -0,0 +1,290 @@ +from __future__ import absolute_import, division, unicode_literals + + +from xml.dom import minidom, Node, XML_NAMESPACE, XMLNS_NAMESPACE +import weakref + +from . import _base +from .. 
import constants +from ..constants import namespaces +from ..utils import moduleFactoryFactory + + +def getDomBuilder(DomImplementation): + Dom = DomImplementation + + class AttrList(object): + def __init__(self, element): + self.element = element + + def __iter__(self): + return list(self.element.attributes.items()).__iter__() + + def __setitem__(self, name, value): + self.element.setAttribute(name, value) + + def __len__(self): + return len(list(self.element.attributes.items())) + + def items(self): + return [(item[0], item[1]) for item in + list(self.element.attributes.items())] + + def keys(self): + return list(self.element.attributes.keys()) + + def __getitem__(self, name): + return self.element.getAttribute(name) + + def __contains__(self, name): + if isinstance(name, tuple): + raise NotImplementedError + else: + return self.element.hasAttribute(name) + + class NodeBuilder(_base.Node): + def __init__(self, element): + _base.Node.__init__(self, element.nodeName) + self.element = element + + namespace = property(lambda self: hasattr(self.element, "namespaceURI") + and self.element.namespaceURI or None) + + def appendChild(self, node): + node.parent = self + self.element.appendChild(node.element) + + def insertText(self, data, insertBefore=None): + text = self.element.ownerDocument.createTextNode(data) + if insertBefore: + self.element.insertBefore(text, insertBefore.element) + else: + self.element.appendChild(text) + + def insertBefore(self, node, refNode): + self.element.insertBefore(node.element, refNode.element) + node.parent = self + + def removeChild(self, node): + if node.element.parentNode == self.element: + self.element.removeChild(node.element) + node.parent = None + + def reparentChildren(self, newParent): + while self.element.hasChildNodes(): + child = self.element.firstChild + self.element.removeChild(child) + newParent.element.appendChild(child) + self.childNodes = [] + + def getAttributes(self): + return AttrList(self.element) + + def setAttributes(self, attributes): + if attributes: + for name, value in list(attributes.items()): + if isinstance(name, tuple): + if name[0] is not None: + qualifiedName = (name[0] + ":" + name[1]) + else: + qualifiedName = name[1] + self.element.setAttributeNS(name[2], qualifiedName, + value) + else: + self.element.setAttribute( + name, value) + attributes = property(getAttributes, setAttributes) + + def cloneNode(self): + return NodeBuilder(self.element.cloneNode(False)) + + def hasContent(self): + return self.element.hasChildNodes() + + def getNameTuple(self): + if self.namespace is None: + return namespaces["html"], self.name + else: + return self.namespace, self.name + + nameTuple = property(getNameTuple) + + class TreeBuilder(_base.TreeBuilder): + def documentClass(self): + self.dom = Dom.getDOMImplementation().createDocument(None, None, None) + return weakref.proxy(self) + + def insertDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + + domimpl = Dom.getDOMImplementation() + doctype = domimpl.createDocumentType(name, publicId, systemId) + self.document.appendChild(NodeBuilder(doctype)) + if Dom == minidom: + doctype.ownerDocument = self.dom + + def elementClass(self, name, namespace=None): + if namespace is None and self.defaultNamespace is None: + node = self.dom.createElement(name) + else: + node = self.dom.createElementNS(namespace, name) + + return NodeBuilder(node) + + def commentClass(self, data): + return NodeBuilder(self.dom.createComment(data)) + + def 
fragmentClass(self): + return NodeBuilder(self.dom.createDocumentFragment()) + + def appendChild(self, node): + self.dom.appendChild(node.element) + + def testSerializer(self, element): + return testSerializer(element) + + def getDocument(self): + return self.dom + + def getFragment(self): + return _base.TreeBuilder.getFragment(self).element + + def insertText(self, data, parent=None): + data = data + if parent != self: + _base.TreeBuilder.insertText(self, data, parent) + else: + # HACK: allow text nodes as children of the document node + if hasattr(self.dom, '_child_node_types'): + if not Node.TEXT_NODE in self.dom._child_node_types: + self.dom._child_node_types = list(self.dom._child_node_types) + self.dom._child_node_types.append(Node.TEXT_NODE) + self.dom.appendChild(self.dom.createTextNode(data)) + + implementation = DomImplementation + name = None + + def testSerializer(element): + element.normalize() + rv = [] + + def serializeElement(element, indent=0): + if element.nodeType == Node.DOCUMENT_TYPE_NODE: + if element.name: + if element.publicId or element.systemId: + publicId = element.publicId or "" + systemId = element.systemId or "" + rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" % + (' ' * indent, element.name, publicId, systemId)) + else: + rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name)) + else: + rv.append("|%s<!DOCTYPE >" % (' ' * indent,)) + elif element.nodeType == Node.DOCUMENT_NODE: + rv.append("#document") + elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE: + rv.append("#document-fragment") + elif element.nodeType == Node.COMMENT_NODE: + rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue)) + elif element.nodeType == Node.TEXT_NODE: + rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue)) + else: + if (hasattr(element, "namespaceURI") and + element.namespaceURI is not None): + name = "%s %s" % (constants.prefixes[element.namespaceURI], + element.nodeName) + else: + name = element.nodeName + rv.append("|%s<%s>" % (' ' * indent, name)) + if element.hasAttributes(): + attributes = [] + for i in range(len(element.attributes)): + attr = element.attributes.item(i) + name = attr.nodeName + value = attr.value + ns = attr.namespaceURI + if ns: + name = "%s %s" % (constants.prefixes[ns], attr.localName) + else: + name = attr.nodeName + attributes.append((name, value)) + + for name, value in sorted(attributes): + rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) + indent += 2 + for child in element.childNodes: + serializeElement(child, indent) + serializeElement(element, 0) + + return "\n".join(rv) + + def dom2sax(node, handler, nsmap={'xml': XML_NAMESPACE}): + if node.nodeType == Node.ELEMENT_NODE: + if not nsmap: + handler.startElement(node.nodeName, node.attributes) + for child in node.childNodes: + dom2sax(child, handler, nsmap) + handler.endElement(node.nodeName) + else: + attributes = dict(node.attributes.itemsNS()) + + # gather namespace declarations + prefixes = [] + for attrname in list(node.attributes.keys()): + attr = node.getAttributeNode(attrname) + if (attr.namespaceURI == XMLNS_NAMESPACE or + (attr.namespaceURI is None and attr.nodeName.startswith('xmlns'))): + prefix = (attr.nodeName != 'xmlns' and attr.nodeName or None) + handler.startPrefixMapping(prefix, attr.nodeValue) + prefixes.append(prefix) + nsmap = nsmap.copy() + nsmap[prefix] = attr.nodeValue + del attributes[(attr.namespaceURI, attr.nodeName)] + + # apply namespace declarations + for attrname in list(node.attributes.keys()): + attr = node.getAttributeNode(attrname) + 
if attr.namespaceURI is None and ':' in attr.nodeName: + prefix = attr.nodeName.split(':')[0] + if prefix in nsmap: + del attributes[(attr.namespaceURI, attr.nodeName)] + attributes[(nsmap[prefix], attr.nodeName)] = attr.nodeValue + + # SAX events + ns = node.namespaceURI or nsmap.get(None, None) + handler.startElementNS((ns, node.nodeName), node.nodeName, attributes) + for child in node.childNodes: + dom2sax(child, handler, nsmap) + handler.endElementNS((ns, node.nodeName), node.nodeName) + for prefix in prefixes: + handler.endPrefixMapping(prefix) + + elif node.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]: + handler.characters(node.nodeValue) + + elif node.nodeType == Node.DOCUMENT_NODE: + handler.startDocument() + for child in node.childNodes: + dom2sax(child, handler, nsmap) + handler.endDocument() + + elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE: + for child in node.childNodes: + dom2sax(child, handler, nsmap) + + else: + # ATTRIBUTE_NODE + # ENTITY_NODE + # PROCESSING_INSTRUCTION_NODE + # COMMENT_NODE + # DOCUMENT_TYPE_NODE + # NOTATION_NODE + pass + + return locals() + + +# The actual means to get a module! +getDomModule = moduleFactoryFactory(getDomBuilder) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/etree.py b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/etree.py new file mode 100644 index 0000000000..7b0cdd8e6d --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/etree.py @@ -0,0 +1,337 @@ +from __future__ import absolute_import, division, unicode_literals +from pip.vendor.six import text_type + +import re + +from . import _base +from .. import ihatexml +from .. import constants +from ..constants import namespaces +from ..utils import moduleFactoryFactory + +tag_regexp = re.compile("{([^}]*)}(.*)") + + +def getETreeBuilder(ElementTreeImplementation, fullTree=False): + ElementTree = ElementTreeImplementation + ElementTreeCommentType = ElementTree.Comment("asd").tag + + class Element(_base.Node): + def __init__(self, name, namespace=None): + self._name = name + self._namespace = namespace + self._element = ElementTree.Element(self._getETreeTag(name, + namespace)) + if namespace is None: + self.nameTuple = namespaces["html"], self._name + else: + self.nameTuple = self._namespace, self._name + self.parent = None + self._childNodes = [] + self._flags = [] + + def _getETreeTag(self, name, namespace): + if namespace is None: + etree_tag = name + else: + etree_tag = "{%s}%s" % (namespace, name) + return etree_tag + + def _setName(self, name): + self._name = name + self._element.tag = self._getETreeTag(self._name, self._namespace) + + def _getName(self): + return self._name + + name = property(_getName, _setName) + + def _setNamespace(self, namespace): + self._namespace = namespace + self._element.tag = self._getETreeTag(self._name, self._namespace) + + def _getNamespace(self): + return self._namespace + + namespace = property(_getNamespace, _setNamespace) + + def _getAttributes(self): + return self._element.attrib + + def _setAttributes(self, attributes): + # Delete existing attributes first + # XXX - there may be a better way to do this... 
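
The loop that follows first clears any existing attributes and then flattens html5lib's attribute keys into ElementTree names: plain string keys pass through unchanged, while (prefix, localname, namespace) tuples are collapsed into Clark notation, "{namespace}localname". A minimal standalone sketch of that mapping against the standard library; to_etree_name and XLINK_NS are illustrative names, not part of this diff:

import xml.etree.ElementTree as ElementTree

XLINK_NS = "http://www.w3.org/1999/xlink"  # example namespace

def to_etree_name(key):
    # html5lib hands over either a plain name or a
    # (prefix, localname, namespace) tuple
    if isinstance(key, tuple):
        return "{%s}%s" % (key[2], key[1])
    return key

elem = ElementTree.Element("svg")
elem.set(to_etree_name(("xlink", "href", XLINK_NS)), "#target")
elem.set(to_etree_name("width"), "100")
print(sorted(elem.attrib))
# ['width', '{http://www.w3.org/1999/xlink}href']
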
+ for key in list(self._element.attrib.keys()): + del self._element.attrib[key] + for key, value in attributes.items(): + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], key[1]) + else: + name = key + self._element.set(name, value) + + attributes = property(_getAttributes, _setAttributes) + + def _getChildNodes(self): + return self._childNodes + + def _setChildNodes(self, value): + del self._element[:] + self._childNodes = [] + for element in value: + self.insertChild(element) + + childNodes = property(_getChildNodes, _setChildNodes) + + def hasContent(self): + """Return true if the node has children or text""" + return bool(self._element.text or len(self._element)) + + def appendChild(self, node): + self._childNodes.append(node) + self._element.append(node._element) + node.parent = self + + def insertBefore(self, node, refNode): + index = list(self._element).index(refNode._element) + self._element.insert(index, node._element) + node.parent = self + + def removeChild(self, node): + self._element.remove(node._element) + node.parent = None + + def insertText(self, data, insertBefore=None): + if not(len(self._element)): + if not self._element.text: + self._element.text = "" + self._element.text += data + elif insertBefore is None: + # Insert the text as the tail of the last child element + if not self._element[-1].tail: + self._element[-1].tail = "" + self._element[-1].tail += data + else: + # Insert the text before the specified node + children = list(self._element) + index = children.index(insertBefore._element) + if index > 0: + if not self._element[index - 1].tail: + self._element[index - 1].tail = "" + self._element[index - 1].tail += data + else: + if not self._element.text: + self._element.text = "" + self._element.text += data + + def cloneNode(self): + element = type(self)(self.name, self.namespace) + for name, value in self.attributes.items(): + element.attributes[name] = value + return element + + def reparentChildren(self, newParent): + if newParent.childNodes: + newParent.childNodes[-1]._element.tail += self._element.text + else: + if not newParent._element.text: + newParent._element.text = "" + if self._element.text is not None: + newParent._element.text += self._element.text + self._element.text = "" + _base.Node.reparentChildren(self, newParent) + + class Comment(Element): + def __init__(self, data): + # Use the superclass constructor to set all properties on the + # wrapper element + self._element = ElementTree.Comment(data) + self.parent = None + self._childNodes = [] + self._flags = [] + + def _getData(self): + return self._element.text + + def _setData(self, value): + self._element.text = value + + data = property(_getData, _setData) + + class DocumentType(Element): + def __init__(self, name, publicId, systemId): + Element.__init__(self, "<!DOCTYPE>") + self._element.text = name + self.publicId = publicId + self.systemId = systemId + + def _getPublicId(self): + return self._element.get("publicId", "") + + def _setPublicId(self, value): + if value is not None: + self._element.set("publicId", value) + + publicId = property(_getPublicId, _setPublicId) + + def _getSystemId(self): + return self._element.get("systemId", "") + + def _setSystemId(self, value): + if value is not None: + self._element.set("systemId", value) + + systemId = property(_getSystemId, _setSystemId) + + class Document(Element): + def __init__(self): + Element.__init__(self, "DOCUMENT_ROOT") + + class DocumentFragment(Element): + def __init__(self): + Element.__init__(self, "DOCUMENT_FRAGMENT") + + 
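
The insertText logic above exists because ElementTree has no text-node type: character data lives either on an element's .text (before its first child) or on a child's .tail (after that child). A short illustration using only the standard library:

import xml.etree.ElementTree as ET

p = ET.fromstring("<p>one<b>two</b>three</p>")
print(p.text)     # 'one'   - text before the first child
print(p[0].text)  # 'two'   - text inside <b>
print(p[0].tail)  # 'three' - text after </b>, stored on the <b> element

This is why appending text "at the end" of an element means extending the tail of its last child, and why inserting before a child means appending to the tail of the preceding sibling.
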
def testSerializer(element): + rv = [] + + def serializeElement(element, indent=0): + if not(hasattr(element, "tag")): + element = element.getroot() + if element.tag == "<!DOCTYPE>": + if element.get("publicId") or element.get("systemId"): + publicId = element.get("publicId") or "" + systemId = element.get("systemId") or "" + rv.append("""<!DOCTYPE %s "%s" "%s">""" % + (element.text, publicId, systemId)) + else: + rv.append("<!DOCTYPE %s>" % (element.text,)) + elif element.tag == "DOCUMENT_ROOT": + rv.append("#document") + if element.text is not None: + rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) + if element.tail is not None: + raise TypeError("Document node cannot have tail") + if hasattr(element, "attrib") and len(element.attrib): + raise TypeError("Document node cannot have attributes") + elif element.tag == ElementTreeCommentType: + rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) + else: + assert isinstance(element.tag, text_type), \ + "Expected unicode, got %s, %s" % (type(element.tag), element.tag) + nsmatch = tag_regexp.match(element.tag) + + if nsmatch is None: + name = element.tag + else: + ns, name = nsmatch.groups() + prefix = constants.prefixes[ns] + name = "%s %s" % (prefix, name) + rv.append("|%s<%s>" % (' ' * indent, name)) + + if hasattr(element, "attrib"): + attributes = [] + for name, value in element.attrib.items(): + nsmatch = tag_regexp.match(name) + if nsmatch is not None: + ns, name = nsmatch.groups() + prefix = constants.prefixes[ns] + attr_string = "%s %s" % (prefix, name) + else: + attr_string = name + attributes.append((attr_string, value)) + + for name, value in sorted(attributes): + rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) + if element.text: + rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) + indent += 2 + for child in element: + serializeElement(child, indent) + if element.tail: + rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) + serializeElement(element, 0) + + return "\n".join(rv) + + def tostring(element): + """Serialize an element and its child nodes to a string""" + rv = [] + filter = ihatexml.InfosetFilter() + + def serializeElement(element): + if isinstance(element, ElementTree.ElementTree): + element = element.getroot() + + if element.tag == "<!DOCTYPE>": + if element.get("publicId") or element.get("systemId"): + publicId = element.get("publicId") or "" + systemId = element.get("systemId") or "" + rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" % + (element.text, publicId, systemId)) + else: + rv.append("<!DOCTYPE %s>" % (element.text,)) + elif element.tag == "DOCUMENT_ROOT": + if element.text is not None: + rv.append(element.text) + if element.tail is not None: + raise TypeError("Document node cannot have tail") + if hasattr(element, "attrib") and len(element.attrib): + raise TypeError("Document node cannot have attributes") + + for child in element: + serializeElement(child) + + elif element.tag == ElementTreeCommentType: + rv.append("<!--%s-->" % (element.text,)) + else: + # This is assumed to be an ordinary element + if not element.attrib: + rv.append("<%s>" % (filter.fromXmlName(element.tag),)) + else: + attr = " ".join(["%s=\"%s\"" % ( + filter.fromXmlName(name), value) + for name, value in element.attrib.items()]) + rv.append("<%s %s>" % (element.tag, attr)) + if element.text: + rv.append(element.text) + + for child in element: + serializeElement(child) + + rv.append("</%s>" % (element.tag,)) + + if element.tail: + rv.append(element.tail) + + serializeElement(element) + + 
return "".join(rv) + + class TreeBuilder(_base.TreeBuilder): + documentClass = Document + doctypeClass = DocumentType + elementClass = Element + commentClass = Comment + fragmentClass = DocumentFragment + implementation = ElementTreeImplementation + + def testSerializer(self, element): + return testSerializer(element) + + def getDocument(self): + if fullTree: + return self.document._element + else: + if self.defaultNamespace is not None: + return self.document._element.find( + "{%s}html" % self.defaultNamespace) + else: + return self.document._element.find("html") + + def getFragment(self): + return _base.TreeBuilder.getFragment(self)._element + + return locals() + + +getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/etree_lxml.py b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/etree_lxml.py new file mode 100644 index 0000000000..35d08efaa6 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treebuilders/etree_lxml.py @@ -0,0 +1,369 @@ +"""Module for supporting the lxml.etree library. The idea here is to use as much +of the native library as possible, without using fragile hacks like custom element +names that break between releases. The downside of this is that we cannot represent +all possible trees; specifically the following are known to cause problems: + +Text or comments as siblings of the root element +Docypes with no name + +When any of these things occur, we emit a DataLossWarning +""" + +from __future__ import absolute_import, division, unicode_literals + +import warnings +import re +import sys + +from . import _base +from ..constants import DataLossWarning +from .. import constants +from . import etree as etree_builders +from .. import ihatexml + +import lxml.etree as etree + + +fullTree = True +tag_regexp = re.compile("{([^}]*)}(.*)") + +comment_type = etree.Comment("asd").tag + + +class DocumentType(object): + def __init__(self, name, publicId, systemId): + self.name = name + self.publicId = publicId + self.systemId = systemId + + +class Document(object): + def __init__(self): + self._elementTree = None + self._childNodes = [] + + def appendChild(self, element): + self._elementTree.getroot().addnext(element._element) + + def _getChildNodes(self): + return self._childNodes + + childNodes = property(_getChildNodes) + + +def testSerializer(element): + rv = [] + finalText = None + infosetFilter = ihatexml.InfosetFilter() + + def serializeElement(element, indent=0): + if not hasattr(element, "tag"): + if hasattr(element, "getroot"): + # Full tree case + rv.append("#document") + if element.docinfo.internalDTD: + if not (element.docinfo.public_id or + element.docinfo.system_url): + dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name + else: + dtd_str = """<!DOCTYPE %s "%s" "%s">""" % ( + element.docinfo.root_name, + element.docinfo.public_id, + element.docinfo.system_url) + rv.append("|%s%s" % (' ' * (indent + 2), dtd_str)) + next_element = element.getroot() + while next_element.getprevious() is not None: + next_element = next_element.getprevious() + while next_element is not None: + serializeElement(next_element, indent + 2) + next_element = next_element.getnext() + elif isinstance(element, str) or isinstance(element, bytes): + # Text in a fragment + assert isinstance(element, str) or sys.version_info.major == 2 + rv.append("|%s\"%s\"" % (' ' * indent, element)) + else: + # Fragment case + rv.append("#document-fragment") + for next_element in element: + serializeElement(next_element, indent 
+ 2) + elif element.tag == comment_type: + rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) + if hasattr(element, "tail") and element.tail: + rv.append("|%s\"%s\"" % (' ' * indent, element.tail)) + else: + assert isinstance(element, etree._Element) + nsmatch = etree_builders.tag_regexp.match(element.tag) + if nsmatch is not None: + ns = nsmatch.group(1) + tag = nsmatch.group(2) + prefix = constants.prefixes[ns] + rv.append("|%s<%s %s>" % (' ' * indent, prefix, + infosetFilter.fromXmlName(tag))) + else: + rv.append("|%s<%s>" % (' ' * indent, + infosetFilter.fromXmlName(element.tag))) + + if hasattr(element, "attrib"): + attributes = [] + for name, value in element.attrib.items(): + nsmatch = tag_regexp.match(name) + if nsmatch is not None: + ns, name = nsmatch.groups() + name = infosetFilter.fromXmlName(name) + prefix = constants.prefixes[ns] + attr_string = "%s %s" % (prefix, name) + else: + attr_string = infosetFilter.fromXmlName(name) + attributes.append((attr_string, value)) + + for name, value in sorted(attributes): + rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) + + if element.text: + rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) + indent += 2 + for child in element: + serializeElement(child, indent) + if hasattr(element, "tail") and element.tail: + rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) + serializeElement(element, 0) + + if finalText is not None: + rv.append("|%s\"%s\"" % (' ' * 2, finalText)) + + return "\n".join(rv) + + +def tostring(element): + """Serialize an element and its child nodes to a string""" + rv = [] + finalText = None + + def serializeElement(element): + if not hasattr(element, "tag"): + if element.docinfo.internalDTD: + if element.docinfo.doctype: + dtd_str = element.docinfo.doctype + else: + dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name + rv.append(dtd_str) + serializeElement(element.getroot()) + + elif element.tag == comment_type: + rv.append("<!--%s-->" % (element.text,)) + + else: + # This is assumed to be an ordinary element + if not element.attrib: + rv.append("<%s>" % (element.tag,)) + else: + attr = " ".join(["%s=\"%s\"" % (name, value) + for name, value in element.attrib.items()]) + rv.append("<%s %s>" % (element.tag, attr)) + if element.text: + rv.append(element.text) + + for child in element: + serializeElement(child) + + rv.append("</%s>" % (element.tag,)) + + if hasattr(element, "tail") and element.tail: + rv.append(element.tail) + + serializeElement(element) + + if finalText is not None: + rv.append("%s\"" % (' ' * 2, finalText)) + + return "".join(rv) + + +class TreeBuilder(_base.TreeBuilder): + documentClass = Document + doctypeClass = DocumentType + elementClass = None + commentClass = None + fragmentClass = Document + implementation = etree + + def __init__(self, namespaceHTMLElements, fullTree=False): + builder = etree_builders.getETreeModule(etree, fullTree=fullTree) + infosetFilter = self.infosetFilter = ihatexml.InfosetFilter() + self.namespaceHTMLElements = namespaceHTMLElements + + class Attributes(dict): + def __init__(self, element, value={}): + self._element = element + dict.__init__(self, value) + for key, value in self.items(): + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) + else: + name = infosetFilter.coerceAttribute(key) + self._element._element.attrib[name] = value + + def __setitem__(self, key, value): + dict.__setitem__(self, key, value) + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], 
infosetFilter.coerceAttribute(key[1])) + else: + name = infosetFilter.coerceAttribute(key) + self._element._element.attrib[name] = value + + class Element(builder.Element): + def __init__(self, name, namespace): + name = infosetFilter.coerceElement(name) + builder.Element.__init__(self, name, namespace=namespace) + self._attributes = Attributes(self) + + def _setName(self, name): + self._name = infosetFilter.coerceElement(name) + self._element.tag = self._getETreeTag( + self._name, self._namespace) + + def _getName(self): + return infosetFilter.fromXmlName(self._name) + + name = property(_getName, _setName) + + def _getAttributes(self): + return self._attributes + + def _setAttributes(self, attributes): + self._attributes = Attributes(self, attributes) + + attributes = property(_getAttributes, _setAttributes) + + def insertText(self, data, insertBefore=None): + data = infosetFilter.coerceCharacters(data) + builder.Element.insertText(self, data, insertBefore) + + def appendChild(self, child): + builder.Element.appendChild(self, child) + + class Comment(builder.Comment): + def __init__(self, data): + data = infosetFilter.coerceComment(data) + builder.Comment.__init__(self, data) + + def _setData(self, data): + data = infosetFilter.coerceComment(data) + self._element.text = data + + def _getData(self): + return self._element.text + + data = property(_getData, _setData) + + self.elementClass = Element + self.commentClass = builder.Comment + # self.fragmentClass = builder.DocumentFragment + _base.TreeBuilder.__init__(self, namespaceHTMLElements) + + def reset(self): + _base.TreeBuilder.reset(self) + self.insertComment = self.insertCommentInitial + self.initial_comments = [] + self.doctype = None + + def testSerializer(self, element): + return testSerializer(element) + + def getDocument(self): + if fullTree: + return self.document._elementTree + else: + return self.document._elementTree.getroot() + + def getFragment(self): + fragment = [] + element = self.openElements[0]._element + if element.text: + fragment.append(element.text) + fragment.extend(list(element)) + if element.tail: + fragment.append(element.tail) + return fragment + + def insertDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + + if not name: + warnings.warn("lxml cannot represent empty doctype", DataLossWarning) + self.doctype = None + else: + coercedName = self.infosetFilter.coerceElement(name) + if coercedName != name: + warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning) + + doctype = self.doctypeClass(coercedName, publicId, systemId) + self.doctype = doctype + + def insertCommentInitial(self, data, parent=None): + self.initial_comments.append(data) + + def insertCommentMain(self, data, parent=None): + if (parent == self.document and + self.document._elementTree.getroot()[-1].tag == comment_type): + warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) + super(TreeBuilder, self).insertComment(data, parent) + + def insertRoot(self, token): + """Create the document root""" + # Because of the way libxml2 works, it doesn't seem to be possible to + # alter information like the doctype after the tree has been parsed. 
+ # Therefore we need to use the built-in parser to create our initial + tree, after which we can add elements like normal + docStr = "" + if self.doctype: + assert self.doctype.name + docStr += "<!DOCTYPE %s" % self.doctype.name + if (self.doctype.publicId is not None or + self.doctype.systemId is not None): + docStr += (' PUBLIC "%s" ' % + (self.infosetFilter.coercePubid(self.doctype.publicId or ""))) + if self.doctype.systemId: + sysid = self.doctype.systemId + if sysid.find("'") >= 0 and sysid.find('"') >= 0: + warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning) + sysid = sysid.replace("'", 'U00027') + if sysid.find("'") >= 0: + docStr += '"%s"' % sysid + else: + docStr += "'%s'" % sysid + else: + docStr += "''" + docStr += ">" + if self.doctype.name != token["name"]: + warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning) + docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>" + root = etree.fromstring(docStr) + + # Append the initial comments: + for comment_token in self.initial_comments: + root.addprevious(etree.Comment(comment_token["data"])) + + # Create the root document and add the ElementTree to it + self.document = self.documentClass() + self.document._elementTree = root.getroottree() + + # Give the root element the right name + name = token["name"] + namespace = token.get("namespace", self.defaultNamespace) + if namespace is None: + etree_tag = name + else: + etree_tag = "{%s}%s" % (namespace, name) + root.tag = etree_tag + + # Add the root element to the internal child/open data structures + root_element = self.elementClass(name, namespace) + root_element._element = root + self.document._childNodes.append(root_element) + self.openElements.append(root_element) + + # Reset to the default insert comment function + self.insertComment = self.insertCommentMain diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/__init__.py b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/__init__.py new file mode 100644 index 0000000000..18124e75f3 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/__init__.py @@ -0,0 +1,57 @@ +"""A collection of modules for iterating through different kinds of +tree, generating tokens identical to those produced by the tokenizer +module. + +To create a tree walker for a new type of tree, you need to +implement a tree walker object (called TreeWalker by convention) that +implements a 'serialize' method taking a tree as sole argument and +returning an iterator generating tokens. +""" + +from __future__ import absolute_import, division, unicode_literals + +import sys + +from ..utils import default_etree + +treeWalkerCache = {} + + +def getTreeWalker(treeType, implementation=None, **kwargs): + """Get a TreeWalker class for various types of tree with built-in support + + treeType - the name of the tree type required (case-insensitive). Supported + values are: + + "dom" - The xml.dom.minidom DOM implementation + "pulldom" - The xml.dom.pulldom event stream + "etree" - A generic walker for tree implementations exposing an + elementtree-like interface (known to work with + ElementTree, cElementTree and lxml.etree). + "lxml" - Optimized walker for lxml.etree + "genshi" - a Genshi stream + + implementation - (Currently applies to the "etree" tree type only). A module + implementing the tree type e.g.
xml.etree.ElementTree or + cElementTree.""" + + treeType = treeType.lower() + if treeType not in treeWalkerCache: + if treeType in ("dom", "pulldom"): + name = "%s.%s" % (__name__, treeType) + __import__(name) + mod = sys.modules[name] + treeWalkerCache[treeType] = mod.TreeWalker + elif treeType == "genshi": + from . import genshistream + treeWalkerCache[treeType] = genshistream.TreeWalker + elif treeType == "lxml": + from . import lxmletree + treeWalkerCache[treeType] = lxmletree.TreeWalker + elif treeType == "etree": + from . import etree + if implementation is None: + implementation = default_etree + # XXX: NEVER cache here, caching is done in the etree submodule + return etree.getETreeModule(implementation, **kwargs).TreeWalker + return treeWalkerCache.get(treeType) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/_base.py b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/_base.py new file mode 100644 index 0000000000..223ef21c87 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/_base.py @@ -0,0 +1,196 @@ +from __future__ import absolute_import, division, unicode_literals +from pip.vendor.six import text_type + +import gettext +_ = gettext.gettext + +from ..constants import voidElements, spaceCharacters +spaceCharacters = "".join(spaceCharacters) + + +class TreeWalker(object): + def __init__(self, tree): + self.tree = tree + + def __iter__(self): + raise NotImplementedError + + def error(self, msg): + return {"type": "SerializeError", "data": msg} + + def emptyTag(self, namespace, name, attrs, hasChildren=False): + assert namespace is None or isinstance(namespace, text_type), type(namespace) + assert isinstance(name, text_type), type(name) + assert all((namespace is None or isinstance(namespace, text_type)) and + isinstance(name, text_type) and + isinstance(value, text_type) + for (namespace, name), value in attrs.items()) + + yield {"type": "EmptyTag", "name": name, + "namespace": namespace, + "data": attrs} + if hasChildren: + yield self.error(_("Void element has children")) + + def startTag(self, namespace, name, attrs): + assert namespace is None or isinstance(namespace, text_type), type(namespace) + assert isinstance(name, text_type), type(name) + assert all((namespace is None or isinstance(namespace, text_type)) and + isinstance(name, text_type) and + isinstance(value, text_type) + for (namespace, name), value in attrs.items()) + + return {"type": "StartTag", + "name": name, + "namespace": namespace, + "data": attrs} + + def endTag(self, namespace, name): + assert namespace is None or isinstance(namespace, text_type), type(namespace) + assert isinstance(name, text_type), type(namespace) + + return {"type": "EndTag", + "name": name, + "namespace": namespace, + "data": {}} + + def text(self, data): + assert isinstance(data, text_type), type(data) + + data = data + middle = data.lstrip(spaceCharacters) + left = data[:len(data) - len(middle)] + if left: + yield {"type": "SpaceCharacters", "data": left} + data = middle + middle = data.rstrip(spaceCharacters) + right = data[len(middle):] + if middle: + yield {"type": "Characters", "data": middle} + if right: + yield {"type": "SpaceCharacters", "data": right} + + def comment(self, data): + assert isinstance(data, text_type), type(data) + + return {"type": "Comment", "data": data} + + def doctype(self, name, publicId=None, systemId=None, correct=True): + assert name is None or isinstance(name, text_type), type(name) + assert publicId is None or isinstance(publicId, text_type), 
type(publicId) + assert systemId is None or isinstance(systemId, text_type), type(systemId) + + return {"type": "Doctype", + "name": name if name is not None else "", + "publicId": publicId, + "systemId": systemId, + "correct": correct} + + def entity(self, name): + assert isinstance(name, text_type), type(name) + + return {"type": "Entity", "name": name} + + def unknown(self, nodeType): + return self.error(_("Unknown node type: ") + nodeType) + + +class RecursiveTreeWalker(TreeWalker): + def walkChildren(self, node): + raise NotImplementedError + + def element(self, node, namespace, name, attrs, hasChildren): + if name in voidElements: + for token in self.emptyTag(namespace, name, attrs, hasChildren): + yield token + else: + yield self.startTag(name, attrs) + if hasChildren: + for token in self.walkChildren(node): + yield token + yield self.endTag(name) + +from xml.dom import Node + +DOCUMENT = Node.DOCUMENT_NODE +DOCTYPE = Node.DOCUMENT_TYPE_NODE +TEXT = Node.TEXT_NODE +ELEMENT = Node.ELEMENT_NODE +COMMENT = Node.COMMENT_NODE +ENTITY = Node.ENTITY_NODE +UNKNOWN = "<#UNKNOWN#>" + + +class NonRecursiveTreeWalker(TreeWalker): + def getNodeDetails(self, node): + raise NotImplementedError + + def getFirstChild(self, node): + raise NotImplementedError + + def getNextSibling(self, node): + raise NotImplementedError + + def getParentNode(self, node): + raise NotImplementedError + + def __iter__(self): + currentNode = self.tree + while currentNode is not None: + details = self.getNodeDetails(currentNode) + type, details = details[0], details[1:] + hasChildren = False + + if type == DOCTYPE: + yield self.doctype(*details) + + elif type == TEXT: + for token in self.text(*details): + yield token + + elif type == ELEMENT: + namespace, name, attributes, hasChildren = details + if name in voidElements: + for token in self.emptyTag(namespace, name, attributes, + hasChildren): + yield token + hasChildren = False + else: + yield self.startTag(namespace, name, attributes) + + elif type == COMMENT: + yield self.comment(details[0]) + + elif type == ENTITY: + yield self.entity(details[0]) + + elif type == DOCUMENT: + hasChildren = True + + else: + yield self.unknown(details[0]) + + if hasChildren: + firstChild = self.getFirstChild(currentNode) + else: + firstChild = None + + if firstChild is not None: + currentNode = firstChild + else: + while currentNode is not None: + details = self.getNodeDetails(currentNode) + type, details = details[0], details[1:] + if type == ELEMENT: + namespace, name, attributes, hasChildren = details + if name not in voidElements: + yield self.endTag(namespace, name) + if self.tree is currentNode: + currentNode = None + break + nextSibling = self.getNextSibling(currentNode) + if nextSibling is not None: + currentNode = nextSibling + break + else: + currentNode = self.getParentNode(currentNode) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/dom.py b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/dom.py new file mode 100644 index 0000000000..a01287a944 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/dom.py @@ -0,0 +1,46 @@ +from __future__ import absolute_import, division, unicode_literals + +from xml.dom import Node + +import gettext +_ = gettext.gettext + +from . 
import _base + + +class TreeWalker(_base.NonRecursiveTreeWalker): + def getNodeDetails(self, node): + if node.nodeType == Node.DOCUMENT_TYPE_NODE: + return _base.DOCTYPE, node.name, node.publicId, node.systemId + + elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): + return _base.TEXT, node.nodeValue + + elif node.nodeType == Node.ELEMENT_NODE: + attrs = {} + for attr in list(node.attributes.keys()): + attr = node.getAttributeNode(attr) + if attr.namespaceURI: + attrs[(attr.namespaceURI, attr.localName)] = attr.value + else: + attrs[(None, attr.name)] = attr.value + return (_base.ELEMENT, node.namespaceURI, node.nodeName, + attrs, node.hasChildNodes()) + + elif node.nodeType == Node.COMMENT_NODE: + return _base.COMMENT, node.nodeValue + + elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): + return (_base.DOCUMENT,) + + else: + return _base.UNKNOWN, node.nodeType + + def getFirstChild(self, node): + return node.firstChild + + def getNextSibling(self, node): + return node.nextSibling + + def getParentNode(self, node): + return node.parentNode diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/etree.py b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/etree.py new file mode 100644 index 0000000000..8d19f7db55 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/etree.py @@ -0,0 +1,131 @@ +from __future__ import absolute_import, division, unicode_literals + +import gettext +_ = gettext.gettext + +import re + +from pip.vendor.six import text_type + +from . import _base +from ..utils import moduleFactoryFactory + +tag_regexp = re.compile("{([^}]*)}(.*)") + + +def getETreeBuilder(ElementTreeImplementation): + ElementTree = ElementTreeImplementation + ElementTreeCommentType = ElementTree.Comment("asd").tag + + class TreeWalker(_base.NonRecursiveTreeWalker): + """Given the particular ElementTree representation, this implementation, + to avoid using recursion, returns "nodes" as tuples with the following + content: + + 1. The current element + + 2. The index of the element relative to its parent + + 3. A stack of ancestor elements + + 4. 
A flag "text", "tail" or None to indicate if the current node is a + text node; either the text or tail of the current element (1) + """ + def getNodeDetails(self, node): + if isinstance(node, tuple): # It might be the root Element + elt, key, parents, flag = node + if flag in ("text", "tail"): + return _base.TEXT, getattr(elt, flag) + else: + node = elt + + if not(hasattr(node, "tag")): + node = node.getroot() + + if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"): + return (_base.DOCUMENT,) + + elif node.tag == "<!DOCTYPE>": + return (_base.DOCTYPE, node.text, + node.get("publicId"), node.get("systemId")) + + elif node.tag == ElementTreeCommentType: + return _base.COMMENT, node.text + + else: + assert type(node.tag) == text_type, type(node.tag) + # This is assumed to be an ordinary element + match = tag_regexp.match(node.tag) + if match: + namespace, tag = match.groups() + else: + namespace = None + tag = node.tag + attrs = {} + for name, value in list(node.attrib.items()): + match = tag_regexp.match(name) + if match: + attrs[(match.group(1), match.group(2))] = value + else: + attrs[(None, name)] = value + return (_base.ELEMENT, namespace, tag, + attrs, len(node) or node.text) + + def getFirstChild(self, node): + if isinstance(node, tuple): + element, key, parents, flag = node + else: + element, key, parents, flag = node, None, [], None + + if flag in ("text", "tail"): + return None + else: + if element.text: + return element, key, parents, "text" + elif len(element): + parents.append(element) + return element[0], 0, parents, None + else: + return None + + def getNextSibling(self, node): + if isinstance(node, tuple): + element, key, parents, flag = node + else: + return None + + if flag == "text": + if len(element): + parents.append(element) + return element[0], 0, parents, None + else: + return None + else: + if element.tail and flag != "tail": + return element, key, parents, "tail" + elif key < len(parents[-1]) - 1: + return parents[-1][key + 1], key + 1, parents, None + else: + return None + + def getParentNode(self, node): + if isinstance(node, tuple): + element, key, parents, flag = node + else: + return None + + if flag == "text": + if not parents: + return element + else: + return element, key, parents, None + else: + parent = parents.pop() + if not parents: + return parent + else: + return parent, list(parents[-1]).index(parent), parents, None + + return locals() + +getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/genshistream.py b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/genshistream.py new file mode 100644 index 0000000000..f559c45d04 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/genshistream.py @@ -0,0 +1,69 @@ +from __future__ import absolute_import, division, unicode_literals + +from genshi.core import QName +from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT +from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT + +from . import _base + +from ..constants import voidElements, namespaces + + +class TreeWalker(_base.TreeWalker): + def __iter__(self): + # Buffer the events so we can pass in the following one + previous = None + for event in self.tree: + if previous is not None: + for token in self.tokens(previous, event): + yield token + previous = event + + # Don't forget the final event! 
+ if previous is not None: + for token in self.tokens(previous, None): + yield token + + def tokens(self, event, next): + kind, data, pos = event + if kind == START: + tag, attribs = data + name = tag.localname + namespace = tag.namespace + converted_attribs = {} + for k, v in attribs: + if isinstance(k, QName): + converted_attribs[(k.namespace, k.localname)] = v + else: + converted_attribs[(None, k)] = v + + if namespace == namespaces["html"] and name in voidElements: + for token in self.emptyTag(namespace, name, converted_attribs, + not next or next[0] != END + or next[1] != tag): + yield token + else: + yield self.startTag(namespace, name, converted_attribs) + + elif kind == END: + name = data.localname + namespace = data.namespace + if name not in voidElements: + yield self.endTag(namespace, name) + + elif kind == COMMENT: + yield self.comment(data) + + elif kind == TEXT: + for token in self.text(data): + yield token + + elif kind == DOCTYPE: + yield self.doctype(*data) + + elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, + START_CDATA, END_CDATA, PI): + pass + + else: + yield self.unknown(kind) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/lxmletree.py b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/lxmletree.py new file mode 100644 index 0000000000..66969ee352 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/lxmletree.py @@ -0,0 +1,208 @@ +from __future__ import absolute_import, division, unicode_literals +from pip.vendor.six import text_type + +from lxml import etree +from ..treebuilders.etree import tag_regexp + +from gettext import gettext +_ = gettext + +from . import _base + +from .. import ihatexml + + +def ensure_str(s): + if s is None: + return None + elif isinstance(s, text_type): + return s + else: + return s.decode("utf-8", "strict") + + +class Root(object): + def __init__(self, et): + self.elementtree = et + self.children = [] + if et.docinfo.internalDTD: + self.children.append(Doctype(self, + ensure_str(et.docinfo.root_name), + ensure_str(et.docinfo.public_id), + ensure_str(et.docinfo.system_url))) + root = et.getroot() + node = root + + while node.getprevious() is not None: + node = node.getprevious() + while node is not None: + self.children.append(node) + node = node.getnext() + + self.text = None + self.tail = None + + def __getitem__(self, key): + return self.children[key] + + def getnext(self): + return None + + def __len__(self): + return 1 + + +class Doctype(object): + def __init__(self, root_node, name, public_id, system_id): + self.root_node = root_node + self.name = name + self.public_id = public_id + self.system_id = system_id + + self.text = None + self.tail = None + + def getnext(self): + return self.root_node.children[1] + + +class FragmentRoot(Root): + def __init__(self, children): + self.children = [FragmentWrapper(self, child) for child in children] + self.text = self.tail = None + + def getnext(self): + return None + + +class FragmentWrapper(object): + def __init__(self, fragment_root, obj): + self.root_node = fragment_root + self.obj = obj + if hasattr(self.obj, 'text'): + self.text = ensure_str(self.obj.text) + else: + self.text = None + if hasattr(self.obj, 'tail'): + self.tail = ensure_str(self.obj.tail) + else: + self.tail = None + self.isstring = isinstance(obj, str) or isinstance(obj, bytes) + # Support for bytes here is Py2 + if self.isstring: + self.obj = ensure_str(self.obj) + + def __getattr__(self, name): + return getattr(self.obj, name) + + def getnext(self): + siblings = 
self.root_node.children + idx = siblings.index(self) + if idx < len(siblings) - 1: + return siblings[idx + 1] + else: + return None + + def __getitem__(self, key): + return self.obj[key] + + def __bool__(self): + return bool(self.obj) + + def getparent(self): + return None + + def __str__(self): + return str(self.obj) + + def __unicode__(self): + return str(self.obj) + + def __len__(self): + return len(self.obj) + + +class TreeWalker(_base.NonRecursiveTreeWalker): + def __init__(self, tree): + if hasattr(tree, "getroot"): + tree = Root(tree) + elif isinstance(tree, list): + tree = FragmentRoot(tree) + _base.NonRecursiveTreeWalker.__init__(self, tree) + self.filter = ihatexml.InfosetFilter() + + def getNodeDetails(self, node): + if isinstance(node, tuple): # Text node + node, key = node + assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key + return _base.TEXT, ensure_str(getattr(node, key)) + + elif isinstance(node, Root): + return (_base.DOCUMENT,) + + elif isinstance(node, Doctype): + return _base.DOCTYPE, node.name, node.public_id, node.system_id + + elif isinstance(node, FragmentWrapper) and node.isstring: + return _base.TEXT, node.obj + + elif node.tag == etree.Comment: + return _base.COMMENT, ensure_str(node.text) + + elif node.tag == etree.Entity: + return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &; + + else: + # This is assumed to be an ordinary element + match = tag_regexp.match(ensure_str(node.tag)) + if match: + namespace, tag = match.groups() + else: + namespace = None + tag = ensure_str(node.tag) + attrs = {} + for name, value in list(node.attrib.items()): + name = ensure_str(name) + value = ensure_str(value) + match = tag_regexp.match(name) + if match: + attrs[(match.group(1), match.group(2))] = value + else: + attrs[(None, name)] = value + return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag), + attrs, len(node) > 0 or node.text) + + def getFirstChild(self, node): + assert not isinstance(node, tuple), _("Text nodes have no children") + + assert len(node) or node.text, "Node has no children" + if node.text: + return (node, "text") + else: + return node[0] + + def getNextSibling(self, node): + if isinstance(node, tuple): # Text node + node, key = node + assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key + if key == "text": + # XXX: we cannot use a "bool(node) and node[0] or None" construct here + # because node[0] might evaluate to False if it has no child element + if len(node): + return node[0] + else: + return None + else: # tail + return node.getnext() + + return (node, "tail") if node.tail else node.getnext() + + def getParentNode(self, node): + if isinstance(node, tuple): # Text node + node, key = node + assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key + if key == "text": + return node + # else: fallback to "normal" processing + + return node.getparent() diff --git a/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/pulldom.py b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/pulldom.py new file mode 100644 index 0000000000..0b0f515fec --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/treewalkers/pulldom.py @@ -0,0 +1,63 @@ +from __future__ import absolute_import, division, unicode_literals + +from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \ + COMMENT, IGNORABLE_WHITESPACE, CHARACTERS + +from . 
import _base + + from ..constants import voidElements + + + class TreeWalker(_base.TreeWalker): + def __iter__(self): + ignore_until = None + previous = None + for event in self.tree: + if previous is not None and \ + (ignore_until is None or previous[1] is ignore_until): + if previous[1] is ignore_until: + ignore_until = None + for token in self.tokens(previous, event): + yield token + if token["type"] == "EmptyTag": + ignore_until = previous[1] + previous = event + if ignore_until is None or previous[1] is ignore_until: + for token in self.tokens(previous, None): + yield token + elif ignore_until is not None: + raise ValueError("Illformed DOM event stream: void element without END_ELEMENT") + + def tokens(self, event, next): + type, node = event + if type == START_ELEMENT: + name = node.nodeName + namespace = node.namespaceURI + attrs = {} + for attr in list(node.attributes.keys()): + attr = node.getAttributeNode(attr) + attrs[(attr.namespaceURI, attr.localName)] = attr.value + if name in voidElements: + for token in self.emptyTag(namespace, + name, + attrs, + not next or next[1] is not node): + yield token + else: + yield self.startTag(namespace, name, attrs) + + elif type == END_ELEMENT: + name = node.nodeName + namespace = node.namespaceURI + if name not in voidElements: + yield self.endTag(namespace, name) + + elif type == COMMENT: + yield self.comment(node.nodeValue) + + elif type in (IGNORABLE_WHITESPACE, CHARACTERS): + for token in self.text(node.nodeValue): + yield token + + else: + yield self.unknown(type) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/trie/__init__.py b/awx/lib/site-packages/pip/vendor/html5lib/trie/__init__.py new file mode 100644 index 0000000000..a8cca8a9ac --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/trie/__init__.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, unicode_literals + +from .py import Trie as PyTrie + +Trie = PyTrie + +try: + from .datrie import Trie as DATrie +except ImportError: + pass +else: + Trie = DATrie diff --git a/awx/lib/site-packages/pip/vendor/html5lib/trie/_base.py b/awx/lib/site-packages/pip/vendor/html5lib/trie/_base.py new file mode 100644 index 0000000000..724486b16e --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/trie/_base.py @@ -0,0 +1,37 @@ +from __future__ import absolute_import, division, unicode_literals + +from collections import Mapping + + +class Trie(Mapping): + """Abstract base class for tries""" + + def keys(self, prefix=None): + # Explicit super(Trie, self) keeps this working on Python 2 as well + keys = super(Trie, self).keys() + + if prefix is None: + return set(keys) + + # Python 2.6: no set comprehensions + return set([x for x in keys if x.startswith(prefix)]) + + def has_keys_with_prefix(self, prefix): + for key in self.keys(): + if key.startswith(prefix): + return True + + return False + + def longest_prefix(self, prefix): + if prefix in self: + return prefix + + for i in range(1, len(prefix) + 1): + if prefix[:-i] in self: + return prefix[:-i] + + raise KeyError(prefix) + + def longest_prefix_item(self, prefix): + lprefix = self.longest_prefix(prefix) + return (lprefix, self[lprefix]) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/trie/datrie.py b/awx/lib/site-packages/pip/vendor/html5lib/trie/datrie.py new file mode 100644 index 0000000000..1c3b43202c --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/trie/datrie.py @@ -0,0 +1,44 @@ +from __future__ import absolute_import, division, unicode_literals + +from datrie import Trie as DATrie +from pip.vendor.six import text_type + +from ._base import Trie as 
ABCTrie + + +class Trie(ABCTrie): + def __init__(self, data): + chars = set() + for key in data.keys(): + if not isinstance(key, text_type): + raise TypeError("All keys must be strings") + for char in key: + chars.add(char) + + self._data = DATrie("".join(chars)) + for key, value in data.items(): + self._data[key] = value + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + raise NotImplementedError() + + def __getitem__(self, key): + return self._data[key] + + def keys(self, prefix=None): + return self._data.keys(prefix) + + def has_keys_with_prefix(self, prefix): + return self._data.has_keys_with_prefix(prefix) + + def longest_prefix(self, prefix): + return self._data.longest_prefix(prefix) + + def longest_prefix_item(self, prefix): + return self._data.longest_prefix_item(prefix) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/trie/py.py b/awx/lib/site-packages/pip/vendor/html5lib/trie/py.py new file mode 100644 index 0000000000..f2d3641134 --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/trie/py.py @@ -0,0 +1,67 @@ +from __future__ import absolute_import, division, unicode_literals +from pip.vendor.six import text_type + +from bisect import bisect_left + +from ._base import Trie as ABCTrie + + +class Trie(ABCTrie): + def __init__(self, data): + if not all(isinstance(x, text_type) for x in data.keys()): + raise TypeError("All keys must be strings") + + self._data = data + self._keys = sorted(data.keys()) + self._cachestr = "" + self._cachepoints = (0, len(data)) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + def keys(self, prefix=None): + if prefix is None or prefix == "" or not self._keys: + return set(self._keys) + + if prefix.startswith(self._cachestr): + lo, hi = self._cachepoints + start = i = bisect_left(self._keys, prefix, lo, hi) + else: + start = i = bisect_left(self._keys, prefix) + + keys = set() + if start == len(self._keys): + return keys + + while self._keys[i].startswith(prefix): + keys.add(self._keys[i]) + i += 1 + + self._cachestr = prefix + self._cachepoints = (start, i) + + return keys + + def has_keys_with_prefix(self, prefix): + if prefix in self._data: + return True + + if prefix.startswith(self._cachestr): + lo, hi = self._cachepoints + i = bisect_left(self._keys, prefix, lo, hi) + else: + i = bisect_left(self._keys, prefix) + + if i == len(self._keys): + return False + + return self._keys[i].startswith(prefix) diff --git a/awx/lib/site-packages/pip/vendor/html5lib/utils.py b/awx/lib/site-packages/pip/vendor/html5lib/utils.py new file mode 100644 index 0000000000..4e8559db6f --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/html5lib/utils.py @@ -0,0 +1,78 @@ +from __future__ import absolute_import, division, unicode_literals + +from types import ModuleType + +try: + import xml.etree.cElementTree as default_etree +except ImportError: + import xml.etree.ElementTree as default_etree + + +class MethodDispatcher(dict): + """Dict with 2 special properties: + + On initiation, keys that are lists, sets or tuples are converted to + multiple keys so accessing any one of the items in the original + list-like object returns the matching value + + md = MethodDispatcher({("foo", "bar"):"baz"}) + md["foo"] == "baz" + + A default value which can be set through the default attribute. 
+ """ + + def __init__(self, items=()): + # Using _dictEntries instead of directly assigning to self is about + # twice as fast. Please do careful performance testing before changing + # anything here. + _dictEntries = [] + for name, value in items: + if type(name) in (list, tuple, frozenset, set): + for item in name: + _dictEntries.append((item, value)) + else: + _dictEntries.append((name, value)) + dict.__init__(self, _dictEntries) + self.default = None + + def __getitem__(self, key): + return dict.get(self, key, self.default) + + +# Some utility functions to dal with weirdness around UCS2 vs UCS4 +# python builds + +def isSurrogatePair(data): + return (len(data) == 2 and + ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and + ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF) + + +def surrogatePairToCodepoint(data): + char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 + + (ord(data[1]) - 0xDC00)) + return char_val + +# Module Factory Factory (no, this isn't Java, I know) +# Here to stop this being duplicated all over the place. + + +def moduleFactoryFactory(factory): + moduleCache = {} + + def moduleFactory(baseModule, *args, **kwargs): + if isinstance(ModuleType.__name__, type("")): + name = "_%s_factory" % baseModule.__name__ + else: + name = b"_%s_factory" % baseModule.__name__ + + if name in moduleCache: + return moduleCache[name] + else: + mod = ModuleType(name) + objs = factory(baseModule, *args, **kwargs) + mod.__dict__.update(objs) + moduleCache[name] = mod + return mod + + return moduleFactory diff --git a/awx/lib/site-packages/pip/vendor/six.py b/awx/lib/site-packages/pip/vendor/six.py new file mode 100644 index 0000000000..eae31454ae --- /dev/null +++ b/awx/lib/site-packages/pip/vendor/six.py @@ -0,0 +1,404 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2013 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of +# this software and associated documentation files (the "Software"), to deal in +# the Software without restriction, including without limitation the rights to +# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +# the Software, and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import operator +import sys +import types + +__author__ = "Benjamin Peterson <benjamin@python.org>" +__version__ = "1.3.0" + + +# True if we are running on Python 3. +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. 
+ MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) + # This is a bit ugly, but it avoids running this again. + delattr(tp, self.name) + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + + +class _MovedItems(types.ModuleType): + """Lazy loading of moved objects""" + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("reload_module", "__builtin__", "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", 
"tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("winreg", "_winreg"), +] +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) +del attr + +moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" + + _iterkeys = "keys" + _itervalues = "values" + _iteritems = "items" + _iterlists = "lists" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + _iterkeys = "iterkeys" + _itervalues = "itervalues" + _iteritems = "iteritems" + _iterlists = "iterlists" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +def iterkeys(d, **kw): + """Return an iterator over the keys of a dictionary.""" + return iter(getattr(d, _iterkeys)(**kw)) + +def itervalues(d, **kw): + """Return an iterator over the values of a dictionary.""" + return iter(getattr(d, _itervalues)(**kw)) + +def iteritems(d, **kw): + """Return an iterator over the (key, value) pairs of a dictionary.""" + return iter(getattr(d, _iteritems)(**kw)) + +def iterlists(d, **kw): + """Return an iterator over the (key, [values]) pairs of a dictionary.""" + return iter(getattr(d, _iterlists)(**kw)) + + +if PY3: + def b(s): + return s.encode("latin-1") + def u(s): + return s + if sys.version_info[1] <= 1: + def 
int2byte(i): + return bytes((i,)) + else: + # This is about 2x faster than the implementation above on 3.2+ + int2byte = operator.methodcaller("to_bytes", 1, "big") + import io + StringIO = io.StringIO + BytesIO = io.BytesIO +else: + def b(s): + return s + def u(s): + return unicode(s, "unicode_escape") + int2byte = chr + import StringIO + StringIO = BytesIO = StringIO.StringIO +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +if PY3: + import builtins + exec_ = getattr(builtins, "exec") + + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + + print_ = getattr(builtins, "print") + del builtins + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + + def print_(*args, **kwargs): + """The new-style print function.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + def write(data): + if not isinstance(data, basestring): + data = str(data) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) + +_add_doc(reraise, """Reraise an exception.""") + + +def with_metaclass(meta, base=object): + """Create a base class with a metaclass.""" + return meta("NewBase", (base,), {}) diff --git a/awx/lib/site-packages/pip/wheel.py b/awx/lib/site-packages/pip/wheel.py new file mode 100644 index 0000000000..bd22a89f4b --- /dev/null +++ b/awx/lib/site-packages/pip/wheel.py @@ -0,0 +1,335 @@ +""" +Support for installing and building the "wheel" binary package format. +""" +from __future__ import with_statement + +import csv +import functools +import hashlib +import os +import pkg_resources +import re +import shutil +import sys +from base64 import urlsafe_b64encode + +from pip.locations import distutils_scheme +from pip.log import logger +from pip import pep425tags +from pip.util import call_subprocess, normalize_path, make_path_relative + +wheel_ext = '.whl' +# don't use pkg_resources.Requirement.parse, to avoid the override in distribute, +# that converts 'setuptools' to 'distribute'. +setuptools_requirement = list(pkg_resources.parse_requirements("setuptools>=0.8"))[0] + +def wheel_setuptools_support(): + """ + Return True if we have a setuptools that supports wheel. 
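+ (i.e. one satisfying the setuptools>=0.8 requirement parsed above)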
+ """ + fulfilled = False + try: + installed_setuptools = pkg_resources.get_distribution('setuptools') + if installed_setuptools in setuptools_requirement: + fulfilled = True + except pkg_resources.DistributionNotFound: + pass + if not fulfilled: + logger.warn("%s is required for wheel installs." % setuptools_requirement) + return fulfilled + +def rehash(path, algo='sha256', blocksize=1<<20): + """Return (hash, length) for path using hashlib.new(algo)""" + h = hashlib.new(algo) + length = 0 + with open(path) as f: + block = f.read(blocksize) + while block: + length += len(block) + h.update(block) + block = f.read(blocksize) + digest = 'sha256='+urlsafe_b64encode(h.digest()).decode('latin1').rstrip('=') + return (digest, length) + +try: + unicode + def binary(s): + if isinstance(s, unicode): + return s.encode('ascii') + return s +except NameError: + def binary(s): + if isinstance(s, str): + return s.encode('ascii') + +def open_for_csv(name, mode): + if sys.version_info[0] < 3: + nl = {} + bin = 'b' + else: + nl = { 'newline': '' } + bin = '' + return open(name, mode + bin, **nl) + +def fix_script(path): + """Replace #!python with #!/path/to/python + Return True if file was changed.""" + # XXX RECORD hashes will need to be updated + if os.path.isfile(path): + script = open(path, 'rb') + try: + firstline = script.readline() + if not firstline.startswith(binary('#!python')): + return False + exename = sys.executable.encode(sys.getfilesystemencoding()) + firstline = binary('#!') + exename + binary(os.linesep) + rest = script.read() + finally: + script.close() + script = open(path, 'wb') + try: + script.write(firstline) + script.write(rest) + finally: + script.close() + return True + +dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?) + \.dist-info$""", re.VERBOSE) + +def root_is_purelib(name, wheeldir): + """ + Return True if the extracted wheel in wheeldir should go into purelib. 
+ """ + name_folded = name.replace("-", "_") + for item in os.listdir(wheeldir): + match = dist_info_re.match(item) + if match and match.group('name') == name_folded: + with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel: + for line in wheel: + line = line.lower().rstrip() + if line == "root-is-purelib: true": + return True + return False + +def move_wheel_files(name, req, wheeldir, user=False, home=None): + """Install a wheel""" + + scheme = distutils_scheme(name, user=user, home=home) + + if root_is_purelib(name, wheeldir): + lib_dir = scheme['purelib'] + else: + lib_dir = scheme['platlib'] + + info_dir = [] + data_dirs = [] + source = wheeldir.rstrip(os.path.sep) + os.path.sep + installed = {} + changed = set() + + def normpath(src, p): + return make_path_relative(src, p).replace(os.path.sep, '/') + + def record_installed(srcfile, destfile, modified=False): + """Map archive RECORD paths to installation RECORD paths.""" + oldpath = normpath(srcfile, wheeldir) + newpath = normpath(destfile, lib_dir) + installed[oldpath] = newpath + if modified: + changed.add(destfile) + + def clobber(source, dest, is_base, fixer=None): + if not os.path.exists(dest): # common for the 'include' path + os.makedirs(dest) + + for dir, subdirs, files in os.walk(source): + basedir = dir[len(source):].lstrip(os.path.sep) + if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'): + continue + for s in subdirs: + destsubdir = os.path.join(dest, basedir, s) + if is_base and basedir == '' and destsubdir.endswith('.data'): + data_dirs.append(s) + continue + elif (is_base + and s.endswith('.dist-info') + # is self.req.project_name case preserving? + and s.lower().startswith(req.project_name.replace('-', '_').lower())): + assert not info_dir, 'Multiple .dist-info directories' + info_dir.append(destsubdir) + if not os.path.exists(destsubdir): + os.makedirs(destsubdir) + for f in files: + srcfile = os.path.join(dir, f) + destfile = os.path.join(dest, basedir, f) + shutil.move(srcfile, destfile) + changed = False + if fixer: + changed = fixer(destfile) + record_installed(srcfile, destfile, changed) + + clobber(source, lib_dir, True) + + assert info_dir, "%s .dist-info directory not found" % req + + for datadir in data_dirs: + fixer = None + for subdir in os.listdir(os.path.join(wheeldir, datadir)): + fixer = None + if subdir == 'scripts': + fixer = fix_script + source = os.path.join(wheeldir, datadir, subdir) + dest = scheme[subdir] + clobber(source, dest, False, fixer=fixer) + + record = os.path.join(info_dir[0], 'RECORD') + temp_record = os.path.join(info_dir[0], 'RECORD.pip') + with open_for_csv(record, 'r') as record_in: + with open_for_csv(temp_record, 'w+') as record_out: + reader = csv.reader(record_in) + writer = csv.writer(record_out) + for row in reader: + row[0] = installed.pop(row[0], row[0]) + if row[0] in changed: + row[1], row[2] = rehash(row[0]) + writer.writerow(row) + for f in installed: + writer.writerow((installed[f], '', '')) + shutil.move(temp_record, record) + +def _unique(fn): + @functools.wraps(fn) + def unique(*args, **kw): + seen = set() + for item in fn(*args, **kw): + if item not in seen: + seen.add(item) + yield item + return unique + +# TODO: this goes somewhere besides the wheel module +@_unique +def uninstallation_paths(dist): + """ + Yield all the uninstallation paths for dist based on RECORD-without-.pyc + + Yield paths to all the files in RECORD. For each .py file in RECORD, add + the .pyc in the same directory. 
+ + UninstallPathSet.add() takes care of the __pycache__ .pyc. + """ + from pip.req import FakeFile # circular import + r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD'))) + for row in r: + path = os.path.join(dist.location, row[0]) + yield path + if path.endswith('.py'): + dn, fn = os.path.split(path) + base = fn[:-3] + path = os.path.join(dn, base+'.pyc') + yield path + + +class Wheel(object): + """A wheel file""" + + # TODO: maybe move the install code into this class + + wheel_file_re = re.compile( + r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?) + ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?) + \.whl|\.dist-info)$""", + re.VERBOSE) + + def __init__(self, filename): + wheel_info = self.wheel_file_re.match(filename) + self.filename = filename + self.name = wheel_info.group('name').replace('_', '-') + self.version = wheel_info.group('ver') + self.pyversions = wheel_info.group('pyver').split('.') + self.abis = wheel_info.group('abi').split('.') + self.plats = wheel_info.group('plat').split('.') + + # All the tag combinations from this file + self.file_tags = set((x, y, z) for x in self.pyversions for y + in self.abis for z in self.plats) + + def support_index_min(self, tags=None): + """ + Return the lowest index that a file_tag achieves in the supported_tags list + e.g. if there are 8 supported tags, and one of the file tags is first in the + list, then return 0. + """ + if tags is None: # for mock + tags = pep425tags.supported_tags + indexes = [tags.index(c) for c in self.file_tags if c in tags] + return min(indexes) if indexes else None + + def supported(self, tags=None): + """Is this wheel supported on this system?""" + if tags is None: # for mock + tags = pep425tags.supported_tags + return bool(set(tags).intersection(self.file_tags)) + + +class WheelBuilder(object): + """Build wheels from a RequirementSet.""" + + def __init__(self, requirement_set, finder, wheel_dir, build_options=[], global_options=[]): + self.requirement_set = requirement_set + self.finder = finder + self.wheel_dir = normalize_path(wheel_dir) + self.build_options = build_options + self.global_options = global_options + + def _build_one(self, req): + """Build one wheel.""" + + base_args = [ + sys.executable, '-c', + "import setuptools;__file__=%r;"\ + "exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % req.setup_py] + \ + list(self.global_options) + + logger.notify('Running setup.py bdist_wheel for %s' % req.name) + logger.notify('Destination directory: %s' % self.wheel_dir) + wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] + self.build_options + try: + call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False) + return True + except: + logger.error('Failed building wheel for %s' % req.name) + return False + + def build(self): + """Build wheels.""" + + #unpack and construct req set + self.requirement_set.prepare_files(self.finder) + + reqset = self.requirement_set.requirements.values() + + #make the wheelhouse + if not os.path.exists(self.wheel_dir): + os.makedirs(self.wheel_dir) + + #build the wheels + logger.notify('Building wheels for collected packages: %s' % ', '.join([req.name for req in reqset])) + logger.indent += 2 + build_success, build_failure = [], [] + for req in reqset: + if req.is_wheel: + logger.notify("Skipping building wheel: %s", req.url) + continue + if self._build_one(req): + build_success.append(req) + else: + build_failure.append(req) + logger.indent -= 2 + + #notify success/failure + if build_success: + 
logger.notify('Successfully built %s' % ' '.join([req.name for req in build_success])) + if build_failure: + logger.notify('Failed to build %s' % ' '.join([req.name for req in build_failure])) diff --git a/awx/lib/site-packages/pkg_resources.py b/awx/lib/site-packages/pkg_resources.py new file mode 100644 index 0000000000..0297601629 --- /dev/null +++ b/awx/lib/site-packages/pkg_resources.py @@ -0,0 +1,2815 @@ +"""Package resource API +-------------------- + +A resource is a logical file contained within a package, or a logical +subdirectory thereof. The package resource API expects resource names +to have their path parts separated with ``/``, *not* whatever the local +path separator is. Do not use os.path operations to manipulate resource +names being passed into the API. + +The package resource API is designed to work with normal filesystem packages, +.egg files, and unpacked .egg files. It can also work in a limited way with +.zip files and with custom PEP 302 loaders that support the ``get_data()`` +method. +""" + +import sys +import os +import time +import re +import imp +import zipfile +import zipimport +import warnings +import stat +try: + from urlparse import urlparse, urlunparse +except ImportError: + from urllib.parse import urlparse, urlunparse + +try: + frozenset +except NameError: + from sets import ImmutableSet as frozenset +try: + basestring + next = lambda o: o.next() + from cStringIO import StringIO as BytesIO + def exec_(code, globs=None, locs=None): + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") +except NameError: + basestring = str + from io import BytesIO + exec_ = eval("exec") + def execfile(fn, globs=None, locs=None): + if globs is None: + globs = globals() + if locs is None: + locs = globs + exec_(compile(open(fn).read(), fn, 'exec'), globs, locs) + import functools + reduce = functools.reduce + +# capture these to bypass sandboxing +from os import utime +try: + from os import mkdir, rename, unlink + WRITE_SUPPORT = True +except ImportError: + # no write support, probably under GAE + WRITE_SUPPORT = False + +from os import open as os_open +from os.path import isdir, split + +# Avoid try/except due to potential problems with delayed import mechanisms. 
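+# importlib-based import machinery is only present on CPython 3.3 and later.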
+if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": + import importlib._bootstrap as importlib_bootstrap +else: + importlib_bootstrap = None + +try: + import parser +except ImportError: + pass + +def _bypass_ensure_directory(name, mode=0x1FF): # 0777 + # Sandbox-bypassing version of ensure_directory() + if not WRITE_SUPPORT: + raise IOError('"os.mkdir" not supported on this platform.') + dirname, filename = split(name) + if dirname and filename and not isdir(dirname): + _bypass_ensure_directory(dirname) + mkdir(dirname, mode) + + +_state_vars = {} + +def _declare_state(vartype, **kw): + g = globals() + for name, val in kw.items(): + g[name] = val + _state_vars[name] = vartype + +def __getstate__(): + state = {} + g = globals() + for k, v in _state_vars.items(): + state[k] = g['_sget_'+v](g[k]) + return state + +def __setstate__(state): + g = globals() + for k, v in state.items(): + g['_sset_'+_state_vars[k]](k, g[k], v) + return state + +def _sget_dict(val): + return val.copy() + +def _sset_dict(key, ob, state): + ob.clear() + ob.update(state) + +def _sget_object(val): + return val.__getstate__() + +def _sset_object(key, ob, state): + ob.__setstate__(state) + +_sget_none = _sset_none = lambda *args: None + + +def get_supported_platform(): + """Return this platform's maximum compatible version. + + distutils.util.get_platform() normally reports the minimum version + of Mac OS X that would be required to *use* extensions produced by + distutils. But what we want when checking compatibility is to know the + version of Mac OS X that we are *running*. To allow usage of packages that + explicitly require a newer version of Mac OS X, we must also know the + current version of the OS. + + If this condition occurs for any other platform with a version in its + platform strings, this function should be extended accordingly. 
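+ (currently only the macosx-* platform strings get this treatment)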
+ """ + plat = get_build_platform() + m = macosVersionString.match(plat) + if m is not None and sys.platform == "darwin": + try: + plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) + except ValueError: + pass # not Mac OS X + return plat + +__all__ = [ + # Basic resource access and distribution/entry point discovery + 'require', 'run_script', 'get_provider', 'get_distribution', + 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', + 'resource_string', 'resource_stream', 'resource_filename', + 'resource_listdir', 'resource_exists', 'resource_isdir', + + # Environmental control + 'declare_namespace', 'working_set', 'add_activation_listener', + 'find_distributions', 'set_extraction_path', 'cleanup_resources', + 'get_default_cache', + + # Primary implementation classes + 'Environment', 'WorkingSet', 'ResourceManager', + 'Distribution', 'Requirement', 'EntryPoint', + + # Exceptions + 'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra', + 'ExtractionError', + + # Parsing functions and string utilities + 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', + 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', + 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', + + # filesystem utilities + 'ensure_directory', 'normalize_path', + + # Distribution "precedence" constants + 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', + + # "Provider" interfaces, implementations, and registration/lookup APIs + 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', + 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', + 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', + 'register_finder', 'register_namespace_handler', 'register_loader_type', + 'fixup_namespace_packages', 'get_importer', + + # Deprecated/backward compatibility only + 'run_main', 'AvailableDistributions', +] + +class ResolutionError(Exception): + """Abstract base for dependency resolution errors""" + def __repr__(self): + return self.__class__.__name__+repr(self.args) + +class VersionConflict(ResolutionError): + """An already-installed version conflicts with the requested version""" + +class DistributionNotFound(ResolutionError): + """A requested distribution was not found""" + +class UnknownExtra(ResolutionError): + """Distribution doesn't have an "extra feature" of the given name""" +_provider_factories = {} + +PY_MAJOR = sys.version[:3] +EGG_DIST = 3 +BINARY_DIST = 2 +SOURCE_DIST = 1 +CHECKOUT_DIST = 0 +DEVELOP_DIST = -1 + +def register_loader_type(loader_type, provider_factory): + """Register `provider_factory` to make providers for `loader_type` + + `loader_type` is the type or class of a PEP 302 ``module.__loader__``, + and `provider_factory` is a function that, passed a *module* object, + returns an ``IResourceProvider`` for that module. 
+ """ + _provider_factories[loader_type] = provider_factory + +def get_provider(moduleOrReq): + """Return an IResourceProvider for the named module or requirement""" + if isinstance(moduleOrReq,Requirement): + return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] + try: + module = sys.modules[moduleOrReq] + except KeyError: + __import__(moduleOrReq) + module = sys.modules[moduleOrReq] + loader = getattr(module, '__loader__', None) + return _find_adapter(_provider_factories, loader)(module) + +def _macosx_vers(_cache=[]): + if not _cache: + import platform + version = platform.mac_ver()[0] + # fallback for MacPorts + if version == '': + import plistlib + plist = '/System/Library/CoreServices/SystemVersion.plist' + if os.path.exists(plist): + if hasattr(plistlib, 'readPlist'): + plist_content = plistlib.readPlist(plist) + if 'ProductVersion' in plist_content: + version = plist_content['ProductVersion'] + + _cache.append(version.split('.')) + return _cache[0] + +def _macosx_arch(machine): + return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine) + +def get_build_platform(): + """Return this platform's string for platform-specific distributions + + XXX Currently this is the same as ``distutils.util.get_platform()``, but it + needs some hacks for Linux and Mac OS X. + """ + try: + # Python 2.7 or >=3.2 + from sysconfig import get_platform + except ImportError: + from distutils.util import get_platform + + plat = get_platform() + if sys.platform == "darwin" and not plat.startswith('macosx-'): + try: + version = _macosx_vers() + machine = os.uname()[4].replace(" ", "_") + return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), + _macosx_arch(machine)) + except ValueError: + # if someone is running a non-Mac darwin system, this will fall + # through to the default implementation + pass + return plat + +macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") +darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") +get_platform = get_build_platform # XXX backward compat + + +def compatible_platforms(provided,required): + """Can code for the `provided` platform run on the `required` platform? + + Returns true if either platform is ``None``, or the platforms are equal. + + XXX Needs compatibility checks for Linux and other unixy OSes. + """ + if provided is None or required is None or provided==required: + return True # easy case + + # Mac OS X special cases + reqMac = macosVersionString.match(required) + if reqMac: + provMac = macosVersionString.match(provided) + + # is this a Mac package? + if not provMac: + # this is backwards compatibility for packages built before + # setuptools 0.6. All packages built after this point will + # use the new macosx designation. + provDarwin = darwinVersionString.match(provided) + if provDarwin: + dversion = int(provDarwin.group(1)) + macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) + if dversion == 7 and macosversion >= "10.3" or \ + dversion == 8 and macosversion >= "10.4": + + #import warnings + #warnings.warn("Mac eggs should be rebuilt to " + # "use the macosx designation instead of darwin.", + # category=DeprecationWarning) + return True + return False # egg isn't macosx or legacy darwin + + # are they the same major version and machine type? + if provMac.group(1) != reqMac.group(1) or \ + provMac.group(3) != reqMac.group(3): + return False + + # is the required OS major update >= the provided one? 
+ if int(provMac.group(2)) > int(reqMac.group(2)): + return False + + return True + + # XXX Linux and other platforms' special cases should go here + return False + + +def run_script(dist_spec, script_name): + """Locate distribution `dist_spec` and run its `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + require(dist_spec)[0].run_script(script_name, ns) + +run_main = run_script # backward compatibility + +def get_distribution(dist): + """Return a current distribution object for a Requirement or string""" + if isinstance(dist,basestring): dist = Requirement.parse(dist) + if isinstance(dist,Requirement): dist = get_provider(dist) + if not isinstance(dist,Distribution): + raise TypeError("Expected string, Requirement, or Distribution", dist) + return dist + +def load_entry_point(dist, group, name): + """Return `name` entry point of `group` for `dist` or raise ImportError""" + return get_distribution(dist).load_entry_point(group, name) + +def get_entry_map(dist, group=None): + """Return the entry point map for `group`, or the full entry map""" + return get_distribution(dist).get_entry_map(group) + +def get_entry_info(dist, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return get_distribution(dist).get_entry_info(group, name) + + +class IMetadataProvider: + + def has_metadata(name): + """Does the package's distribution contain the named metadata?""" + + def get_metadata(name): + """The named metadata resource as a string""" + + def get_metadata_lines(name): + """Yield named metadata resource as list of non-blank non-comment lines + + Leading and trailing whitespace is stripped from each line, and lines + with ``#`` as the first non-blank character are omitted.""" + + def metadata_isdir(name): + """Is the named metadata a directory? (like ``os.path.isdir()``)""" + + def metadata_listdir(name): + """List of metadata names in the directory (like ``os.listdir()``)""" + + def run_script(script_name, namespace): + """Execute the named script in the supplied namespace dictionary""" + + +class IResourceProvider(IMetadataProvider): + """An object that provides access to package resources""" + + def get_resource_filename(manager, resource_name): + """Return a true filesystem path for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_stream(manager, resource_name): + """Return a readable file-like object for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_string(manager, resource_name): + """Return a string containing the contents of `resource_name` + + `manager` must be an ``IResourceManager``""" + + def has_resource(resource_name): + """Does the package contain the named resource?""" + + def resource_isdir(resource_name): + """Is the named resource a directory? 
(like ``os.path.isdir()``)""" + + def resource_listdir(resource_name): + """List of resource names in the directory (like ``os.listdir()``)""" + + +class WorkingSet(object): + """A collection of active distributions on sys.path (or a similar list)""" + + def __init__(self, entries=None): + """Create working set from list of path entries (default=sys.path)""" + self.entries = [] + self.entry_keys = {} + self.by_key = {} + self.callbacks = [] + + if entries is None: + entries = sys.path + + for entry in entries: + self.add_entry(entry) + + def add_entry(self, entry): + """Add a path item to ``.entries``, finding any distributions on it + + ``find_distributions(entry, True)`` is used to find distributions + corresponding to the path entry, and they are added. `entry` is + always appended to ``.entries``, even if it is already present. + (This is because ``sys.path`` can contain the same value more than + once, and the ``.entries`` of the ``sys.path`` WorkingSet should always + equal ``sys.path``.) + """ + self.entry_keys.setdefault(entry, []) + self.entries.append(entry) + for dist in find_distributions(entry, True): + self.add(dist, entry, False) + + def __contains__(self,dist): + """True if `dist` is the active distribution for its project""" + return self.by_key.get(dist.key) == dist + + def find(self, req): + """Find a distribution matching requirement `req` + + If there is an active distribution for the requested project, this + returns it as long as it meets the version requirement specified by + `req`. But, if there is an active distribution for the project and it + does *not* meet the `req` requirement, ``VersionConflict`` is raised. + If there is no active distribution for the requested project, ``None`` + is returned. + """ + dist = self.by_key.get(req.key) + if dist is not None and dist not in req: + raise VersionConflict(dist,req) # XXX add more info + else: + return dist + + def iter_entry_points(self, group, name=None): + """Yield entry point objects from `group` matching `name` + + If `name` is None, yields all entry points in `group` from all + distributions in the working set, otherwise only ones matching + both `group` and `name` are yielded (in distribution order). + """ + for dist in self: + entries = dist.get_entry_map(group) + if name is None: + for ep in entries.values(): + yield ep + elif name in entries: + yield entries[name] + + def run_script(self, requires, script_name): + """Locate distribution for `requires` and run `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + self.require(requires)[0].run_script(script_name, ns) + + def __iter__(self): + """Yield distributions for non-duplicate projects in the working set + + The yield order is the order in which the items' path entries were + added to the working set. + """ + seen = {} + for item in self.entries: + if item not in self.entry_keys: + # workaround a cache issue + continue + + for key in self.entry_keys[item]: + if key not in seen: + seen[key]=1 + yield self.by_key[key] + + def add(self, dist, entry=None, insert=True): + """Add `dist` to working set, associated with `entry` + + If `entry` is unspecified, it defaults to the ``.location`` of `dist`. + On exit from this routine, `entry` is added to the end of the working + set's ``.entries`` (if it wasn't already present). + + `dist` is only added to the working set if it's for a project that + doesn't already have a distribution in the set. 
If it's added, any + callbacks registered with the ``subscribe()`` method will be called. + """ + if insert: + dist.insert_on(self.entries, entry) + + if entry is None: + entry = dist.location + keys = self.entry_keys.setdefault(entry,[]) + keys2 = self.entry_keys.setdefault(dist.location,[]) + if dist.key in self.by_key: + return # ignore hidden distros + + self.by_key[dist.key] = dist + if dist.key not in keys: + keys.append(dist.key) + if dist.key not in keys2: + keys2.append(dist.key) + self._added_new(dist) + + def resolve(self, requirements, env=None, installer=None): + """List all distributions needed to (recursively) meet `requirements` + + `requirements` must be a sequence of ``Requirement`` objects. `env`, + if supplied, should be an ``Environment`` instance. If + not supplied, it defaults to all distributions available within any + entry or distribution in the working set. `installer`, if supplied, + will be invoked with each requirement that cannot be met by an + already-installed distribution; it should return a ``Distribution`` or + ``None``. + """ + + requirements = list(requirements)[::-1] # set up the stack + processed = {} # set of processed requirements + best = {} # key -> dist + to_activate = [] + + while requirements: + req = requirements.pop(0) # process dependencies breadth-first + if req in processed: + # Ignore cyclic or redundant dependencies + continue + dist = best.get(req.key) + if dist is None: + # Find the best distribution and add it to the map + dist = self.by_key.get(req.key) + if dist is None: + if env is None: + env = Environment(self.entries) + dist = best[req.key] = env.best_match(req, self, installer) + if dist is None: + #msg = ("The '%s' distribution was not found on this " + # "system, and is required by this application.") + #raise DistributionNotFound(msg % req) + + # unfortunately, zc.buildout uses a str(err) + # to get the name of the distribution here.. + raise DistributionNotFound(req) + to_activate.append(dist) + if dist not in req: + # Oops, the "best" so far conflicts with a dependency + raise VersionConflict(dist,req) # XXX put more info here + requirements.extend(dist.requires(req.extras)[::-1]) + processed[req] = True + + return to_activate # return list of distros to activate + + def find_plugins(self, plugin_env, full_env=None, installer=None, + fallback=True): + """Find all activatable distributions in `plugin_env` + + Example usage:: + + distributions, errors = working_set.find_plugins( + Environment(plugin_dirlist) + ) + map(working_set.add, distributions) # add plugins+libs to sys.path + print 'Could not load', errors # display errors + + The `plugin_env` should be an ``Environment`` instance that contains + only distributions that are in the project's "plugin directory" or + directories. The `full_env`, if supplied, should be an ``Environment`` + that contains all currently-available distributions. If `full_env` is not + supplied, one is created automatically from the ``WorkingSet`` this + method is called on, which will typically mean that every directory on + ``sys.path`` will be scanned for distributions. + + `installer` is a standard installer callback as used by the + ``resolve()`` method. The `fallback` flag indicates whether we should + attempt to resolve older versions of a plugin if the newest version + cannot be resolved.
+ + This method returns a 2-tuple: (`distributions`, `error_info`), where + `distributions` is a list of the distributions found in `plugin_env` + that were loadable, along with any other distributions that are needed + to resolve their dependencies. `error_info` is a dictionary mapping + unloadable plugin distributions to an exception instance describing the + error that occurred. Usually this will be a ``DistributionNotFound`` or + ``VersionConflict`` instance. + """ + + plugin_projects = list(plugin_env) + plugin_projects.sort() # scan project names in alphabetic order + + error_info = {} + distributions = {} + + if full_env is None: + env = Environment(self.entries) + env += plugin_env + else: + env = full_env + plugin_env + + shadow_set = self.__class__([]) + list(map(shadow_set.add, self)) # put all our entries in shadow_set + + for project_name in plugin_projects: + + for dist in plugin_env[project_name]: + + req = [dist.as_requirement()] + + try: + resolvees = shadow_set.resolve(req, env, installer) + + except ResolutionError: + v = sys.exc_info()[1] + error_info[dist] = v # save error info + if fallback: + continue # try the next older version of project + else: + break # give up on this project, keep going + + else: + list(map(shadow_set.add, resolvees)) + distributions.update(dict.fromkeys(resolvees)) + + # success, no need to try any more versions of this project + break + + distributions = list(distributions) + distributions.sort() + + return distributions, error_info + + def require(self, *requirements): + """Ensure that distributions matching `requirements` are activated + + `requirements` must be a string or a (possibly-nested) sequence + thereof, specifying the distributions and versions required. The + return value is a sequence of the distributions that needed to be + activated to fulfill the requirements; all relevant distributions are + included, even if they were already activated in this working set. + """ + needed = self.resolve(parse_requirements(requirements)) + + for dist in needed: + self.add(dist) + + return needed + + def subscribe(self, callback): + """Invoke `callback` for all distributions (including existing ones)""" + if callback in self.callbacks: + return + self.callbacks.append(callback) + for dist in self: + callback(dist) + + def _added_new(self, dist): + for callback in self.callbacks: + callback(dist) + + def __getstate__(self): + return ( + self.entries[:], self.entry_keys.copy(), self.by_key.copy(), + self.callbacks[:] + ) + + def __setstate__(self, e_k_b_c): + entries, keys, by_key, callbacks = e_k_b_c + self.entries = entries[:] + self.entry_keys = keys.copy() + self.by_key = by_key.copy() + self.callbacks = callbacks[:] + + +class Environment(object): + """Searchable snapshot of distributions on a search path""" + + def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): + """Snapshot distributions available on a search path + + Any distributions found on `search_path` are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. + + `platform` is an optional string specifying the name of the platform + that platform-specific distributions must be compatible with. If + unspecified, it defaults to the current platform. `python` is an + optional string naming the desired version of Python (e.g. ``'2.4'``); + it defaults to the current version. 
+ + You may explicitly set `platform` (and/or `python`) to ``None`` if you + wish to map *all* distributions, not just those compatible with the + running platform or Python version. + """ + self._distmap = {} + self._cache = {} + self.platform = platform + self.python = python + self.scan(search_path) + + def can_add(self, dist): + """Is distribution `dist` acceptable for this environment? + + The distribution must match the platform and python version + requirements specified when this environment was created, or False + is returned. + """ + return (self.python is None or dist.py_version is None + or dist.py_version==self.python) \ + and compatible_platforms(dist.platform,self.platform) + + def remove(self, dist): + """Remove `dist` from the environment""" + self._distmap[dist.key].remove(dist) + + def scan(self, search_path=None): + """Scan `search_path` for distributions usable in this environment + + Any distributions found are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. Only distributions conforming to + the platform/python version defined at initialization are added. + """ + if search_path is None: + search_path = sys.path + + for item in search_path: + for dist in find_distributions(item): + self.add(dist) + + def __getitem__(self,project_name): + """Return a newest-to-oldest list of distributions for `project_name` + """ + try: + return self._cache[project_name] + except KeyError: + project_name = project_name.lower() + if project_name not in self._distmap: + return [] + + if project_name not in self._cache: + dists = self._cache[project_name] = self._distmap[project_name] + _sort_dists(dists) + + return self._cache[project_name] + + def add(self,dist): + """Add `dist` if we ``can_add()`` it and it isn't already added""" + if self.can_add(dist) and dist.has_version(): + dists = self._distmap.setdefault(dist.key,[]) + if dist not in dists: + dists.append(dist) + if dist.key in self._cache: + _sort_dists(self._cache[dist.key]) + + def best_match(self, req, working_set, installer=None): + """Find distribution best matching `req` and usable on `working_set` + + This calls the ``find(req)`` method of the `working_set` to see if a + suitable distribution is already active. (This may raise + ``VersionConflict`` if an unsuitable version of the project is already + active in the specified `working_set`.) If a suitable distribution + isn't active, this method returns the newest distribution in the + environment that meets the ``Requirement`` in `req`. If no suitable + distribution is found, and `installer` is supplied, then the result of + calling the environment's ``obtain(req, installer)`` method will be + returned. + """ + dist = working_set.find(req) + if dist is not None: + return dist + for dist in self[req.key]: + if dist in req: + return dist + return self.obtain(req, installer) # try and download/install + + def obtain(self, requirement, installer=None): + """Obtain a distribution matching `requirement` (e.g. via download) + + Obtain a distro that matches requirement (e.g. via download). In the + base ``Environment`` class, this routine just returns + ``installer(requirement)``, unless `installer` is None, in which case + None is returned instead. 
This method is a hook that allows subclasses + to attempt other ways of obtaining a distribution before falling back + to the `installer` argument.""" + if installer is not None: + return installer(requirement) + + def __iter__(self): + """Yield the unique project names of the available distributions""" + for key in self._distmap.keys(): + if self[key]: yield key + + def __iadd__(self, other): + """In-place addition of a distribution or environment""" + if isinstance(other,Distribution): + self.add(other) + elif isinstance(other,Environment): + for project in other: + for dist in other[project]: + self.add(dist) + else: + raise TypeError("Can't add %r to environment" % (other,)) + return self + + def __add__(self, other): + """Add an environment or distribution to an environment""" + new = self.__class__([], platform=None, python=None) + for env in self, other: + new += env + return new + + +AvailableDistributions = Environment # XXX backward compatibility + + +class ExtractionError(RuntimeError): + """An error occurred extracting a resource + + The following attributes are available from instances of this exception: + + manager + The resource manager that raised this exception + + cache_path + The base directory for resource extraction + + original_error + The exception instance that caused extraction to fail + """ + + +class ResourceManager: + """Manage resource extraction and packages""" + extraction_path = None + + def __init__(self): + self.cached_files = {} + + def resource_exists(self, package_or_requirement, resource_name): + """Does the named resource exist?""" + return get_provider(package_or_requirement).has_resource(resource_name) + + def resource_isdir(self, package_or_requirement, resource_name): + """Is the named resource an existing directory?""" + return get_provider(package_or_requirement).resource_isdir( + resource_name + ) + + def resource_filename(self, package_or_requirement, resource_name): + """Return a true filesystem path for specified resource""" + return get_provider(package_or_requirement).get_resource_filename( + self, resource_name + ) + + def resource_stream(self, package_or_requirement, resource_name): + """Return a readable file-like object for specified resource""" + return get_provider(package_or_requirement).get_resource_stream( + self, resource_name + ) + + def resource_string(self, package_or_requirement, resource_name): + """Return specified resource as a string""" + return get_provider(package_or_requirement).get_resource_string( + self, resource_name + ) + + def resource_listdir(self, package_or_requirement, resource_name): + """List the contents of the named resource directory""" + return get_provider(package_or_requirement).resource_listdir( + resource_name + ) + + def extraction_error(self): + """Give an error message for problems extracting file(s)""" + + old_exc = sys.exc_info()[1] + cache_path = self.extraction_path or get_default_cache() + + err = ExtractionError("""Can't extract file(s) to egg cache + +The following error occurred while trying to extract file(s) to the Python egg +cache: + + %s + +The Python egg cache directory is currently set to: + + %s + +Perhaps your account does not have write access to this directory? You can +change the cache directory by setting the PYTHON_EGG_CACHE environment +variable to point to an accessible directory. 
+""" % (old_exc, cache_path) + ) + err.manager = self + err.cache_path = cache_path + err.original_error = old_exc + raise err + + def get_cache_path(self, archive_name, names=()): + """Return absolute location in cache for `archive_name` and `names` + + The parent directory of the resulting path will be created if it does + not already exist. `archive_name` should be the base filename of the + enclosing egg (which may not be the name of the enclosing zipfile!), + including its ".egg" extension. `names`, if provided, should be a + sequence of path name parts "under" the egg's extraction location. + + This method should only be called by resource providers that need to + obtain an extraction location, and only for names they intend to + extract, as it tracks the generated names for possible cleanup later. + """ + extract_path = self.extraction_path or get_default_cache() + target_path = os.path.join(extract_path, archive_name+'-tmp', *names) + try: + _bypass_ensure_directory(target_path) + except: + self.extraction_error() + + self._warn_unsafe_extraction_path(extract_path) + + self.cached_files[target_path] = 1 + return target_path + + @staticmethod + def _warn_unsafe_extraction_path(path): + """ + If the default extraction path is overridden and set to an insecure + location, such as /tmp, it opens up an opportunity for an attacker to + replace an extracted file with an unauthorized payload. Warn the user + if a known insecure location is used. + + See Distribute #375 for more details. + """ + if os.name == 'nt' and not path.startswith(os.environ['windir']): + # On Windows, permissions are generally restrictive by default + # and temp directories are not writable by other users, so + # bypass the warning. + return + mode = os.stat(path).st_mode + if mode & stat.S_IWOTH or mode & stat.S_IWGRP: + msg = ("%s is writable by group/others and vulnerable to attack " + "when " + "used with get_resource_filename. Consider a more secure " + "location (set with .set_extraction_path or the " + "PYTHON_EGG_CACHE environment variable)." % path) + warnings.warn(msg, UserWarning) + + def postprocess(self, tempname, filename): + """Perform any platform-specific postprocessing of `tempname` + + This is where Mac header rewrites should be done; other platforms don't + have anything special they should do. + + Resource providers should call this method ONLY after successfully + extracting a compressed resource. They must NOT call it on resources + that are already in the filesystem. + + `tempname` is the current (temporary) name of the file, and `filename` + is the name it will be renamed to by the caller after this routine + returns. + """ + + if os.name == 'posix': + # Make the resource executable + mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # 0555, 07777 + os.chmod(tempname, mode) + + def set_extraction_path(self, path): + """Set the base path where resources will be extracted to, if needed. + + If you do not call this routine before any extractions take place, the + path defaults to the return value of ``get_default_cache()``. (Which + is based on the ``PYTHON_EGG_CACHE`` environment variable, with various + platform-specific fallbacks. See that routine's documentation for more + details.) + + Resources are extracted to subdirectories of this path based upon + information given by the ``IResourceProvider``. You may set this to a + temporary directory, but then you must call ``cleanup_resources()`` to + delete the extracted files when done. 
There is no guarantee that + ``cleanup_resources()`` will be able to remove all extracted files. + + (Note: you may not change the extraction path for a given resource + manager once resources have been extracted, unless you first call + ``cleanup_resources()``.) + """ + if self.cached_files: + raise ValueError( + "Can't change extraction path, files already extracted" + ) + + self.extraction_path = path + + def cleanup_resources(self, force=False): + """ + Delete all extracted resource files and directories, returning a list + of the file and directory names that could not be successfully removed. + This function does not have any concurrency protection, so it should + generally only be called when the extraction path is a temporary + directory exclusive to a single process. This method is not + automatically called; you must call it explicitly or register it as an + ``atexit`` function if you wish to ensure cleanup of a temporary + directory used for extractions. + """ + # XXX + +def get_default_cache(): + """Determine the default cache location + + This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. + Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the + "Application Data" directory. On all other systems, it's "~/.python-eggs". + """ + try: + return os.environ['PYTHON_EGG_CACHE'] + except KeyError: + pass + + if os.name!='nt': + return os.path.expanduser('~/.python-eggs') + + app_data = 'Application Data' # XXX this may be locale-specific! + app_homes = [ + (('APPDATA',), None), # best option, should be locale-safe + (('USERPROFILE',), app_data), + (('HOMEDRIVE','HOMEPATH'), app_data), + (('HOMEPATH',), app_data), + (('HOME',), None), + (('WINDIR',), app_data), # 95/98/ME + ] + + for keys, subdir in app_homes: + dirname = '' + for key in keys: + if key in os.environ: + dirname = os.path.join(dirname, os.environ[key]) + else: + break + else: + if subdir: + dirname = os.path.join(dirname,subdir) + return os.path.join(dirname, 'Python-Eggs') + else: + raise RuntimeError( + "Please set the PYTHON_EGG_CACHE environment variable" + ) + +def safe_name(name): + """Convert an arbitrary string to a standard distribution name + + Any runs of non-alphanumeric/. characters are replaced with a single '-'. + """ + return re.sub('[^A-Za-z0-9.]+', '-', name) + + +def safe_version(version): + """Convert an arbitrary string to a standard version string + + Spaces become dots, and all other non-alphanumeric characters become + dashes, with runs of multiple dashes condensed to a single dash. + """ + version = version.replace(' ','.') + return re.sub('[^A-Za-z0-9.]+', '-', version) + + +def safe_extra(extra): + """Convert an arbitrary string to a standard 'extra' name + + Any runs of non-alphanumeric characters are replaced with a single '_', + and the result is always lowercased. + """ + return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() + + +def to_filename(name): + """Convert a project or version name to its filename-escaped form + + Any '-' characters are currently replaced with '_'.
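+ (e.g. to_filename("my-dist") == "my_dist")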
+ """ + return name.replace('-','_') + +_marker_names = { + 'os': ['name'], 'sys': ['platform'], + 'platform': ['version','machine','python_implementation'], + 'python_version': [], 'python_full_version': [], 'extra':[], +} + +_marker_values = { + 'os_name': lambda: os.name, + 'sys_platform': lambda: sys.platform, + 'python_full_version': lambda: sys.version.split()[0], + 'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]), + 'platform_version': lambda: _platinfo('version'), + 'platform_machine': lambda: _platinfo('machine'), + 'python_implementation': lambda: _platinfo('python_implementation') or _pyimp(), +} + +def _platinfo(attr): + try: + import platform + except ImportError: + return '' + return getattr(platform, attr, lambda:'')() + +def _pyimp(): + if sys.platform=='cli': + return 'IronPython' + elif sys.platform.startswith('java'): + return 'Jython' + elif '__pypy__' in sys.builtin_module_names: + return 'PyPy' + else: + return 'CPython' + +def normalize_exception(exc): + """ + Given a SyntaxError from a marker evaluation, normalize the error message: + - Remove indications of filename and line number. + - Replace platform-specific error messages with standard error messages. + """ + subs = { + 'unexpected EOF while parsing': 'invalid syntax', + 'parenthesis is never closed': 'invalid syntax', + } + exc.filename = None + exc.lineno = None + exc.msg = subs.get(exc.msg, exc.msg) + return exc + + +def invalid_marker(text): + """Validate text as a PEP 426 environment marker; return exception or False""" + try: + evaluate_marker(text) + except SyntaxError: + return normalize_exception(sys.exc_info()[1]) + return False + +def evaluate_marker(text, extra=None, _ops={}): + """ + Evaluate a PEP 426 environment marker on CPython 2.4+. + Return a boolean indicating the marker result in this environment. + Raise SyntaxError if marker is invalid. + + This implementation uses the 'parser' module, which is not implemented on + Jython and has been superseded by the 'ast' module in Python 2.6 and + later. + """ + + if not _ops: + + from token import NAME, STRING + import token + import symbol + import operator + + def and_test(nodelist): + # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! + return reduce(operator.and_, [interpret(nodelist[i]) for i in range(1,len(nodelist),2)]) + + def test(nodelist): + # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! 
+            return reduce(operator.or_, [interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
+
+        def atom(nodelist):
+            t = nodelist[1][0]
+            if t == token.LPAR:
+                if nodelist[2][0] == token.RPAR:
+                    raise SyntaxError("Empty parentheses")
+                return interpret(nodelist[2])
+            raise SyntaxError("Language feature not supported in environment markers")
+
+        def comparison(nodelist):
+            if len(nodelist)>4:
+                raise SyntaxError("Chained comparison not allowed in environment markers")
+            comp = nodelist[2][1]
+            cop = comp[1]
+            if comp[0] == NAME:
+                if len(nodelist[2]) == 3:
+                    if cop == 'not':
+                        cop = 'not in'
+                    else:
+                        cop = 'is not'
+            try:
+                cop = _ops[cop]
+            except KeyError:
+                raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
+            return cop(evaluate(nodelist[1]), evaluate(nodelist[3]))
+
+        _ops.update({
+            symbol.test: test, symbol.and_test: and_test, symbol.atom: atom,
+            symbol.comparison: comparison, 'not in': lambda x,y: x not in y,
+            'in': lambda x,y: x in y, '==': operator.eq, '!=': operator.ne,
+        })
+        if hasattr(symbol,'or_test'):
+            _ops[symbol.or_test] = test
+
+    def interpret(nodelist):
+        while len(nodelist)==2: nodelist = nodelist[1]
+        try:
+            op = _ops[nodelist[0]]
+        except KeyError:
+            raise SyntaxError("Comparison or logical expression expected")
+        return op(nodelist)
+
+    def evaluate(nodelist):
+        while len(nodelist)==2: nodelist = nodelist[1]
+        kind = nodelist[0]
+        name = nodelist[1]
+        #while len(name)==2: name = name[1]
+        if kind==NAME:
+            try:
+                op = _marker_values[name]
+            except KeyError:
+                raise SyntaxError("Unknown name %r" % name)
+            return op()
+        if kind==STRING:
+            s = nodelist[1]
+            if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
+                    or '\\' in s:
+                raise SyntaxError(
+                    "Only plain strings allowed in environment markers")
+            return s[1:-1]
+        raise SyntaxError("Language feature not supported in environment markers")
+
+    return interpret(parser.expr(text).totuple(1)[1])
+
+def _markerlib_evaluate(text):
+    """
+    Evaluate a PEP 426 environment marker using markerlib.
+    Return a boolean indicating the marker result in this environment.
+    Raise SyntaxError if marker is invalid.
+    """
+    import _markerlib
+    # markerlib implements Metadata 1.2 (PEP 345) environment markers.
+    # Translate the variables to Metadata 2.0 (PEP 426).
+    env = _markerlib.default_environment()
+    for key in env.keys():
+        new_key = key.replace('.', '_')
+        env[new_key] = env.pop(key)
+    try:
+        result = _markerlib.interpret(text, env)
+    except NameError:
+        e = sys.exc_info()[1]
+        raise SyntaxError(e.args[0])
+    return result
+
+if 'parser' not in globals():
+    # fall back to the less-complete _markerlib implementation if the
+    # 'parser' module is not available.
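+    # (The 'parser' module is absent on Jython, per evaluate_marker's
+    # docstring; _markerlib_evaluate above also adapts the PEP 345 dotted
+    # environment names, e.g. 'os.name', to the PEP 426 underscore form.)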
+ evaluate_marker = _markerlib_evaluate + +class NullProvider: + """Try to implement resources and metadata for arbitrary PEP 302 loaders""" + + egg_name = None + egg_info = None + loader = None + + def __init__(self, module): + self.loader = getattr(module, '__loader__', None) + self.module_path = os.path.dirname(getattr(module, '__file__', '')) + + def get_resource_filename(self, manager, resource_name): + return self._fn(self.module_path, resource_name) + + def get_resource_stream(self, manager, resource_name): + return BytesIO(self.get_resource_string(manager, resource_name)) + + def get_resource_string(self, manager, resource_name): + return self._get(self._fn(self.module_path, resource_name)) + + def has_resource(self, resource_name): + return self._has(self._fn(self.module_path, resource_name)) + + def has_metadata(self, name): + return self.egg_info and self._has(self._fn(self.egg_info,name)) + + if sys.version_info <= (3,): + def get_metadata(self, name): + if not self.egg_info: + return "" + return self._get(self._fn(self.egg_info,name)) + else: + def get_metadata(self, name): + if not self.egg_info: + return "" + return self._get(self._fn(self.egg_info,name)).decode("utf-8") + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + def resource_isdir(self,resource_name): + return self._isdir(self._fn(self.module_path, resource_name)) + + def metadata_isdir(self,name): + return self.egg_info and self._isdir(self._fn(self.egg_info,name)) + + def resource_listdir(self,resource_name): + return self._listdir(self._fn(self.module_path,resource_name)) + + def metadata_listdir(self,name): + if self.egg_info: + return self._listdir(self._fn(self.egg_info,name)) + return [] + + def run_script(self,script_name,namespace): + script = 'scripts/'+script_name + if not self.has_metadata(script): + raise ResolutionError("No script named %r" % script_name) + script_text = self.get_metadata(script).replace('\r\n','\n') + script_text = script_text.replace('\r','\n') + script_filename = self._fn(self.egg_info,script) + namespace['__file__'] = script_filename + if os.path.exists(script_filename): + execfile(script_filename, namespace, namespace) + else: + from linecache import cache + cache[script_filename] = ( + len(script_text), 0, script_text.split('\n'), script_filename + ) + script_code = compile(script_text,script_filename,'exec') + exec_(script_code, namespace, namespace) + + def _has(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _isdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _listdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _fn(self, base, resource_name): + if resource_name: + return os.path.join(base, *resource_name.split('/')) + return base + + def _get(self, path): + if hasattr(self.loader, 'get_data'): + return self.loader.get_data(path) + raise NotImplementedError( + "Can't perform this operation for loaders without 'get_data()'" + ) + +register_loader_type(object, NullProvider) + + +class EggProvider(NullProvider): + """Provider based on a virtual filesystem""" + + def __init__(self,module): + NullProvider.__init__(self,module) + self._setup_prefix() + + def _setup_prefix(self): + # we assume here that our metadata may be nested inside a "basket" + # of multiple eggs; that's why we use module_path instead of .archive + path = 
self.module_path + old = None + while path!=old: + if path.lower().endswith('.egg'): + self.egg_name = os.path.basename(path) + self.egg_info = os.path.join(path, 'EGG-INFO') + self.egg_root = path + break + old = path + path, base = os.path.split(path) + +class DefaultProvider(EggProvider): + """Provides access to package resources in the filesystem""" + + def _has(self, path): + return os.path.exists(path) + + def _isdir(self,path): + return os.path.isdir(path) + + def _listdir(self,path): + return os.listdir(path) + + def get_resource_stream(self, manager, resource_name): + return open(self._fn(self.module_path, resource_name), 'rb') + + def _get(self, path): + stream = open(path, 'rb') + try: + return stream.read() + finally: + stream.close() + +register_loader_type(type(None), DefaultProvider) + +if importlib_bootstrap is not None: + register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider) + + +class EmptyProvider(NullProvider): + """Provider that returns nothing for all requests""" + + _isdir = _has = lambda self,path: False + _get = lambda self,path: '' + _listdir = lambda self,path: [] + module_path = None + + def __init__(self): + pass + +empty_provider = EmptyProvider() + + +def build_zipmanifest(path): + """ + This builds a similar dictionary to the zipimport directory + caches. However instead of tuples, ZipInfo objects are stored. + + The translation of the tuple is as follows: + * [0] - zipinfo.filename on stock pythons this needs "/" --> os.sep + on pypy it is the same (one reason why distribute did work + in some cases on pypy and win32). + * [1] - zipinfo.compress_type + * [2] - zipinfo.compress_size + * [3] - zipinfo.file_size + * [4] - len(utf-8 encoding of filename) if zipinfo & 0x800 + len(ascii encoding of filename) otherwise + * [5] - (zipinfo.date_time[0] - 1980) << 9 | + zipinfo.date_time[1] << 5 | zipinfo.date_time[2] + * [6] - (zipinfo.date_time[3] - 1980) << 11 | + zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2) + * [7] - zipinfo.CRC + """ + zipinfo = dict() + zfile = zipfile.ZipFile(path) + #Got ZipFile has not __exit__ on python 3.1 + try: + for zitem in zfile.namelist(): + zpath = zitem.replace('/', os.sep) + zipinfo[zpath] = zfile.getinfo(zitem) + assert zipinfo[zpath] is not None + finally: + zfile.close() + return zipinfo + + +class ZipProvider(EggProvider): + """Resource support for zips and eggs""" + + eagers = None + + def __init__(self, module): + EggProvider.__init__(self,module) + self.zipinfo = build_zipmanifest(self.loader.archive) + self.zip_pre = self.loader.archive+os.sep + + def _zipinfo_name(self, fspath): + # Convert a virtual filename (full path to file) into a zipfile subpath + # usable with the zipimport directory cache for our target archive + if fspath.startswith(self.zip_pre): + return fspath[len(self.zip_pre):] + raise AssertionError( + "%s is not a subpath of %s" % (fspath,self.zip_pre) + ) + + def _parts(self,zip_path): + # Convert a zipfile subpath into an egg-relative path part list + fspath = self.zip_pre+zip_path # pseudo-fs path + if fspath.startswith(self.egg_root+os.sep): + return fspath[len(self.egg_root)+1:].split(os.sep) + raise AssertionError( + "%s is not a subpath of %s" % (fspath,self.egg_root) + ) + + def get_resource_filename(self, manager, resource_name): + if not self.egg_name: + raise NotImplementedError( + "resource_filename() only supported for .egg, not .zip" + ) + # no need to lock for extraction, since we use temp names + zip_path = self._resource_to_zip(resource_name) + eagers = 
self._get_eager_resources() + if '/'.join(self._parts(zip_path)) in eagers: + for name in eagers: + self._extract_resource(manager, self._eager_to_zip(name)) + return self._extract_resource(manager, zip_path) + + @staticmethod + def _get_date_and_size(zip_stat): + size = zip_stat.file_size + date_time = zip_stat.date_time + (0, 0, -1) # ymdhms+wday, yday, dst + #1980 offset already done + timestamp = time.mktime(date_time) + return timestamp, size + + def _extract_resource(self, manager, zip_path): + + if zip_path in self._index(): + for name in self._index()[zip_path]: + last = self._extract_resource( + manager, os.path.join(zip_path, name) + ) + return os.path.dirname(last) # return the extracted directory name + + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + + if not WRITE_SUPPORT: + raise IOError('"os.rename" and "os.unlink" are not supported ' + 'on this platform') + try: + + real_path = manager.get_cache_path( + self.egg_name, self._parts(zip_path) + ) + + if self._is_current(real_path, zip_path): + return real_path + + outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) + os.write(outf, self.loader.get_data(zip_path)) + os.close(outf) + utime(tmpnam, (timestamp,timestamp)) + manager.postprocess(tmpnam, real_path) + + try: + rename(tmpnam, real_path) + + except os.error: + if os.path.isfile(real_path): + if self._is_current(real_path, zip_path): + # the file became current since it was checked above, + # so proceed. + return real_path + elif os.name=='nt': # Windows, del old file and retry + unlink(real_path) + rename(tmpnam, real_path) + return real_path + raise + + except os.error: + manager.extraction_error() # report a user-friendly error + + return real_path + + def _is_current(self, file_path, zip_path): + """ + Return True if the file_path is current for this zip_path + """ + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + if not os.path.isfile(file_path): + return False + stat = os.stat(file_path) + if stat.st_size!=size or stat.st_mtime!=timestamp: + return False + # check that the contents match + zip_contents = self.loader.get_data(zip_path) + f = open(file_path, 'rb') + file_contents = f.read() + f.close() + return zip_contents == file_contents + + def _get_eager_resources(self): + if self.eagers is None: + eagers = [] + for name in ('native_libs.txt', 'eager_resources.txt'): + if self.has_metadata(name): + eagers.extend(self.get_metadata_lines(name)) + self.eagers = eagers + return self.eagers + + def _index(self): + try: + return self._dirindex + except AttributeError: + ind = {} + for path in self.zipinfo: + parts = path.split(os.sep) + while parts: + parent = os.sep.join(parts[:-1]) + if parent in ind: + ind[parent].append(parts[-1]) + break + else: + ind[parent] = [parts.pop()] + self._dirindex = ind + return ind + + def _has(self, fspath): + zip_path = self._zipinfo_name(fspath) + return zip_path in self.zipinfo or zip_path in self._index() + + def _isdir(self,fspath): + return self._zipinfo_name(fspath) in self._index() + + def _listdir(self,fspath): + return list(self._index().get(self._zipinfo_name(fspath), ())) + + def _eager_to_zip(self,resource_name): + return self._zipinfo_name(self._fn(self.egg_root,resource_name)) + + def _resource_to_zip(self,resource_name): + return self._zipinfo_name(self._fn(self.module_path,resource_name)) + +register_loader_type(zipimport.zipimporter, ZipProvider) + + +class FileMetadata(EmptyProvider): + """Metadata handler for standalone PKG-INFO files + + Usage:: + + 
metadata = FileMetadata("/path/to/PKG-INFO") + + This provider rejects all data and metadata requests except for PKG-INFO, + which is treated as existing, and will be the contents of the file at + the provided location. + """ + + def __init__(self,path): + self.path = path + + def has_metadata(self,name): + return name=='PKG-INFO' + + def get_metadata(self,name): + if name=='PKG-INFO': + f = open(self.path,'rU') + metadata = f.read() + f.close() + return metadata + raise KeyError("No metadata except PKG-INFO is available") + + def get_metadata_lines(self,name): + return yield_lines(self.get_metadata(name)) + + +class PathMetadata(DefaultProvider): + """Metadata provider for egg directories + + Usage:: + + # Development eggs: + + egg_info = "/path/to/PackageName.egg-info" + base_dir = os.path.dirname(egg_info) + metadata = PathMetadata(base_dir, egg_info) + dist_name = os.path.splitext(os.path.basename(egg_info))[0] + dist = Distribution(basedir,project_name=dist_name,metadata=metadata) + + # Unpacked egg directories: + + egg_path = "/path/to/PackageName-ver-pyver-etc.egg" + metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) + dist = Distribution.from_filename(egg_path, metadata=metadata) + """ + + def __init__(self, path, egg_info): + self.module_path = path + self.egg_info = egg_info + + +class EggMetadata(ZipProvider): + """Metadata provider for .egg files""" + + def __init__(self, importer): + """Create a metadata provider from a zipimporter""" + + self.zipinfo = build_zipmanifest(importer.archive) + self.zip_pre = importer.archive+os.sep + self.loader = importer + if importer.prefix: + self.module_path = os.path.join(importer.archive, importer.prefix) + else: + self.module_path = importer.archive + self._setup_prefix() + + +class ImpWrapper: + """PEP 302 Importer that wraps Python's "normal" import algorithm""" + + def __init__(self, path=None): + self.path = path + + def find_module(self, fullname, path=None): + subname = fullname.split(".")[-1] + if subname != fullname and self.path is None: + return None + if self.path is None: + path = None + else: + path = [self.path] + try: + file, filename, etc = imp.find_module(subname, path) + except ImportError: + return None + return ImpLoader(file, filename, etc) + + +class ImpLoader: + """PEP 302 Loader that wraps Python's "normal" import algorithm""" + + def __init__(self, file, filename, etc): + self.file = file + self.filename = filename + self.etc = etc + + def load_module(self, fullname): + try: + mod = imp.load_module(fullname, self.file, self.filename, self.etc) + finally: + if self.file: self.file.close() + # Note: we don't set __loader__ because we want the module to look + # normal; i.e. this is just a wrapper for standard import machinery + return mod + + +def get_importer(path_item): + """Retrieve a PEP 302 "importer" for the given path item + + If there is no importer, this returns a wrapper around the builtin import + machinery. The returned importer is only cached if it was created by a + path hook. 
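+
+    A minimal usage sketch (the path below is hypothetical)::
+
+        importer = get_importer('/tmp/example.egg')
+        # a zip path yields a zipimport.zipimporter via sys.path_hooks;
+        # a plain directory falls back to the ImpWrapper defined above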
+ """ + try: + importer = sys.path_importer_cache[path_item] + except KeyError: + for hook in sys.path_hooks: + try: + importer = hook(path_item) + except ImportError: + pass + else: + break + else: + importer = None + + sys.path_importer_cache.setdefault(path_item,importer) + if importer is None: + try: + importer = ImpWrapper(path_item) + except ImportError: + pass + return importer + +try: + from pkgutil import get_importer, ImpImporter +except ImportError: + pass # Python 2.3 or 2.4, use our own implementation +else: + ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation + del ImpLoader, ImpImporter + + +_declare_state('dict', _distribution_finders = {}) + +def register_finder(importer_type, distribution_finder): + """Register `distribution_finder` to find distributions in sys.path items + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `distribution_finder` is a callable that, passed a path + item and the importer instance, yields ``Distribution`` instances found on + that path item. See ``pkg_resources.find_on_path`` for an example.""" + _distribution_finders[importer_type] = distribution_finder + + +def find_distributions(path_item, only=False): + """Yield distributions accessible via `path_item`""" + importer = get_importer(path_item) + finder = _find_adapter(_distribution_finders, importer) + return finder(importer, path_item, only) + +def find_in_zip(importer, path_item, only=False): + metadata = EggMetadata(importer) + if metadata.has_metadata('PKG-INFO'): + yield Distribution.from_filename(path_item, metadata=metadata) + if only: + return # don't yield nested distros + for subitem in metadata.resource_listdir('/'): + if subitem.endswith('.egg'): + subpath = os.path.join(path_item, subitem) + for dist in find_in_zip(zipimport.zipimporter(subpath), subpath): + yield dist + +register_finder(zipimport.zipimporter, find_in_zip) + +def find_nothing(importer, path_item, only=False): + return () +register_finder(object,find_nothing) + +def find_on_path(importer, path_item, only=False): + """Yield distributions accessible on a sys.path directory""" + path_item = _normalize_cached(path_item) + + if os.path.isdir(path_item) and os.access(path_item, os.R_OK): + if path_item.lower().endswith('.egg'): + # unpacked egg + yield Distribution.from_filename( + path_item, metadata=PathMetadata( + path_item, os.path.join(path_item,'EGG-INFO') + ) + ) + else: + # scan for .egg and .egg-info in directory + for entry in os.listdir(path_item): + lower = entry.lower() + if lower.endswith('.egg-info') or lower.endswith('.dist-info'): + fullpath = os.path.join(path_item, entry) + if os.path.isdir(fullpath): + # egg-info directory, allow getting metadata + metadata = PathMetadata(path_item, fullpath) + else: + metadata = FileMetadata(fullpath) + yield Distribution.from_location( + path_item,entry,metadata,precedence=DEVELOP_DIST + ) + elif not only and lower.endswith('.egg'): + for dist in find_distributions(os.path.join(path_item, entry)): + yield dist + elif not only and lower.endswith('.egg-link'): + entry_file = open(os.path.join(path_item, entry)) + try: + entry_lines = entry_file.readlines() + finally: + entry_file.close() + for line in entry_lines: + if not line.strip(): continue + for item in find_distributions(os.path.join(path_item,line.rstrip())): + yield item + break +register_finder(ImpWrapper,find_on_path) + +if importlib_bootstrap is not None: + register_finder(importlib_bootstrap.FileFinder, find_on_path) + +_declare_state('dict', 
_namespace_handlers={}) +_declare_state('dict', _namespace_packages={}) + + +def register_namespace_handler(importer_type, namespace_handler): + """Register `namespace_handler` to declare namespace packages + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `namespace_handler` is a callable like this:: + + def namespace_handler(importer,path_entry,moduleName,module): + # return a path_entry to use for child packages + + Namespace handlers are only called if the importer object has already + agreed that it can handle the relevant path item, and they should only + return a subpath if the module __path__ does not already contain an + equivalent subpath. For an example namespace handler, see + ``pkg_resources.file_ns_handler``. + """ + _namespace_handlers[importer_type] = namespace_handler + +def _handle_ns(packageName, path_item): + """Ensure that named package includes a subpath of path_item (if needed)""" + importer = get_importer(path_item) + if importer is None: + return None + loader = importer.find_module(packageName) + if loader is None: + return None + module = sys.modules.get(packageName) + if module is None: + module = sys.modules[packageName] = imp.new_module(packageName) + module.__path__ = []; _set_parent_ns(packageName) + elif not hasattr(module,'__path__'): + raise TypeError("Not a package:", packageName) + handler = _find_adapter(_namespace_handlers, importer) + subpath = handler(importer,path_item,packageName,module) + if subpath is not None: + path = module.__path__; path.append(subpath) + loader.load_module(packageName); module.__path__ = path + return subpath + +def declare_namespace(packageName): + """Declare that package 'packageName' is a namespace package""" + + imp.acquire_lock() + try: + if packageName in _namespace_packages: + return + + path, parent = sys.path, None + if '.' 
in packageName: + parent = '.'.join(packageName.split('.')[:-1]) + declare_namespace(parent) + if parent not in _namespace_packages: + __import__(parent) + try: + path = sys.modules[parent].__path__ + except AttributeError: + raise TypeError("Not a package:", parent) + + # Track what packages are namespaces, so when new path items are added, + # they can be updated + _namespace_packages.setdefault(parent,[]).append(packageName) + _namespace_packages.setdefault(packageName,[]) + + for path_item in path: + # Ensure all the parent's path items are reflected in the child, + # if they apply + _handle_ns(packageName, path_item) + + finally: + imp.release_lock() + +def fixup_namespace_packages(path_item, parent=None): + """Ensure that previously-declared namespace packages include path_item""" + imp.acquire_lock() + try: + for package in _namespace_packages.get(parent,()): + subpath = _handle_ns(package, path_item) + if subpath: fixup_namespace_packages(subpath,package) + finally: + imp.release_lock() + +def file_ns_handler(importer, path_item, packageName, module): + """Compute an ns-package subpath for a filesystem or zipfile importer""" + + subpath = os.path.join(path_item, packageName.split('.')[-1]) + normalized = _normalize_cached(subpath) + for item in module.__path__: + if _normalize_cached(item)==normalized: + break + else: + # Only return the path if it's not already there + return subpath + +register_namespace_handler(ImpWrapper,file_ns_handler) +register_namespace_handler(zipimport.zipimporter,file_ns_handler) + +if importlib_bootstrap is not None: + register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler) + + +def null_ns_handler(importer, path_item, packageName, module): + return None + +register_namespace_handler(object,null_ns_handler) + + +def normalize_path(filename): + """Normalize a file/dir name for comparison purposes""" + return os.path.normcase(os.path.realpath(filename)) + +def _normalize_cached(filename,_cache={}): + try: + return _cache[filename] + except KeyError: + _cache[filename] = result = normalize_path(filename) + return result + +def _set_parent_ns(packageName): + parts = packageName.split('.') + name = parts.pop() + if parts: + parent = '.'.join(parts) + setattr(sys.modules[parent], name, sys.modules[packageName]) + + +def yield_lines(strs): + """Yield non-empty/non-comment lines of a ``basestring`` or sequence""" + if isinstance(strs,basestring): + for s in strs.splitlines(): + s = s.strip() + if s and not s.startswith('#'): # skip blank lines/comments + yield s + else: + for ss in strs: + for s in yield_lines(ss): + yield s + +LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment +CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation +DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra +VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info +COMMA = re.compile(r"\s*,").match # comma between items +OBRACKET = re.compile(r"\s*\[").match +CBRACKET = re.compile(r"\s*\]").match +MODULE = re.compile(r"\w+(\.\w+)*$").match +EGG_NAME = re.compile( + r"(?P<name>[^-]+)" + r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? 
)?", + re.VERBOSE | re.IGNORECASE +).match + +component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) +replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get + +def _parse_version_parts(s): + for part in component_re.split(s): + part = replace(part,part) + if not part or part=='.': + continue + if part[:1] in '0123456789': + yield part.zfill(8) # pad for numeric comparison + else: + yield '*'+part + + yield '*final' # ensure that alpha/beta/candidate are before final + +def parse_version(s): + """Convert a version string to a chronologically-sortable key + + This is a rough cross between distutils' StrictVersion and LooseVersion; + if you give it versions that would work with StrictVersion, then it behaves + the same; otherwise it acts like a slightly-smarter LooseVersion. It is + *possible* to create pathological version coding schemes that will fool + this parser, but they should be very rare in practice. + + The returned value will be a tuple of strings. Numeric portions of the + version are padded to 8 digits so they will compare numerically, but + without relying on how numbers compare relative to strings. Dots are + dropped, but dashes are retained. Trailing zeros between alpha segments + or dashes are suppressed, so that e.g. "2.4.0" is considered the same as + "2.4". Alphanumeric parts are lower-cased. + + The algorithm assumes that strings like "-" and any alpha string that + alphabetically follows "final" represents a "patch level". So, "2.4-1" + is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is + considered newer than "2.4-1", which in turn is newer than "2.4". + + Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that + come before "final" alphabetically) are assumed to be pre-release versions, + so that the version "2.4" is considered newer than "2.4a1". + + Finally, to handle miscellaneous cases, the strings "pre", "preview", and + "rc" are treated as if they were "c", i.e. as though they were release + candidates, and therefore are not as new as a version string that does not + contain them, and "dev" is replaced with an '@' so that it sorts lower than + than any other pre-release tag. 
+ """ + parts = [] + for part in _parse_version_parts(s.lower()): + if part.startswith('*'): + if part<'*final': # remove '-' before a prerelease tag + while parts and parts[-1]=='*final-': parts.pop() + # remove trailing zeros from each series of numeric parts + while parts and parts[-1]=='00000000': + parts.pop() + parts.append(part) + return tuple(parts) +class EntryPoint(object): + """Object representing an advertised importable object""" + + def __init__(self, name, module_name, attrs=(), extras=(), dist=None): + if not MODULE(module_name): + raise ValueError("Invalid module name", module_name) + self.name = name + self.module_name = module_name + self.attrs = tuple(attrs) + self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras + self.dist = dist + + def __str__(self): + s = "%s = %s" % (self.name, self.module_name) + if self.attrs: + s += ':' + '.'.join(self.attrs) + if self.extras: + s += ' [%s]' % ','.join(self.extras) + return s + + def __repr__(self): + return "EntryPoint.parse(%r)" % str(self) + + def load(self, require=True, env=None, installer=None): + if require: self.require(env, installer) + entry = __import__(self.module_name, globals(),globals(), ['__name__']) + for attr in self.attrs: + try: + entry = getattr(entry,attr) + except AttributeError: + raise ImportError("%r has no %r attribute" % (entry,attr)) + return entry + + def require(self, env=None, installer=None): + if self.extras and not self.dist: + raise UnknownExtra("Can't require() without a distribution", self) + list(map(working_set.add, + working_set.resolve(self.dist.requires(self.extras),env,installer))) + + #@classmethod + def parse(cls, src, dist=None): + """Parse a single entry point from string `src` + + Entry point syntax follows the form:: + + name = some.module:some.attr [extra1,extra2] + + The entry name and module name are required, but the ``:attrs`` and + ``[extras]`` parts are optional + """ + try: + attrs = extras = () + name,value = src.split('=',1) + if '[' in value: + value,extras = value.split('[',1) + req = Requirement.parse("x["+extras) + if req.specs: raise ValueError + extras = req.extras + if ':' in value: + value,attrs = value.split(':',1) + if not MODULE(attrs.rstrip()): + raise ValueError + attrs = attrs.rstrip().split('.') + except ValueError: + raise ValueError( + "EntryPoint must be in 'name=module:attrs [extras]' format", + src + ) + else: + return cls(name.strip(), value.strip(), attrs, extras, dist) + + parse = classmethod(parse) + + #@classmethod + def parse_group(cls, group, lines, dist=None): + """Parse an entry point group""" + if not MODULE(group): + raise ValueError("Invalid group name", group) + this = {} + for line in yield_lines(lines): + ep = cls.parse(line, dist) + if ep.name in this: + raise ValueError("Duplicate entry point", group, ep.name) + this[ep.name]=ep + return this + + parse_group = classmethod(parse_group) + + #@classmethod + def parse_map(cls, data, dist=None): + """Parse a map of entry point groups""" + if isinstance(data,dict): + data = data.items() + else: + data = split_sections(data) + maps = {} + for group, lines in data: + if group is None: + if not lines: + continue + raise ValueError("Entry points must be listed in groups") + group = group.strip() + if group in maps: + raise ValueError("Duplicate group name", group) + maps[group] = cls.parse_group(group, lines, dist) + return maps + + parse_map = classmethod(parse_map) + + +def _remove_md5_fragment(location): + if not location: + return '' + parsed = urlparse(location) + if 
parsed[-1].startswith('md5='): + return urlunparse(parsed[:-1] + ('',)) + return location + + +class Distribution(object): + """Wrap an actual or potential sys.path entry w/metadata""" + PKG_INFO = 'PKG-INFO' + + def __init__(self, location=None, metadata=None, project_name=None, + version=None, py_version=PY_MAJOR, platform=None, + precedence=EGG_DIST): + self.project_name = safe_name(project_name or 'Unknown') + if version is not None: + self._version = safe_version(version) + self.py_version = py_version + self.platform = platform + self.location = location + self.precedence = precedence + self._provider = metadata or empty_provider + + #@classmethod + def from_location(cls,location,basename,metadata=None,**kw): + project_name, version, py_version, platform = [None]*4 + basename, ext = os.path.splitext(basename) + if ext.lower() in _distributionImpl: + # .dist-info gets much metadata differently + match = EGG_NAME(basename) + if match: + project_name, version, py_version, platform = match.group( + 'name','ver','pyver','plat' + ) + cls = _distributionImpl[ext.lower()] + return cls( + location, metadata, project_name=project_name, version=version, + py_version=py_version, platform=platform, **kw + ) + from_location = classmethod(from_location) + + hashcmp = property( + lambda self: ( + getattr(self,'parsed_version',()), + self.precedence, + self.key, + _remove_md5_fragment(self.location), + self.py_version, + self.platform + ) + ) + def __hash__(self): return hash(self.hashcmp) + def __lt__(self, other): + return self.hashcmp < other.hashcmp + def __le__(self, other): + return self.hashcmp <= other.hashcmp + def __gt__(self, other): + return self.hashcmp > other.hashcmp + def __ge__(self, other): + return self.hashcmp >= other.hashcmp + def __eq__(self, other): + if not isinstance(other, self.__class__): + # It's not a Distribution, so they are not equal + return False + return self.hashcmp == other.hashcmp + def __ne__(self, other): + return not self == other + + # These properties have to be lazy so that we don't have to load any + # metadata until/unless it's actually needed. 
(i.e., some distributions + # may not know their name or version without loading PKG-INFO) + + #@property + def key(self): + try: + return self._key + except AttributeError: + self._key = key = self.project_name.lower() + return key + key = property(key) + + #@property + def parsed_version(self): + try: + return self._parsed_version + except AttributeError: + self._parsed_version = pv = parse_version(self.version) + return pv + + parsed_version = property(parsed_version) + + #@property + def version(self): + try: + return self._version + except AttributeError: + for line in self._get_metadata(self.PKG_INFO): + if line.lower().startswith('version:'): + self._version = safe_version(line.split(':',1)[1].strip()) + return self._version + else: + raise ValueError( + "Missing 'Version:' header and/or %s file" % self.PKG_INFO, self + ) + version = property(version) + + #@property + def _dep_map(self): + try: + return self.__dep_map + except AttributeError: + dm = self.__dep_map = {None: []} + for name in 'requires.txt', 'depends.txt': + for extra,reqs in split_sections(self._get_metadata(name)): + if extra: + if ':' in extra: + extra, marker = extra.split(':',1) + if invalid_marker(marker): + reqs=[] # XXX warn + elif not evaluate_marker(marker): + reqs=[] + extra = safe_extra(extra) or None + dm.setdefault(extra,[]).extend(parse_requirements(reqs)) + return dm + _dep_map = property(_dep_map) + + def requires(self,extras=()): + """List of Requirements needed for this distro if `extras` are used""" + dm = self._dep_map + deps = [] + deps.extend(dm.get(None,())) + for ext in extras: + try: + deps.extend(dm[safe_extra(ext)]) + except KeyError: + raise UnknownExtra( + "%s has no such extra feature %r" % (self, ext) + ) + return deps + + def _get_metadata(self,name): + if self.has_metadata(name): + for line in self.get_metadata_lines(name): + yield line + + def activate(self,path=None): + """Ensure distribution is importable on `path` (default=sys.path)""" + if path is None: path = sys.path + self.insert_on(path) + if path is sys.path: + fixup_namespace_packages(self.location) + list(map(declare_namespace, self._get_metadata('namespace_packages.txt'))) + + def egg_name(self): + """Return what this distribution's standard .egg filename should be""" + filename = "%s-%s-py%s" % ( + to_filename(self.project_name), to_filename(self.version), + self.py_version or PY_MAJOR + ) + + if self.platform: + filename += '-'+self.platform + return filename + + def __repr__(self): + if self.location: + return "%s (%s)" % (self,self.location) + else: + return str(self) + + def __str__(self): + try: version = getattr(self,'version',None) + except ValueError: version = None + version = version or "[unknown version]" + return "%s %s" % (self.project_name,version) + + def __getattr__(self,attr): + """Delegate all unrecognized public attributes to .metadata provider""" + if attr.startswith('_'): + raise AttributeError(attr) + return getattr(self._provider, attr) + + #@classmethod + def from_filename(cls,filename,metadata=None, **kw): + return cls.from_location( + _normalize_cached(filename), os.path.basename(filename), metadata, + **kw + ) + from_filename = classmethod(from_filename) + + def as_requirement(self): + """Return a ``Requirement`` that matches this distribution exactly""" + return Requirement.parse('%s==%s' % (self.project_name, self.version)) + + def load_entry_point(self, group, name): + """Return the `name` entry point of `group` or raise ImportError""" + ep = self.get_entry_info(group,name) + if ep is None: 
+ raise ImportError("Entry point %r not found" % ((group,name),)) + return ep.load() + + def get_entry_map(self, group=None): + """Return the entry point map for `group`, or the full entry map""" + try: + ep_map = self._ep_map + except AttributeError: + ep_map = self._ep_map = EntryPoint.parse_map( + self._get_metadata('entry_points.txt'), self + ) + if group is not None: + return ep_map.get(group,{}) + return ep_map + + def get_entry_info(self, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return self.get_entry_map(group).get(name) + + def insert_on(self, path, loc = None): + """Insert self.location in path before its nearest parent directory""" + + loc = loc or self.location + if not loc: + return + + nloc = _normalize_cached(loc) + bdir = os.path.dirname(nloc) + npath= [(p and _normalize_cached(p) or p) for p in path] + + bp = None + for p, item in enumerate(npath): + if item==nloc: + break + elif item==bdir and self.precedence==EGG_DIST: + # if it's an .egg, give it precedence over its directory + if path is sys.path: + self.check_version_conflict() + path.insert(p, loc) + npath.insert(p, nloc) + break + else: + if path is sys.path: + self.check_version_conflict() + path.append(loc) + return + + # p is the spot where we found or inserted loc; now remove duplicates + while 1: + try: + np = npath.index(nloc, p+1) + except ValueError: + break + else: + del npath[np], path[np] + p = np # ha! + + return + + def check_version_conflict(self): + if self.key=='setuptools': + return # ignore the inevitable setuptools self-conflicts :( + + nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) + loc = normalize_path(self.location) + for modname in self._get_metadata('top_level.txt'): + if (modname not in sys.modules or modname in nsp + or modname in _namespace_packages): + continue + if modname in ('pkg_resources', 'setuptools', 'site'): + continue + fn = getattr(sys.modules[modname], '__file__', None) + if fn and (normalize_path(fn).startswith(loc) or + fn.startswith(self.location)): + continue + issue_warning( + "Module %s was already imported from %s, but %s is being added" + " to sys.path" % (modname, fn, self.location), + ) + + def has_version(self): + try: + self.version + except ValueError: + issue_warning("Unbuilt egg for "+repr(self)) + return False + return True + + def clone(self,**kw): + """Copy this distribution, substituting in any changed keyword args""" + for attr in ( + 'project_name', 'version', 'py_version', 'platform', 'location', + 'precedence' + ): + kw.setdefault(attr, getattr(self,attr,None)) + kw.setdefault('metadata', self._provider) + return self.__class__(**kw) + + #@property + def extras(self): + return [dep for dep in self._dep_map if dep] + extras = property(extras) + + +class DistInfoDistribution(Distribution): + """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" + PKG_INFO = 'METADATA' + EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") + + @property + def _parsed_pkg_info(self): + """Parse and cache metadata""" + try: + return self._pkg_info + except AttributeError: + from email.parser import Parser + self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO)) + return self._pkg_info + + @property + def _dep_map(self): + try: + return self.__dep_map + except AttributeError: + self.__dep_map = self._compute_dependencies() + return self.__dep_map + + def _preparse_requirement(self, requires_dist): + """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') + Split environment marker, 
add == prefix to version specifiers as
+        necessary, and remove parentheses.
+        """
+        parts = requires_dist.split(';', 1) + ['']
+        distvers = parts[0].strip()
+        mark = parts[1].strip()
+        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
+        distvers = distvers.replace('(', '').replace(')', '')
+        return (distvers, mark)
+
+    def _compute_dependencies(self):
+        """Recompute this distribution's dependencies."""
+        from _markerlib import compile as compile_marker
+        dm = self.__dep_map = {None: []}
+
+        reqs = []
+        # Including any condition expressions
+        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+            distvers, mark = self._preparse_requirement(req)
+            parsed = next(parse_requirements(distvers))
+            parsed.marker_fn = compile_marker(mark)
+            reqs.append(parsed)
+
+        def reqs_for_extra(extra):
+            for req in reqs:
+                if req.marker_fn(override={'extra':extra}):
+                    yield req
+
+        common = frozenset(reqs_for_extra(None))
+        dm[None].extend(common)
+
+        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+            extra = safe_extra(extra.strip())
+            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
+
+        return dm
+
+
+_distributionImpl = {
+    '.egg': Distribution,
+    '.egg-info': Distribution,
+    '.dist-info': DistInfoDistribution,
+    }
+
+
+def issue_warning(*args,**kw):
+    level = 1
+    g = globals()
+    try:
+        # find the first stack frame that is *not* code in
+        # the pkg_resources module, to use for the warning
+        while sys._getframe(level).f_globals is g:
+            level += 1
+    except ValueError:
+        pass
+    from warnings import warn
+    warn(stacklevel = level+1, *args, **kw)
+
+
+def parse_requirements(strs):
+    """Yield ``Requirement`` objects for each specification in `strs`
+
+    `strs` must be an instance of ``basestring``, or a (possibly-nested)
+    iterable thereof.
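+
+    Each specification uses the usual requirement syntax; illustrative
+    examples::
+
+        FooProject>=1.2,<2.0
+        Fizzy [foo, bar]
+        PickyThing<1.6,>1.9,!=1.9.6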
+ """ + # create a steppable iterator, so we can handle \-continuations + lines = iter(yield_lines(strs)) + + def scan_list(ITEM,TERMINATOR,line,p,groups,item_name): + + items = [] + + while not TERMINATOR(line,p): + if CONTINUE(line,p): + try: + line = next(lines) + p = 0 + except StopIteration: + raise ValueError( + "\\ must not appear on the last nonblank line" + ) + + match = ITEM(line,p) + if not match: + raise ValueError("Expected "+item_name+" in",line,"at",line[p:]) + + items.append(match.group(*groups)) + p = match.end() + + match = COMMA(line,p) + if match: + p = match.end() # skip the comma + elif not TERMINATOR(line,p): + raise ValueError( + "Expected ',' or end-of-list in",line,"at",line[p:] + ) + + match = TERMINATOR(line,p) + if match: p = match.end() # skip the terminator, if any + return line, p, items + + for line in lines: + match = DISTRO(line) + if not match: + raise ValueError("Missing distribution spec", line) + project_name = match.group(1) + p = match.end() + extras = [] + + match = OBRACKET(line,p) + if match: + p = match.end() + line, p, extras = scan_list( + DISTRO, CBRACKET, line, p, (1,), "'extra' name" + ) + + line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec") + specs = [(op,safe_version(val)) for op,val in specs] + yield Requirement(project_name, specs, extras) + + +def _sort_dists(dists): + tmp = [(dist.hashcmp,dist) for dist in dists] + tmp.sort() + dists[::-1] = [d for hc,d in tmp] + + +class Requirement: + def __init__(self, project_name, specs, extras): + """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" + self.unsafe_name, project_name = project_name, safe_name(project_name) + self.project_name, self.key = project_name, project_name.lower() + index = [(parse_version(v),state_machine[op],op,v) for op,v in specs] + index.sort() + self.specs = [(op,ver) for parsed,trans,op,ver in index] + self.index, self.extras = index, tuple(map(safe_extra,extras)) + self.hashCmp = ( + self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]), + frozenset(self.extras) + ) + self.__hash = hash(self.hashCmp) + + def __str__(self): + specs = ','.join([''.join(s) for s in self.specs]) + extras = ','.join(self.extras) + if extras: extras = '[%s]' % extras + return '%s%s%s' % (self.project_name, extras, specs) + + def __eq__(self,other): + return isinstance(other,Requirement) and self.hashCmp==other.hashCmp + + def __contains__(self,item): + if isinstance(item,Distribution): + if item.key != self.key: return False + if self.index: item = item.parsed_version # only get if we need it + elif isinstance(item,basestring): + item = parse_version(item) + last = None + compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1 + for parsed,trans,op,ver in self.index: + action = trans[compare(item,parsed)] # Indexing: 0, 1, -1 + if action=='F': + return False + elif action=='T': + return True + elif action=='+': + last = True + elif action=='-' or last is None: last = False + if last is None: last = True # no rules encountered + return last + + def __hash__(self): + return self.__hash + + def __repr__(self): return "Requirement.parse(%r)" % str(self) + + #@staticmethod + def parse(s): + reqs = list(parse_requirements(s)) + if reqs: + if len(reqs)==1: + return reqs[0] + raise ValueError("Expected only one requirement", s) + raise ValueError("No requirements found", s) + + parse = staticmethod(parse) + +state_machine = { + # =>< + '<': '--T', + '<=': 'T-T', + '>': 'F+F', + '>=': 'T+F', + '==': 'T..', + '!=': 'F++', +} + + +def _get_mro(cls): + 
"""Get an mro for a type or classic class""" + if not isinstance(cls,type): + class cls(cls,object): pass + return cls.__mro__[1:] + return cls.__mro__ + +def _find_adapter(registry, ob): + """Return an adapter factory for `ob` from `registry`""" + for t in _get_mro(getattr(ob, '__class__', type(ob))): + if t in registry: + return registry[t] + + +def ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def split_sections(s): + """Split a string or iterable thereof into (section,content) pairs + + Each ``section`` is a stripped version of the section header ("[section]") + and each ``content`` is a list of stripped lines excluding blank lines and + comment-only lines. If there are any such lines before the first section + header, they're returned in a first ``section`` of ``None``. + """ + section = None + content = [] + for line in yield_lines(s): + if line.startswith("["): + if line.endswith("]"): + if section or content: + yield section, content + section = line[1:-1].strip() + content = [] + else: + raise ValueError("Invalid section heading", line) + else: + content.append(line) + + # wrap up last segment + yield section, content + +def _mkstemp(*args,**kw): + from tempfile import mkstemp + old_open = os.open + try: + os.open = os_open # temporarily bypass sandboxing + return mkstemp(*args,**kw) + finally: + os.open = old_open # and then put it back + + +# Set up global resource manager (deliberately not state-saved) +_manager = ResourceManager() +def _initialize(g): + for name in dir(_manager): + if not name.startswith('_'): + g[name] = getattr(_manager, name) +_initialize(globals()) + +# Prepare the master working set and make the ``require()`` API available +_declare_state('object', working_set = WorkingSet()) +try: + # Does the main program list any requirements? + from __main__ import __requires__ +except ImportError: + pass # No: just use the default working set based on sys.path +else: + # Yes: ensure the requirements are met, by prefixing sys.path if necessary + try: + working_set.require(__requires__) + except VersionConflict: # try it without defaults already on sys.path + working_set = WorkingSet([]) # by starting with an empty path + for dist in working_set.resolve( + parse_requirements(__requires__), Environment() + ): + working_set.add(dist) + for entry in sys.path: # add any missing entries from sys.path + if entry not in working_set.entries: + working_set.add_entry(entry) + sys.path[:] = working_set.entries # then copy back to sys.path + +require = working_set.require +iter_entry_points = working_set.iter_entry_points +add_activation_listener = working_set.subscribe +run_script = working_set.run_script +run_main = run_script # backward compatibility +# Activate all distributions already on sys.path, and ensure that +# all distributions added to the working set in the future (e.g. by +# calling ``require()``) will get activated as well. +add_activation_listener(lambda dist: dist.activate()) +working_set.entries=[] +list(map(working_set.add_entry,sys.path)) # match order diff --git a/awx/lib/site-packages/prettytable.py b/awx/lib/site-packages/prettytable.py new file mode 100644 index 0000000000..8abb952b9b --- /dev/null +++ b/awx/lib/site-packages/prettytable.py @@ -0,0 +1,1475 @@ +#!/usr/bin/env python +# +# Copyright (c) 2009-2013, Luke Maurits <luke@maurits.id.au> +# All rights reserved. 
+# With contributions from: +# * Chris Clark +# * Klein Stephane +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * The name of the author may not be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +__version__ = "0.7.2" + +import copy +import csv +import random +import re +import sys +import textwrap +import itertools +import unicodedata + +py3k = sys.version_info[0] >= 3 +if py3k: + unicode = str + basestring = str + itermap = map + iterzip = zip + uni_chr = chr + from html.parser import HTMLParser +else: + itermap = itertools.imap + iterzip = itertools.izip + uni_chr = unichr + from HTMLParser import HTMLParser + +if py3k and sys.version_info[1] >= 2: + from html import escape +else: + from cgi import escape + +# hrule styles +FRAME = 0 +ALL = 1 +NONE = 2 +HEADER = 3 + +# Table styles +DEFAULT = 10 +MSWORD_FRIENDLY = 11 +PLAIN_COLUMNS = 12 +RANDOM = 20 + +_re = re.compile("\033\[[0-9;]*m") + +def _get_size(text): + lines = text.split("\n") + height = len(lines) + width = max([_str_block_width(line) for line in lines]) + return (width, height) + +class PrettyTable(object): + + def __init__(self, field_names=None, **kwargs): + + """Return a new PrettyTable instance + + Arguments: + + encoding - Unicode encoding scheme used to decode any encoded input + field_names - list or tuple of field names + fields - list or tuple of field names to include in displays + start - index of first data row to include in output + end - index of last data row to include in output PLUS ONE (list slice style) + header - print a header showing field names (True or False) + header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None) + border - print a border around the table (True or False) + hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, HEADER, ALL, NONE + vrules - controls printing of vertical rules between columns. 
Allowed values: FRAME, ALL, NONE + int_format - controls formatting of integer data + float_format - controls formatting of floating point data + padding_width - number of spaces on either side of column data (only used if left and right paddings are None) + left_padding_width - number of spaces on left hand side of column data + right_padding_width - number of spaces on right hand side of column data + vertical_char - single character string used to draw vertical lines + horizontal_char - single character string used to draw horizontal lines + junction_char - single character string used to draw line junctions + sortby - name of field to sort rows by + sort_key - sorting key function, applied to data points before sorting + valign - default valign for each row (None, "t", "m" or "b") + reversesort - True or False to sort in descending or ascending order""" + + self.encoding = kwargs.get("encoding", "UTF-8") + + # Data + self._field_names = [] + self._align = {} + self._valign = {} + self._max_width = {} + self._rows = [] + if field_names: + self.field_names = field_names + else: + self._widths = [] + + # Options + self._options = "start end fields header border sortby reversesort sort_key attributes format hrules vrules".split() + self._options.extend("int_format float_format padding_width left_padding_width right_padding_width".split()) + self._options.extend("vertical_char horizontal_char junction_char header_style valign xhtml print_empty".split()) + for option in self._options: + if option in kwargs: + self._validate_option(option, kwargs[option]) + else: + kwargs[option] = None + + self._start = kwargs["start"] or 0 + self._end = kwargs["end"] or None + self._fields = kwargs["fields"] or None + + if kwargs["header"] in (True, False): + self._header = kwargs["header"] + else: + self._header = True + self._header_style = kwargs["header_style"] or None + if kwargs["border"] in (True, False): + self._border = kwargs["border"] + else: + self._border = True + self._hrules = kwargs["hrules"] or FRAME + self._vrules = kwargs["vrules"] or ALL + + self._sortby = kwargs["sortby"] or None + if kwargs["reversesort"] in (True, False): + self._reversesort = kwargs["reversesort"] + else: + self._reversesort = False + self._sort_key = kwargs["sort_key"] or (lambda x: x) + + self._int_format = kwargs["int_format"] or {} + self._float_format = kwargs["float_format"] or {} + self._padding_width = kwargs["padding_width"] or 1 + self._left_padding_width = kwargs["left_padding_width"] or None + self._right_padding_width = kwargs["right_padding_width"] or None + + self._vertical_char = kwargs["vertical_char"] or self._unicode("|") + self._horizontal_char = kwargs["horizontal_char"] or self._unicode("-") + self._junction_char = kwargs["junction_char"] or self._unicode("+") + + if kwargs["print_empty"] in (True, False): + self._print_empty = kwargs["print_empty"] + else: + self._print_empty = True + self._format = kwargs["format"] or False + self._xhtml = kwargs["xhtml"] or False + self._attributes = kwargs["attributes"] or {} + + def _unicode(self, value): + if not isinstance(value, basestring): + value = str(value) + if not isinstance(value, unicode): + value = unicode(value, self.encoding, "strict") + return value + + def _justify(self, text, width, align): + excess = width - _str_block_width(text) + if align == "l": + return text + excess * " " + elif align == "r": + return excess * " " + text + else: + if excess % 2: + # Uneven padding + # Put more space on right if text is of odd length... 
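+                # e.g. a 3-wide text in a 10-wide field leaves excess 7:
+                # 3 spaces go on the left and 4 on the right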
+ if _str_block_width(text) % 2: + return (excess//2)*" " + text + (excess//2 + 1)*" " + # and more space on left if text is of even length + else: + return (excess//2 + 1)*" " + text + (excess//2)*" " + # Why distribute extra space this way? To match the behaviour of + # the inbuilt str.center() method. + else: + # Equal padding on either side + return (excess//2)*" " + text + (excess//2)*" " + + def __getattr__(self, name): + + if name == "rowcount": + return len(self._rows) + elif name == "colcount": + if self._field_names: + return len(self._field_names) + elif self._rows: + return len(self._rows[0]) + else: + return 0 + else: + raise AttributeError(name) + + def __getitem__(self, index): + + new = PrettyTable() + new.field_names = self.field_names + for attr in self._options: + setattr(new, "_"+attr, getattr(self, "_"+attr)) + setattr(new, "_align", getattr(self, "_align")) + if isinstance(index, slice): + for row in self._rows[index]: + new.add_row(row) + elif isinstance(index, int): + new.add_row(self._rows[index]) + else: + raise Exception("Index %s is invalid, must be an integer or slice" % str(index)) + return new + + if py3k: + def __str__(self): + return self.__unicode__() + else: + def __str__(self): + return self.__unicode__().encode(self.encoding) + + def __unicode__(self): + return self.get_string() + + ############################## + # ATTRIBUTE VALIDATORS # + ############################## + + # The method _validate_option is all that should be used elsewhere in the code base to validate options. + # It will call the appropriate validation method for that option. The individual validation methods should + # never need to be called directly (although nothing bad will happen if they *are*). + # Validation happens in TWO places. + # Firstly, in the property setters defined in the ATTRIBUTE MANAGMENT section. + # Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings + + def _validate_option(self, option, val): + if option in ("field_names"): + self._validate_field_names(val) + elif option in ("start", "end", "max_width", "padding_width", "left_padding_width", "right_padding_width", "format"): + self._validate_nonnegative_int(option, val) + elif option in ("sortby"): + self._validate_field_name(option, val) + elif option in ("sort_key"): + self._validate_function(option, val) + elif option in ("hrules"): + self._validate_hrules(option, val) + elif option in ("vrules"): + self._validate_vrules(option, val) + elif option in ("fields"): + self._validate_all_field_names(option, val) + elif option in ("header", "border", "reversesort", "xhtml", "print_empty"): + self._validate_true_or_false(option, val) + elif option in ("header_style"): + self._validate_header_style(val) + elif option in ("int_format"): + self._validate_int_format(option, val) + elif option in ("float_format"): + self._validate_float_format(option, val) + elif option in ("vertical_char", "horizontal_char", "junction_char"): + self._validate_single_char(option, val) + elif option in ("attributes"): + self._validate_attributes(option, val) + else: + raise Exception("Unrecognised option: %s!" 
% option) + + def _validate_field_names(self, val): + # Check for appropriate length + if self._field_names: + try: + assert len(val) == len(self._field_names) + except AssertionError: + raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._field_names))) + if self._rows: + try: + assert len(val) == len(self._rows[0]) + except AssertionError: + raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._rows[0]))) + # Check for uniqueness + try: + assert len(val) == len(set(val)) + except AssertionError: + raise Exception("Field names must be unique!") + + def _validate_header_style(self, val): + try: + assert val in ("cap", "title", "upper", "lower", None) + except AssertionError: + raise Exception("Invalid header style, use cap, title, upper, lower or None!") + + def _validate_align(self, val): + try: + assert val in ["l","c","r"] + except AssertionError: + raise Exception("Alignment %s is invalid, use l, c or r!" % val) + + def _validate_valign(self, val): + try: + assert val in ["t","m","b",None] + except AssertionError: + raise Exception("Alignment %s is invalid, use t, m, b or None!" % val) + + def _validate_nonnegative_int(self, name, val): + try: + assert int(val) >= 0 + except AssertionError: + raise Exception("Invalid value for %s: %s!" % (name, self._unicode(val))) + + def _validate_true_or_false(self, name, val): + try: + assert val in (True, False) + except AssertionError: + raise Exception("Invalid value for %s! Must be True or False." % name) + + def _validate_int_format(self, name, val): + if val == "": + return + try: + assert type(val) in (str, unicode) + assert val.isdigit() + except AssertionError: + raise Exception("Invalid value for %s! Must be an integer format string." % name) + + def _validate_float_format(self, name, val): + if val == "": + return + try: + assert type(val) in (str, unicode) + assert "." in val + bits = val.split(".") + assert len(bits) <= 2 + assert bits[0] == "" or bits[0].isdigit() + assert bits[1] == "" or bits[1].isdigit() + except AssertionError: + raise Exception("Invalid value for %s! Must be a float format string." % name) + + def _validate_function(self, name, val): + try: + assert hasattr(val, "__call__") + except AssertionError: + raise Exception("Invalid value for %s! Must be a function." % name) + + def _validate_hrules(self, name, val): + try: + assert val in (ALL, FRAME, HEADER, NONE) + except AssertionError: + raise Exception("Invalid value for %s! Must be ALL, FRAME, HEADER or NONE." % name) + + def _validate_vrules(self, name, val): + try: + assert val in (ALL, FRAME, NONE) + except AssertionError: + raise Exception("Invalid value for %s! Must be ALL, FRAME, or NONE." % name) + + def _validate_field_name(self, name, val): + try: + assert (val in self._field_names) or (val is None) + except AssertionError: + raise Exception("Invalid field name: %s!" % val) + + def _validate_all_field_names(self, name, val): + try: + for x in val: + self._validate_field_name(name, x) + except AssertionError: + raise Exception("fields must be a sequence of field names!") + + def _validate_single_char(self, name, val): + try: + assert _str_block_width(val) == 1 + except AssertionError: + raise Exception("Invalid value for %s! Must be a string of length 1." 
% name) + + def _validate_attributes(self, name, val): + try: + assert isinstance(val, dict) + except AssertionError: + raise Exception("attributes must be a dictionary of name/value pairs!") + + ############################## + # ATTRIBUTE MANAGEMENT # + ############################## + + def _get_field_names(self): + return self._field_names + """The names of the fields + + Arguments: + + fields - list or tuple of field names""" + def _set_field_names(self, val): + val = [self._unicode(x) for x in val] + self._validate_option("field_names", val) + if self._field_names: + old_names = self._field_names[:] + self._field_names = val + if self._align and old_names: + for old_name, new_name in zip(old_names, val): + self._align[new_name] = self._align[old_name] + for old_name in old_names: + if old_name not in self._align: + self._align.pop(old_name) + else: + for field in self._field_names: + self._align[field] = "c" + if self._valign and old_names: + for old_name, new_name in zip(old_names, val): + self._valign[new_name] = self._valign[old_name] + for old_name in old_names: + if old_name not in self._valign: + self._valign.pop(old_name) + else: + for field in self._field_names: + self._valign[field] = "t" + field_names = property(_get_field_names, _set_field_names) + + def _get_align(self): + return self._align + def _set_align(self, val): + self._validate_align(val) + for field in self._field_names: + self._align[field] = val + align = property(_get_align, _set_align) + + def _get_valign(self): + return self._valign + def _set_valign(self, val): + self._validate_valign(val) + for field in self._field_names: + self._valign[field] = val + valign = property(_get_valign, _set_valign) + + def _get_max_width(self): + return self._max_width + def _set_max_width(self, val): + self._validate_option("max_width", val) + for field in self._field_names: + self._max_width[field] = val + max_width = property(_get_max_width, _set_max_width) + + def _get_fields(self): + """List or tuple of field names to include in displays + + Arguments: + + fields - list or tuple of field names to include in displays""" + return self._fields + def _set_fields(self, val): + self._validate_option("fields", val) + self._fields = val + fields = property(_get_fields, _set_fields) + + def _get_start(self): + """Start index of the range of rows to print + + Arguments: + + start - index of first data row to include in output""" + return self._start + + def _set_start(self, val): + self._validate_option("start", val) + self._start = val + start = property(_get_start, _set_start) + + def _get_end(self): + """End index of the range of rows to print + + Arguments: + + end - index of last data row to include in output PLUS ONE (list slice style)""" + return self._end + def _set_end(self, val): + self._validate_option("end", val) + self._end = val + end = property(_get_end, _set_end) + + def _get_sortby(self): + """Name of field by which to sort rows + + Arguments: + + sortby - field name to sort by""" + return self._sortby + def _set_sortby(self, val): + self._validate_option("sortby", val) + self._sortby = val + sortby = property(_get_sortby, _set_sortby) + + def _get_reversesort(self): + """Controls direction of sorting (ascending vs descending) + + Arguments: + + reveresort - set to True to sort by descending order, or False to sort by ascending order""" + return self._reversesort + def _set_reversesort(self, val): + self._validate_option("reversesort", val) + self._reversesort = val + reversesort = property(_get_reversesort, 
_set_reversesort) + + def _get_sort_key(self): + """Sorting key function, applied to data points before sorting + + Arguments: + + sort_key - a function which takes one argument and returns something to be sorted""" + return self._sort_key + def _set_sort_key(self, val): + self._validate_option("sort_key", val) + self._sort_key = val + sort_key = property(_get_sort_key, _set_sort_key) + + def _get_header(self): + """Controls printing of table header with field names + + Arguments: + + header - print a header showing field names (True or False)""" + return self._header + def _set_header(self, val): + self._validate_option("header", val) + self._header = val + header = property(_get_header, _set_header) + + def _get_header_style(self): + """Controls stylisation applied to field names in header + + Arguments: + + header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)""" + return self._header_style + def _set_header_style(self, val): + self._validate_header_style(val) + self._header_style = val + header_style = property(_get_header_style, _set_header_style) + + def _get_border(self): + """Controls printing of border around table + + Arguments: + + border - print a border around the table (True or False)""" + return self._border + def _set_border(self, val): + self._validate_option("border", val) + self._border = val + border = property(_get_border, _set_border) + + def _get_hrules(self): + """Controls printing of horizontal rules after rows + + Arguments: + + hrules - horizontal rules style. Allowed values: FRAME, ALL, HEADER, NONE""" + return self._hrules + def _set_hrules(self, val): + self._validate_option("hrules", val) + self._hrules = val + hrules = property(_get_hrules, _set_hrules) + + def _get_vrules(self): + """Controls printing of vertical rules between columns + + Arguments: + + vrules - vertical rules style. 
Allowed values: FRAME, ALL, NONE""" + return self._vrules + def _set_vrules(self, val): + self._validate_option("vrules", val) + self._vrules = val + vrules = property(_get_vrules, _set_vrules) + + def _get_int_format(self): + """Controls formatting of integer data + Arguments: + + int_format - integer format string""" + return self._int_format + def _set_int_format(self, val): +# self._validate_option("int_format", val) + for field in self._field_names: + self._int_format[field] = val + int_format = property(_get_int_format, _set_int_format) + + def _get_float_format(self): + """Controls formatting of floating point data + Arguments: + + float_format - floating point format string""" + return self._float_format + def _set_float_format(self, val): +# self._validate_option("float_format", val) + for field in self._field_names: + self._float_format[field] = val + float_format = property(_get_float_format, _set_float_format) + + def _get_padding_width(self): + """The number of empty spaces between a column's edge and its content + + Arguments: + + padding_width - number of spaces, must be a positive integer""" + return self._padding_width + def _set_padding_width(self, val): + self._validate_option("padding_width", val) + self._padding_width = val + padding_width = property(_get_padding_width, _set_padding_width) + + def _get_left_padding_width(self): + """The number of empty spaces between a column's left edge and its content + + Arguments: + + left_padding - number of spaces, must be a positive integer""" + return self._left_padding_width + def _set_left_padding_width(self, val): + self._validate_option("left_padding_width", val) + self._left_padding_width = val + left_padding_width = property(_get_left_padding_width, _set_left_padding_width) + + def _get_right_padding_width(self): + """The number of empty spaces between a column's right edge and its content + + Arguments: + + right_padding - number of spaces, must be a positive integer""" + return self._right_padding_width + def _set_right_padding_width(self, val): + self._validate_option("right_padding_width", val) + self._right_padding_width = val + right_padding_width = property(_get_right_padding_width, _set_right_padding_width) + + def _get_vertical_char(self): + """The charcter used when printing table borders to draw vertical lines + + Arguments: + + vertical_char - single character string used to draw vertical lines""" + return self._vertical_char + def _set_vertical_char(self, val): + val = self._unicode(val) + self._validate_option("vertical_char", val) + self._vertical_char = val + vertical_char = property(_get_vertical_char, _set_vertical_char) + + def _get_horizontal_char(self): + """The charcter used when printing table borders to draw horizontal lines + + Arguments: + + horizontal_char - single character string used to draw horizontal lines""" + return self._horizontal_char + def _set_horizontal_char(self, val): + val = self._unicode(val) + self._validate_option("horizontal_char", val) + self._horizontal_char = val + horizontal_char = property(_get_horizontal_char, _set_horizontal_char) + + def _get_junction_char(self): + """The charcter used when printing table borders to draw line junctions + + Arguments: + + junction_char - single character string used to draw line junctions""" + return self._junction_char + def _set_junction_char(self, val): + val = self._unicode(val) + self._validate_option("vertical_char", val) + self._junction_char = val + junction_char = property(_get_junction_char, _set_junction_char) + + def 
_get_format(self): + """Controls whether or not HTML tables are formatted to match styling options + + Arguments: + + format - True or False""" + return self._format + def _set_format(self, val): + self._validate_option("format", val) + self._format = val + format = property(_get_format, _set_format) + + def _get_print_empty(self): + """Controls whether or not empty tables produce a header and frame or just an empty string + + Arguments: + + print_empty - True or False""" + return self._print_empty + def _set_print_empty(self, val): + self._validate_option("print_empty", val) + self._print_empty = val + print_empty = property(_get_print_empty, _set_print_empty) + + def _get_attributes(self): + """A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML + + Arguments: + + attributes - dictionary of attributes""" + return self._attributes + def _set_attributes(self, val): + self._validate_option("attributes", val) + self._attributes = val + attributes = property(_get_attributes, _set_attributes) + + ############################## + # OPTION MIXER # + ############################## + + def _get_options(self, kwargs): + + options = {} + for option in self._options: + if option in kwargs: + self._validate_option(option, kwargs[option]) + options[option] = kwargs[option] + else: + options[option] = getattr(self, "_"+option) + return options + + ############################## + # PRESET STYLE LOGIC # + ############################## + + def set_style(self, style): + + if style == DEFAULT: + self._set_default_style() + elif style == MSWORD_FRIENDLY: + self._set_msword_style() + elif style == PLAIN_COLUMNS: + self._set_columns_style() + elif style == RANDOM: + self._set_random_style() + else: + raise Exception("Invalid pre-set style!") + + def _set_default_style(self): + + self.header = True + self.border = True + self._hrules = FRAME + self._vrules = ALL + self.padding_width = 1 + self.left_padding_width = 1 + self.right_padding_width = 1 + self.vertical_char = "|" + self.horizontal_char = "-" + self.junction_char = "+" + + def _set_msword_style(self): + + self.header = True + self.border = True + self._hrules = NONE + self.padding_width = 1 + self.left_padding_width = 1 + self.right_padding_width = 1 + self.vertical_char = "|" + + def _set_columns_style(self): + + self.header = True + self.border = False + self.padding_width = 1 + self.left_padding_width = 0 + self.right_padding_width = 8 + + def _set_random_style(self): + + # Just for fun! 
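+        # A brief sketch (hypothetical table t, not from the original source):
+        # t.set_style(RANDOM) routes here and re-rolls every setting below, so
+        # two consecutive calls will usually render the same table differently.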
+        self.header = random.choice((True, False))
+        self.border = random.choice((True, False))
+        self._hrules = random.choice((ALL, FRAME, HEADER, NONE))
+        self._vrules = random.choice((ALL, FRAME, NONE))
+        self.left_padding_width = random.randint(0,5)
+        self.right_padding_width = random.randint(0,5)
+        self.vertical_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
+        self.horizontal_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
+        self.junction_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
+
+    ##############################
+    # DATA INPUT METHODS         #
+    ##############################
+
+    def add_row(self, row):
+
+        """Add a row to the table
+
+        Arguments:
+
+        row - row of data, should be a list with as many elements as the table
+        has fields"""
+
+        if self._field_names and len(row) != len(self._field_names):
+            raise Exception("Row has incorrect number of values, (actual) %d!=%d (expected)" %(len(row),len(self._field_names)))
+        if not self._field_names:
+            self.field_names = [("Field %d" % (n+1)) for n in range(0,len(row))]
+        self._rows.append(list(row))
+
+    def del_row(self, row_index):
+
+        """Delete a row from the table
+
+        Arguments:
+
+        row_index - The index of the row you want to delete. Indexing starts at 0."""
+
+        if row_index > len(self._rows)-1:
+            raise Exception("Can't delete row at index %d, table only has %d rows!" % (row_index, len(self._rows)))
+        del self._rows[row_index]
+
+    def add_column(self, fieldname, column, align="c", valign="t"):
+
+        """Add a column to the table.
+
+        Arguments:
+
+        fieldname - name of the field to contain the new column of data
+        column - column of data, should be a list with as many elements as the
+        table has rows
+        align - desired alignment for this column - "l" for left, "c" for centre and "r" for right
+        valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom"""
+
+        if len(self._rows) in (0, len(column)):
+            self._validate_align(align)
+            self._validate_valign(valign)
+            self._field_names.append(fieldname)
+            self._align[fieldname] = align
+            self._valign[fieldname] = valign
+            for i in range(0, len(column)):
+                if len(self._rows) < i+1:
+                    self._rows.append([])
+                self._rows[i].append(column[i])
+        else:
+            raise Exception("Column length %d does not match number of rows %d!"
% (len(column), len(self._rows))) + + def clear_rows(self): + + """Delete all rows from the table but keep the current field names""" + + self._rows = [] + + def clear(self): + + """Delete all rows and field names from the table, maintaining nothing but styling options""" + + self._rows = [] + self._field_names = [] + self._widths = [] + + ############################## + # MISC PUBLIC METHODS # + ############################## + + def copy(self): + return copy.deepcopy(self) + + ############################## + # MISC PRIVATE METHODS # + ############################## + + def _format_value(self, field, value): + if isinstance(value, int) and field in self._int_format: + value = self._unicode(("%%%sd" % self._int_format[field]) % value) + elif isinstance(value, float) and field in self._float_format: + value = self._unicode(("%%%sf" % self._float_format[field]) % value) + return self._unicode(value) + + def _compute_widths(self, rows, options): + if options["header"]: + widths = [_get_size(field)[0] for field in self._field_names] + else: + widths = len(self.field_names) * [0] + for row in rows: + for index, value in enumerate(row): + fieldname = self.field_names[index] + if fieldname in self.max_width: + widths[index] = max(widths[index], min(_get_size(value)[0], self.max_width[fieldname])) + else: + widths[index] = max(widths[index], _get_size(value)[0]) + self._widths = widths + + def _get_padding_widths(self, options): + + if options["left_padding_width"] is not None: + lpad = options["left_padding_width"] + else: + lpad = options["padding_width"] + if options["right_padding_width"] is not None: + rpad = options["right_padding_width"] + else: + rpad = options["padding_width"] + return lpad, rpad + + def _get_rows(self, options): + """Return only those data rows that should be printed, based on slicing and sorting. + + Arguments: + + options - dictionary of option settings.""" + + # Make a copy of only those rows in the slice range + rows = copy.deepcopy(self._rows[options["start"]:options["end"]]) + # Sort if necessary + if options["sortby"]: + sortindex = self._field_names.index(options["sortby"]) + # Decorate + rows = [[row[sortindex]]+row for row in rows] + # Sort + rows.sort(reverse=options["reversesort"], key=options["sort_key"]) + # Undecorate + rows = [row[1:] for row in rows] + return rows + + def _format_row(self, row, options): + return [self._format_value(field, value) for (field, value) in zip(self._field_names, row)] + + def _format_rows(self, rows, options): + return [self._format_row(row, options) for row in rows] + + ############################## + # PLAIN TEXT STRING METHODS # + ############################## + + def get_string(self, **kwargs): + + """Return string representation of table in current state. + + Arguments: + + start - index of first data row to include in output + end - index of last data row to include in output PLUS ONE (list slice style) + fields - names of fields (columns) to include + header - print a header showing field names (True or False) + border - print a border around the table (True or False) + hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE + vrules - controls printing of vertical rules between columns. 
Allowed values: FRAME, ALL, NONE + int_format - controls formatting of integer data + float_format - controls formatting of floating point data + padding_width - number of spaces on either side of column data (only used if left and right paddings are None) + left_padding_width - number of spaces on left hand side of column data + right_padding_width - number of spaces on right hand side of column data + vertical_char - single character string used to draw vertical lines + horizontal_char - single character string used to draw horizontal lines + junction_char - single character string used to draw line junctions + sortby - name of field to sort rows by + sort_key - sorting key function, applied to data points before sorting + reversesort - True or False to sort in descending or ascending order + print empty - if True, stringify just the header for an empty table, if False return an empty string """ + + options = self._get_options(kwargs) + + lines = [] + + # Don't think too hard about an empty table + # Is this the desired behaviour? Maybe we should still print the header? + if self.rowcount == 0 and (not options["print_empty"] or not options["border"]): + return "" + + # Get the rows we need to print, taking into account slicing, sorting, etc. + rows = self._get_rows(options) + + # Turn all data in all rows into Unicode, formatted as desired + formatted_rows = self._format_rows(rows, options) + + # Compute column widths + self._compute_widths(formatted_rows, options) + + # Add header or top of border + self._hrule = self._stringify_hrule(options) + if options["header"]: + lines.append(self._stringify_header(options)) + elif options["border"] and options["hrules"] in (ALL, FRAME): + lines.append(self._hrule) + + # Add rows + for row in formatted_rows: + lines.append(self._stringify_row(row, options)) + + # Add bottom of border + if options["border"] and options["hrules"] == FRAME: + lines.append(self._hrule) + + return self._unicode("\n").join(lines) + + def _stringify_hrule(self, options): + + if not options["border"]: + return "" + lpad, rpad = self._get_padding_widths(options) + if options['vrules'] in (ALL, FRAME): + bits = [options["junction_char"]] + else: + bits = [options["horizontal_char"]] + # For tables with no data or fieldnames + if not self._field_names: + bits.append(options["junction_char"]) + return "".join(bits) + for field, width in zip(self._field_names, self._widths): + if options["fields"] and field not in options["fields"]: + continue + bits.append((width+lpad+rpad)*options["horizontal_char"]) + if options['vrules'] == ALL: + bits.append(options["junction_char"]) + else: + bits.append(options["horizontal_char"]) + if options["vrules"] == FRAME: + bits.pop() + bits.append(options["junction_char"]) + return "".join(bits) + + def _stringify_header(self, options): + + bits = [] + lpad, rpad = self._get_padding_widths(options) + if options["border"]: + if options["hrules"] in (ALL, FRAME): + bits.append(self._hrule) + bits.append("\n") + if options["vrules"] in (ALL, FRAME): + bits.append(options["vertical_char"]) + else: + bits.append(" ") + # For tables with no data or field names + if not self._field_names: + if options["vrules"] in (ALL, FRAME): + bits.append(options["vertical_char"]) + else: + bits.append(" ") + for field, width, in zip(self._field_names, self._widths): + if options["fields"] and field not in options["fields"]: + continue + if self._header_style == "cap": + fieldname = field.capitalize() + elif self._header_style == "title": + fieldname = 
field.title() + elif self._header_style == "upper": + fieldname = field.upper() + elif self._header_style == "lower": + fieldname = field.lower() + else: + fieldname = field + bits.append(" " * lpad + self._justify(fieldname, width, self._align[field]) + " " * rpad) + if options["border"]: + if options["vrules"] == ALL: + bits.append(options["vertical_char"]) + else: + bits.append(" ") + # If vrules is FRAME, then we just appended a space at the end + # of the last field, when we really want a vertical character + if options["border"] and options["vrules"] == FRAME: + bits.pop() + bits.append(options["vertical_char"]) + if options["border"] and options["hrules"] != NONE: + bits.append("\n") + bits.append(self._hrule) + return "".join(bits) + + def _stringify_row(self, row, options): + + for index, field, value, width, in zip(range(0,len(row)), self._field_names, row, self._widths): + # Enforce max widths + lines = value.split("\n") + new_lines = [] + for line in lines: + if _str_block_width(line) > width: + line = textwrap.fill(line, width) + new_lines.append(line) + lines = new_lines + value = "\n".join(lines) + row[index] = value + + row_height = 0 + for c in row: + h = _get_size(c)[1] + if h > row_height: + row_height = h + + bits = [] + lpad, rpad = self._get_padding_widths(options) + for y in range(0, row_height): + bits.append([]) + if options["border"]: + if options["vrules"] in (ALL, FRAME): + bits[y].append(self.vertical_char) + else: + bits[y].append(" ") + + for field, value, width, in zip(self._field_names, row, self._widths): + + valign = self._valign[field] + lines = value.split("\n") + dHeight = row_height - len(lines) + if dHeight: + if valign == "m": + lines = [""] * int(dHeight / 2) + lines + [""] * (dHeight - int(dHeight / 2)) + elif valign == "b": + lines = [""] * dHeight + lines + else: + lines = lines + [""] * dHeight + + y = 0 + for l in lines: + if options["fields"] and field not in options["fields"]: + continue + + bits[y].append(" " * lpad + self._justify(l, width, self._align[field]) + " " * rpad) + if options["border"]: + if options["vrules"] == ALL: + bits[y].append(self.vertical_char) + else: + bits[y].append(" ") + y += 1 + + # If vrules is FRAME, then we just appended a space at the end + # of the last field, when we really want a vertical character + for y in range(0, row_height): + if options["border"] and options["vrules"] == FRAME: + bits[y].pop() + bits[y].append(options["vertical_char"]) + + if options["border"] and options["hrules"]== ALL: + bits[row_height-1].append("\n") + bits[row_height-1].append(self._hrule) + + for y in range(0, row_height): + bits[y] = "".join(bits[y]) + + return "\n".join(bits) + + ############################## + # HTML STRING METHODS # + ############################## + + def get_html_string(self, **kwargs): + + """Return string representation of HTML formatted version of table in current state. + + Arguments: + + start - index of first data row to include in output + end - index of last data row to include in output PLUS ONE (list slice style) + fields - names of fields (columns) to include + header - print a header showing field names (True or False) + border - print a border around the table (True or False) + hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE + vrules - controls printing of vertical rules between columns. 
Allowed values: FRAME, ALL, NONE + int_format - controls formatting of integer data + float_format - controls formatting of floating point data + padding_width - number of spaces on either side of column data (only used if left and right paddings are None) + left_padding_width - number of spaces on left hand side of column data + right_padding_width - number of spaces on right hand side of column data + sortby - name of field to sort rows by + sort_key - sorting key function, applied to data points before sorting + attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag + xhtml - print <br/> tags if True, <br> tags if false""" + + options = self._get_options(kwargs) + + if options["format"]: + string = self._get_formatted_html_string(options) + else: + string = self._get_simple_html_string(options) + + return string + + def _get_simple_html_string(self, options): + + lines = [] + if options["xhtml"]: + linebreak = "<br/>" + else: + linebreak = "<br>" + + open_tag = [] + open_tag.append("<table") + if options["attributes"]: + for attr_name in options["attributes"]: + open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name])) + open_tag.append(">") + lines.append("".join(open_tag)) + + # Headers + if options["header"]: + lines.append(" <tr>") + for field in self._field_names: + if options["fields"] and field not in options["fields"]: + continue + lines.append(" <th>%s</th>" % escape(field).replace("\n", linebreak)) + lines.append(" </tr>") + + # Data + rows = self._get_rows(options) + formatted_rows = self._format_rows(rows, options) + for row in formatted_rows: + lines.append(" <tr>") + for field, datum in zip(self._field_names, row): + if options["fields"] and field not in options["fields"]: + continue + lines.append(" <td>%s</td>" % escape(datum).replace("\n", linebreak)) + lines.append(" </tr>") + + lines.append("</table>") + + return self._unicode("\n").join(lines) + + def _get_formatted_html_string(self, options): + + lines = [] + lpad, rpad = self._get_padding_widths(options) + if options["xhtml"]: + linebreak = "<br/>" + else: + linebreak = "<br>" + + open_tag = [] + open_tag.append("<table") + if options["border"]: + if options["hrules"] == ALL and options["vrules"] == ALL: + open_tag.append(" frame=\"box\" rules=\"all\"") + elif options["hrules"] == FRAME and options["vrules"] == FRAME: + open_tag.append(" frame=\"box\"") + elif options["hrules"] == FRAME and options["vrules"] == ALL: + open_tag.append(" frame=\"box\" rules=\"cols\"") + elif options["hrules"] == FRAME: + open_tag.append(" frame=\"hsides\"") + elif options["hrules"] == ALL: + open_tag.append(" frame=\"hsides\" rules=\"rows\"") + elif options["vrules"] == FRAME: + open_tag.append(" frame=\"vsides\"") + elif options["vrules"] == ALL: + open_tag.append(" frame=\"vsides\" rules=\"cols\"") + if options["attributes"]: + for attr_name in options["attributes"]: + open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name])) + open_tag.append(">") + lines.append("".join(open_tag)) + + # Headers + if options["header"]: + lines.append(" <tr>") + for field in self._field_names: + if options["fields"] and field not in options["fields"]: + continue + lines.append(" <th style=\"padding-left: %dem; padding-right: %dem; text-align: center\">%s</th>" % (lpad, rpad, escape(field).replace("\n", linebreak))) + lines.append(" </tr>") + + # Data + rows = self._get_rows(options) + formatted_rows = self._format_rows(rows, options) + aligns = [] + valigns = [] + for 
field in self._field_names: + aligns.append({ "l" : "left", "r" : "right", "c" : "center" }[self._align[field]]) + valigns.append({"t" : "top", "m" : "middle", "b" : "bottom"}[self._valign[field]]) + for row in formatted_rows: + lines.append(" <tr>") + for field, datum, align, valign in zip(self._field_names, row, aligns, valigns): + if options["fields"] and field not in options["fields"]: + continue + lines.append(" <td style=\"padding-left: %dem; padding-right: %dem; text-align: %s; vertical-align: %s\">%s</td>" % (lpad, rpad, align, valign, escape(datum).replace("\n", linebreak))) + lines.append(" </tr>") + lines.append("</table>") + + return self._unicode("\n").join(lines) + +############################## +# UNICODE WIDTH FUNCTIONS # +############################## + +def _char_block_width(char): + # Basic Latin, which is probably the most common case + #if char in xrange(0x0021, 0x007e): + #if char >= 0x0021 and char <= 0x007e: + if 0x0021 <= char <= 0x007e: + return 1 + # Chinese, Japanese, Korean (common) + if 0x4e00 <= char <= 0x9fff: + return 2 + # Hangul + if 0xac00 <= char <= 0xd7af: + return 2 + # Combining? + if unicodedata.combining(uni_chr(char)): + return 0 + # Hiragana and Katakana + if 0x3040 <= char <= 0x309f or 0x30a0 <= char <= 0x30ff: + return 2 + # Full-width Latin characters + if 0xff01 <= char <= 0xff60: + return 2 + # CJK punctuation + if 0x3000 <= char <= 0x303e: + return 2 + # Backspace and delete + if char in (0x0008, 0x007f): + return -1 + # Other control characters + elif char in (0x0000, 0x001f): + return 0 + # Take a guess + return 1 + +def _str_block_width(val): + + return sum(itermap(_char_block_width, itermap(ord, _re.sub("", val)))) + +############################## +# TABLE FACTORIES # +############################## + +def from_csv(fp, field_names = None, **kwargs): + + dialect = csv.Sniffer().sniff(fp.read(1024)) + fp.seek(0) + reader = csv.reader(fp, dialect) + + table = PrettyTable(**kwargs) + if field_names: + table.field_names = field_names + else: + if py3k: + table.field_names = [x.strip() for x in next(reader)] + else: + table.field_names = [x.strip() for x in reader.next()] + + for row in reader: + table.add_row([x.strip() for x in row]) + + return table + +def from_db_cursor(cursor, **kwargs): + + if cursor.description: + table = PrettyTable(**kwargs) + table.field_names = [col[0] for col in cursor.description] + for row in cursor.fetchall(): + table.add_row(row) + return table + +class TableHandler(HTMLParser): + + def __init__(self, **kwargs): + HTMLParser.__init__(self) + self.kwargs = kwargs + self.tables = [] + self.last_row = [] + self.rows = [] + self.max_row_width = 0 + self.active = None + self.last_content = "" + self.is_last_row_header = False + + def handle_starttag(self,tag, attrs): + self.active = tag + if tag == "th": + self.is_last_row_header = True + + def handle_endtag(self,tag): + if tag in ["th", "td"]: + stripped_content = self.last_content.strip() + self.last_row.append(stripped_content) + if tag == "tr": + self.rows.append( + (self.last_row, self.is_last_row_header)) + self.max_row_width = max(self.max_row_width, len(self.last_row)) + self.last_row = [] + self.is_last_row_header = False + if tag == "table": + table = self.generate_table(self.rows) + self.tables.append(table) + self.rows = [] + self.last_content = " " + self.active = None + + + def handle_data(self, data): + self.last_content += data + + def generate_table(self, rows): + """ + Generates from a list of rows a PrettyTable object. 
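+
+        Each entry in rows is a (cells, is_header) pair collected by the
+        handler; rows shorter than the widest row are padded with "-" cells,
+        and duplicate header names are made unique (see make_fields_unique)
+        before being used as field names.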
+ """ + table = PrettyTable(**self.kwargs) + for row in self.rows: + if len(row[0]) < self.max_row_width: + appends = self.max_row_width - len(row[0]) + for i in range(1,appends): + row[0].append("-") + + if row[1] == True: + self.make_fields_unique(row[0]) + table.field_names = row[0] + else: + table.add_row(row[0]) + return table + + def make_fields_unique(self, fields): + """ + iterates over the row and make each field unique + """ + for i in range(0, len(fields)): + for j in range(i+1, len(fields)): + if fields[i] == fields[j]: + fields[j] += "'" + +def from_html(html_code, **kwargs): + """ + Generates a list of PrettyTables from a string of HTML code. Each <table> in + the HTML becomes one PrettyTable object. + """ + + parser = TableHandler(**kwargs) + parser.feed(html_code) + return parser.tables + +def from_html_one(html_code, **kwargs): + """ + Generates a PrettyTables from a string of HTML code which contains only a + single <table> + """ + + tables = from_html(html_code, **kwargs) + try: + assert len(tables) == 1 + except AssertionError: + raise Exception("More than one <table> in provided HTML code! Use from_html instead.") + return tables[0] + +############################## +# MAIN (TEST FUNCTION) # +############################## + +def main(): + + x = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"]) + x.sortby = "Population" + x.reversesort = True + x.int_format["Area"] = "04d" + x.float_format = "6.1f" + x.align["City name"] = "l" # Left align city names + x.add_row(["Adelaide", 1295, 1158259, 600.5]) + x.add_row(["Brisbane", 5905, 1857594, 1146.4]) + x.add_row(["Darwin", 112, 120900, 1714.7]) + x.add_row(["Hobart", 1357, 205556, 619.5]) + x.add_row(["Sydney", 2058, 4336374, 1214.8]) + x.add_row(["Melbourne", 1566, 3806092, 646.9]) + x.add_row(["Perth", 5386, 1554769, 869.4]) + print(x) + +if __name__ == "__main__": + main() diff --git a/awx/lib/site-packages/pyrax/__init__.py b/awx/lib/site-packages/pyrax/__init__.py new file mode 100644 index 0000000000..22419efa19 --- /dev/null +++ b/awx/lib/site-packages/pyrax/__init__.py @@ -0,0 +1,761 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# For doxygen class doc generation: +""" +\mainpage Class Documentation for pyrax + +This module provides the Python Language Bindings for creating applications +built on the Rackspace / OpenStack Cloud.<br /> + +The source code for <b>pyrax</b> can be found at: + +http://github.com/rackspace/pyrax + +\package cf_wrapper + +This module wraps <b>swiftclient</b>, the Python client for OpenStack / Swift, +providing an object-oriented interface to the Swift object store. + +It also adds in CDN functionality that is Rackspace-specific. 
+""" +import ConfigParser +from functools import wraps +import inspect +import logging +import os + +# keyring is an optional import +try: + import keyring +except ImportError: + keyring = None + +# The following try block is only needed when first installing pyrax, +# since importing the version info in setup.py tries to import this +# entire module. +try: + from identity import * + + import exceptions as exc + import version + + import cf_wrapper.client as _cf + from cf_wrapper.storage_object import StorageObject + from cf_wrapper.container import Container + from novaclient import exceptions as _cs_exceptions + from novaclient import auth_plugin as _cs_auth_plugin + from novaclient.v1_1 import client as _cs_client + from novaclient.v1_1.servers import Server as CloudServer + + from autoscale import AutoScaleClient + from clouddatabases import CloudDatabaseClient + from clouddatabases import CloudDatabaseDatabase + from clouddatabases import CloudDatabaseFlavor + from clouddatabases import CloudDatabaseInstance + from clouddatabases import CloudDatabaseUser + from cloudloadbalancers import CloudLoadBalancer + from cloudloadbalancers import CloudLoadBalancerClient + from cloudblockstorage import CloudBlockStorageClient + from clouddns import CloudDNSClient + from cloudnetworks import CloudNetworkClient + from cloudmonitoring import CloudMonitorClient +except ImportError: + # See if this is the result of the importing of version.py in setup.py + callstack = inspect.stack() + in_setup = False + for stack in callstack: + if stack[1].endswith("/setup.py"): + in_setup = True + if not in_setup: + # This isn't a normal import problem during setup; re-raise + raise + +# Initiate the services to None until we are authenticated. +cloudservers = None +cloudfiles = None +cloud_loadbalancers = None +cloud_databases = None +cloud_blockstorage = None +cloud_dns = None +cloud_networks = None +cloud_monitoring = None +autoscale = None +# Default region for all services. Can be individually overridden if needed +default_region = None +# Encoding to use when working with non-ASCII names +default_encoding = "utf-8" + +# Config settings +settings = {} +_environment = "default" +identity = None + +# Value to plug into the user-agent headers +USER_AGENT = "pyrax/%s" % version.version + +# Do we output HTTP traffic for debugging? +_http_debug = False + +# Regions and services available from the service catalog +regions = tuple() +services = tuple() + +_client_classes = { + "database": CloudDatabaseClient, + "load_balancer": CloudLoadBalancerClient, + "volume": CloudBlockStorageClient, + "dns": CloudDNSClient, + "compute:network": CloudNetworkClient, + "monitor": CloudMonitorClient, + "autoscale": AutoScaleClient, + } + + +def _id_type(ityp): + """Allow for shorthand names for the most common types.""" + if ityp.lower() == "rackspace": + ityp = "rax_identity.RaxIdentity" + elif ityp.lower() == "keystone": + ityp = "keystone_identity.KeystoneIdentity" + return ityp + + +def _import_identity(import_str): + import_str = _id_type(import_str) + full_str = "pyrax.identity.%s" % import_str + return utils.import_class(full_str) + + + +class Settings(object): + """ + Holds and manages the settings for pyrax. 
+ """ + _environment = None + env_dct = { + "identity_type": "CLOUD_ID_TYPE", + "auth_endpoint": "CLOUD_AUTH_ENDPOINT", + "keyring_username": "CLOUD_KEYRING_USER", + "region": "CLOUD_REGION", + "tenant_id": "CLOUD_TENANT_ID", + "tenant_name": "CLOUD_TENANT_NAME", + "encoding": "CLOUD_ENCODING", + "custom_user_agent": "CLOUD_USER_AGENT", + "debug": "CLOUD_DEBUG", + "verify_ssl": "CLOUD_VERIFY_SSL", + } + _settings = {"default": dict.fromkeys(env_dct.keys())} + _default_set = False + + + def get(self, key, env=None): + """ + Returns the config setting for the specified environment. If no + environment is specified, the value for the current environment is + returned. If an unknown key or environment is passed, None is returned. + """ + if env is None: + env = self.environment + try: + return self._settings[env][key] + except KeyError: + # See if it's set in the environment + if key == "identity_class": + # This is defined via the identity_type + env_var = self.env_dct.get("identity_type") + ityp = os.environ.get(env_var) + if ityp: + return _import_identity(ityp) + else: + env_var = self.env_dct.get(key) + try: + return os.environ[env_var] + except KeyError: + return None + + + def set(self, key, val, env=None): + """ + Changes the value for the setting specified by 'key' to the new value. + By default this will change the current environment, but you can change + values in other environments by passing the name of that environment as + the 'env' parameter. + """ + if env is None: + env = self.environment + else: + if env not in self._settings: + raise exc.EnvironmentNotFound("There is no environment named " + "'%s'." % env) + dct = self._settings[env] + if key not in dct: + raise exc.InvalidSetting("The setting '%s' is not defined." % key) + dct[key] = val + if key == "identity_type": + # If setting the identity_type, also change the identity_class. + dct["identity_class"] = _import_identity(val) + elif key == "region": + if not identity: + return + current = identity.region + if current == val: + return + if "LON" in (current, val): + # This is an outlier, as it has a separate auth + identity.region = val + elif key == "verify_ssl": + if not identity: + return + identity.verify_ssl = val + + + def _getEnvironment(self): + return self._environment or "default" + + def _setEnvironment(self, val): + if val not in self._settings: + raise exc.EnvironmentNotFound("The environment '%s' has not been " + "defined." % val) + if val != self.environment: + self._environment = val + clear_credentials() + _create_identity() + + environment = property(_getEnvironment, _setEnvironment, None, + """Users can define several environments for use with pyrax. This + holds the name of the current environment they are working in. + Changing this value will discard any existing authentication + credentials, and will set all the individual clients for cloud + services, such as `pyrax.cloudservers`, to None. You must + authenticate against the new environment with the credentials + appropriate for that cloud provider.""") + + + @property + def environments(self): + return self._settings.keys() + + + def read_config(self, config_file): + """ + Parses the specified configuration file and stores the values. Raises + an InvalidConfigurationFile exception if the file is not well-formed. + """ + cfg = ConfigParser.SafeConfigParser() + try: + cfg.read(config_file) + except ConfigParser.MissingSectionHeaderError as e: + # The file exists, but doesn't have the correct format. 
+ raise exc.InvalidConfigurationFile(e) + + def safe_get(section, option, default=None): + try: + return cfg.get(section, option) + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + return default + + for section in cfg.sections(): + if section == "settings": + section_name = "default" + self._default_set = True + else: + section_name = section + dct = self._settings[section_name] = {} + dct["region"] = safe_get(section, "region", default_region) + ityp = safe_get(section, "identity_type") + dct["identity_type"] = _id_type(ityp) + dct["identity_class"] = _import_identity(ityp) + # Handle both the old and new names for this setting. + debug = safe_get(section, "debug") + if debug is None: + debug = safe_get(section, "http_debug", "False") + dct["http_debug"] = debug == "True" + verify_ssl = safe_get(section, "verify_ssl", "True") + dct["verify_ssl"] = verify_ssl == "True" + dct["keyring_username"] = safe_get(section, "keyring_username") + dct["encoding"] = safe_get(section, "encoding", default_encoding) + dct["auth_endpoint"] = safe_get(section, "auth_endpoint") + dct["tenant_name"] = safe_get(section, "tenant_name") + dct["tenant_id"] = safe_get(section, "tenant_id") + app_agent = safe_get(section, "custom_user_agent") + if app_agent: + # Customize the user-agent string with the app name. + dct["user_agent"] = "%s %s" % (app_agent, USER_AGENT) + else: + dct["user_agent"] = USER_AGENT + + # If this is the first section, make it the default + if not self._default_set: + self._settings["default"] = self._settings[section] + self._default_set = True + + +def get_environment(): + """ + Returns the name of the current environment. + """ + return settings.environment + + +def set_environment(env): + """ + Change your configuration environment. An EnvironmentNotFound exception + is raised if you pass in an undefined environment name. + """ + settings.environment = env + + +def list_environments(): + """ + Returns a list of all defined environments. + """ + return settings.environments + + +def get_setting(key, env=None): + """ + Returns the config setting for the specified key. If no environment is + specified, returns the setting for the current environment. + """ + return settings.get(key, env=env) + + +def set_setting(key, val, env=None): + """ + Changes the value of the specified key in the current environment, or in + another environment if specified. + """ + return settings.set(key, val, env=env) + + +def set_default_region(region): + """Changes the default_region setting.""" + global default_region + default_region = region + + +def _create_identity(): + """ + Creates an instance of the current identity_class and assigns it to the + module-level name 'identity'. + """ + global identity + cls = settings.get("identity_class") + if not cls: + raise exc.IdentityClassNotDefined("No identity class has " + "been defined for the current environment.") + verify_ssl = get_setting("verify_ssl") + identity = cls(verify_ssl=verify_ssl) + + +def _assure_identity(fnc): + """Ensures that the 'identity' attribute is not None.""" + def _wrapped(*args, **kwargs): + if identity is None: + _create_identity() + return fnc(*args, **kwargs) + return _wrapped + + +def _require_auth(fnc): + """Authentication decorator.""" + @wraps(fnc) + @_assure_identity + def _wrapped(*args, **kwargs): + if not identity.authenticated: + msg = "Authentication required before calling '%s'." 
% fnc.__name__ + raise exc.NotAuthenticated(msg) + return fnc(*args, **kwargs) + return _wrapped + + +@_assure_identity +def _safe_region(region=None): + """Value to use when no region is specified.""" + ret = region or settings.get("region") + if not ret: + # Nothing specified; get the default from the identity object. + ret = identity.get_default_region() + return ret + + +@_assure_identity +def auth_with_token(token, tenant_id=None, tenant_name=None, region=None): + """ + If you already have a valid token and either a tenant ID or name, you can + call this to configure the identity and available services. + """ + identity.auth_with_token(token, tenant_id=tenant_id, + tenant_name=tenant_name) + connect_to_services(region=region) + + +@_assure_identity +def set_credentials(username, api_key=None, password=None, region=None, + tenant_id=None, authenticate=True): + """ + Set the credentials directly, and then try to authenticate. + + If the region is passed, it will authenticate against the proper endpoint + for that region, and set the default region for connections. + """ + pw_key = password or api_key + region = _safe_region(region) + tenant_id = tenant_id or settings.get("tenant_id") + identity.set_credentials(username=username, password=pw_key, + tenant_id=tenant_id, region=region) + if authenticate: + _auth_and_connect(region=region) + + +@_assure_identity +def set_credential_file(cred_file, region=None, authenticate=True): + """ + Read in the credentials from the supplied file path, and then try to + authenticate. The file should be a standard config file in one of the + following formats: + + For Keystone authentication: + [keystone] + username = myusername + password = 1234567890abcdef + tenant_id = abcdef1234567890 + + For Rackspace authentication: + [rackspace_cloud] + username = myusername + api_key = 1234567890abcdef + + If the region is passed, it will authenticate against the proper endpoint + for that region, and set the default region for connections. + """ + region = _safe_region(region) + identity.set_credential_file(cred_file, region=region) + if authenticate: + _auth_and_connect(region=region) + + +def keyring_auth(username=None, region=None, authenticate=True): + """ + Use the password stored within the keyring to authenticate. If a username + is supplied, that name is used; otherwise, the keyring_username value + from the config file is used. + + If there is no username defined, or if the keyring module is not installed, + or there is no password set for the given username, the appropriate errors + will be raised. + + If the region is passed, it will authenticate against the proper endpoint + for that region, and set the default region for connections. + """ + if not keyring: + # Module not installed + raise exc.KeyringModuleNotInstalled("The 'keyring' Python module is " + "not installed on this system.") + if username is None: + username = settings.get("keyring_username") + if not username: + raise exc.KeyringUsernameMissing("No username specified for keyring " + "authentication.") + password = keyring.get_password("pyrax", username) + if password is None: + raise exc.KeyringPasswordNotFound("No password was found for the " + "username '%s'." % username) + set_credentials(username, password, region=region, + authenticate=authenticate) + + +def _auth_and_connect(region=None, connect=True): + """ + Handles the call to authenticate, and if successful, connects to the + various services. 
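+
+    On an AuthenticationFailed error the stored credentials are cleared
+    before the exception is re-raised (see the except clause below).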
+ """ + global default_region + identity.authenticated = False + default_region = region or default_region + try: + identity.authenticate() + except exc.AuthenticationFailed: + clear_credentials() + raise + if connect: + connect_to_services(region=region) + + +@_assure_identity +def authenticate(connect=True): + """ + Generally you will not need to call this directly; passing in your + credentials via set_credentials() and set_credential_file() will call + authenticate() on the identity object by default. But for situations where + you set your credentials manually or otherwise need finer control over + the authentication sequence, this method will call the identity object's + authenticate() method, and an AuthenticationFailed exception will be raised + if your credentials have not been properly set first. + + Normally after successful authentication, connections to the various + services will be made. However, passing False to the `connect` parameter + will skip the service connection step. + """ + _auth_and_connect(connect=connect) + + +def plug_hole_in_swiftclient_auth(clt, url): + """ + This is necessary because swiftclient has an issue when a token expires and + it needs to re-authenticate against Rackspace auth. It is a temporary + workaround until we can fix swiftclient. + """ + conn = clt.connection + conn.token = identity.token + conn.url = url + + +def clear_credentials(): + """De-authenticate by clearing all the names back to None.""" + global identity, regions, services, cloudservers, cloudfiles + global cloud_loadbalancers, cloud_databases, cloud_blockstorage, cloud_dns + global cloud_networks, cloud_monitoring, autoscale + identity = None + regions = tuple() + services = tuple() + cloudservers = None + cloudfiles = None + cloud_loadbalancers = None + cloud_databases = None + cloud_blockstorage = None + cloud_dns = None + cloud_networks = None + cloud_monitoring = None + autoscale = None + + +def _make_agent_name(base): + """Appends pyrax information to the underlying library's user agent.""" + if base: + if "pyrax" in base: + return base + else: + return "%s %s" % (USER_AGENT, base) + else: + return USER_AGENT + + +def connect_to_services(region=None): + """Establishes authenticated connections to the various cloud APIs.""" + global cloudservers, cloudfiles, cloud_loadbalancers, cloud_databases + global cloud_blockstorage, cloud_dns, cloud_networks, cloud_monitoring + global autoscale + cloudservers = connect_to_cloudservers(region=region) + cloudfiles = connect_to_cloudfiles(region=region) + cloud_loadbalancers = connect_to_cloud_loadbalancers(region=region) + cloud_databases = connect_to_cloud_databases(region=region) + cloud_blockstorage = connect_to_cloud_blockstorage(region=region) + cloud_dns = connect_to_cloud_dns(region=region) + cloud_networks = connect_to_cloud_networks(region=region) + cloud_monitoring = connect_to_cloud_monitoring(region=region) + autoscale = connect_to_autoscale(region=region) + + +def _get_service_endpoint(svc, region=None, public=True): + """ + Parses the services dict to get the proper endpoint for the given service. 
+ """ + region = _safe_region(region) + url_type = {True: "public_url", False: "internal_url"}[public] + ep = identity.services.get(svc, {}).get("endpoints", {}).get( + region, {}).get(url_type) + if not ep: + # Try the "ALL" region, and substitute the actual region + ep = identity.services.get(svc, {}).get("endpoints", {}).get( + "ALL", {}).get(url_type) + return ep + + +@_require_auth +def connect_to_cloudservers(region=None, **kwargs): + """Creates a client for working with cloud servers.""" + _cs_auth_plugin.discover_auth_systems() + id_type = get_setting("identity_type") + if id_type != "keystone": + auth_plugin = _cs_auth_plugin.load_plugin(id_type) + else: + auth_plugin = None + region = _safe_region(region) + mgt_url = _get_service_endpoint("compute", region) + cloudservers = None + if not mgt_url: + # Service is not available + return + insecure = not get_setting("verify_ssl") + cloudservers = _cs_client.Client(identity.username, identity.password, + project_id=identity.tenant_id, auth_url=identity.auth_endpoint, + auth_system=id_type, region_name=region, service_type="compute", + auth_plugin=auth_plugin, insecure=insecure, + http_log_debug=_http_debug, **kwargs) + agt = cloudservers.client.USER_AGENT + cloudservers.client.USER_AGENT = _make_agent_name(agt) + cloudservers.client.management_url = mgt_url + cloudservers.client.auth_token = identity.token + cloudservers.exceptions = _cs_exceptions + # Add some convenience methods + cloudservers.list_images = cloudservers.images.list + cloudservers.list_flavors = cloudservers.flavors.list + cloudservers.list = cloudservers.servers.list + + def list_base_images(): + """ + Returns a list of all base images; excludes any images created + by this account. + """ + return [image for image in cloudservers.images.list() + if not hasattr(image, "server")] + + def list_snapshots(): + """ + Returns a list of all images created by this account; in other words, it + excludes all the base images. + """ + return [image for image in cloudservers.images.list() + if hasattr(image, "server")] + + cloudservers.list_base_images = list_base_images + cloudservers.list_snapshots = list_snapshots + return cloudservers + + +@_require_auth +def connect_to_cloudfiles(region=None, public=True): + """ + Creates a client for working with cloud files. The default is to connect + to the public URL; if you need to work with the ServiceNet connection, pass + False to the 'public' parameter. 
+ """ + region = _safe_region(region) + cf_url = _get_service_endpoint("object_store", region, public=public) + cloudfiles = None + if not cf_url: + # Service is not available + return + cdn_url = _get_service_endpoint("object_cdn", region) + ep_type = {True: "publicURL", False: "internalURL"}[public] + opts = {"tenant_id": identity.tenant_name, "auth_token": identity.token, + "endpoint_type": ep_type, "tenant_name": identity.tenant_name, + "object_storage_url": cf_url, "object_cdn_url": cdn_url, + "region_name": region} + verify_ssl = get_setting("verify_ssl") + cloudfiles = _cf.CFClient(identity.auth_endpoint, identity.username, + identity.password, tenant_name=identity.tenant_name, + preauthurl=cf_url, preauthtoken=identity.token, auth_version="2", + os_options=opts, verify_ssl=verify_ssl, http_log_debug=_http_debug) + cloudfiles.user_agent = _make_agent_name(cloudfiles.user_agent) + return cloudfiles + + +@_require_auth +def _create_client(ep_name, service_type, region): + region = _safe_region(region) + ep = _get_service_endpoint(ep_name.split(":")[0], region) + if not ep: + return + verify_ssl = get_setting("verify_ssl") + cls = _client_classes[ep_name] + client = cls(region_name=region, management_url=ep, verify_ssl=verify_ssl, + http_log_debug=_http_debug, service_type=service_type) + client.user_agent = _make_agent_name(client.user_agent) + return client + + +def connect_to_cloud_databases(region=None): + """Creates a client for working with cloud databases.""" + return _create_client(ep_name="database", service_type="rax:database", + region=region) + + +def connect_to_cloud_loadbalancers(region=None): + """Creates a client for working with cloud loadbalancers.""" + return _create_client(ep_name="load_balancer", + service_type="rax:load-balancer", region=region) + + +def connect_to_cloud_blockstorage(region=None): + """Creates a client for working with cloud blockstorage.""" + return _create_client(ep_name="volume", service_type="volume", + region=region) + + +def connect_to_cloud_dns(region=None): + """Creates a client for working with cloud dns.""" + return _create_client(ep_name="dns", service_type="rax:dns", region=region) + + +def connect_to_cloud_networks(region=None): + """Creates a client for working with cloud networks.""" + return _create_client(ep_name="compute:network", service_type="compute", + region=region) + + +def connect_to_cloud_monitoring(region=None): + """Creates a client for working with cloud monitoring.""" + return _create_client(ep_name="monitor", service_type="monitor", + region=region) + + +def connect_to_autoscale(region=None): + """Creates a client for working with AutoScale.""" + return _create_client(ep_name="autoscale", + service_type="autoscale", region=region) + + +def get_http_debug(): + return _http_debug + + +@_assure_identity +def set_http_debug(val): + global _http_debug + _http_debug = val + # Set debug on the various services + identity.http_log_debug = val + for svc in (cloudservers, cloudfiles, cloud_loadbalancers, + cloud_blockstorage, cloud_databases, cloud_dns, cloud_networks, + autoscale): + if svc is not None: + svc.http_log_debug = val + if not val: + # Need to manually remove the debug handler for swiftclient + swift_logger = _cf._swift_client.logger + for handler in swift_logger.handlers: + if isinstance(handler, logging.StreamHandler): + swift_logger.removeHandler(handler) + + +def get_encoding(): + """Returns the unicode encoding type.""" + return settings.get("encoding") or default_encoding + + +# Read in the configuration 
file, if any +settings = Settings() +config_file = os.path.expanduser("~/.pyrax.cfg") +if os.path.exists(config_file): + settings.read_config(config_file) + debug = get_setting("http_debug") or False + set_http_debug(debug) diff --git a/awx/lib/site-packages/pyrax/autoscale.py b/awx/lib/site-packages/pyrax/autoscale.py new file mode 100644 index 0000000000..c5a8930f17 --- /dev/null +++ b/awx/lib/site-packages/pyrax/autoscale.py @@ -0,0 +1,1023 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2013 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from functools import wraps + +import pyrax +from pyrax.client import BaseClient +from pyrax.cloudloadbalancers import CloudLoadBalancer +from pyrax.cloudnetworks import SERVICE_NET_ID +import pyrax.exceptions as exc +from pyrax.manager import BaseManager +from pyrax.resource import BaseResource +import pyrax.utils as utils + + + +class ScalingGroup(BaseResource): + def __init__(self, *args, **kwargs): + super(ScalingGroup, self).__init__(*args, **kwargs) + self._non_display = ["active", "launchConfiguration", "links", + "groupConfiguration", "policies", "scalingPolicies"] + self._repr_properties = ["name", "cooldown", "metadata", + "min_entities", "max_entities"] + self._make_policies() + + + def _make_policies(self): + """ + Convert the 'scalingPolicies' dictionary into AutoScalePolicy objects. + """ + self.policies = [AutoScalePolicy(self.manager, dct, self) + for dct in self.scalingPolicies] + + + def get_state(self): + """ + Returns the current state of this scaling group. + """ + return self.manager.get_state(self) + + + def pause(self): + """ + Pauses all execution of the policies for this scaling group. + """ + return self.manager.pause(self) + + + def resume(self): + """ + Resumes execution of the policies for this scaling group. + """ + return self.manager.resume(self) + + + def update(self, name=None, cooldown=None, min_entities=None, + max_entities=None, metadata=None): + """ + Updates this ScalingGroup. One or more of the attributes can be + specified. + + NOTE: if you specify metadata, it will *replace* any existing metadata. + If you want to add to it, you either need to pass the complete dict of + metadata, or call the update_metadata() method. + """ + return self.manager.update(self, name=name, + cooldown=cooldown, min_entities=min_entities, + max_entities=max_entities, metadata=metadata) + + + def update_metadata(self, metadata): + """ + Adds the given metadata dict to the existing metadata for this scaling + group. + """ + return self.manager.update_metadata(self, metadata=metadata) + + + def get_configuration(self): + """ + Returns the scaling group configuration in a dictionary. + """ + return self.manager.get_configuration(self) + + + def get_launch_config(self): + """ + Returns the launch configuration for this scaling group. 
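+
+        The returned dict uses the keys built by
+        ScalingGroupManager.get_launch_config(): type, load_balancers, name,
+        flavor, image, disk_config, metadata, personality, and networks.
+        For example (the group reference is illustrative):
+
+            cfg = group.get_launch_config()
+            print cfg["image"], cfg["flavor"]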
+ """ + return self.manager.get_launch_config(self) + + + def update_launch_config(self, server_name=None, image=None, flavor=None, + disk_config=None, metadata=None, personality=None, networks=None, + load_balancers=None): + """ + Updates the server launch configuration for this scaling group. + One or more of the available attributes can be specified. + + NOTE: if you specify metadata, it will *replace* any existing metadata. + If you want to add to it, you either need to pass the complete dict of + metadata, or call the update_launch_metadata() method. + """ + return self.manager.update_launch_config(self, server_name=server_name, + image=image, flavor=flavor, disk_config=disk_config, + metadata=metadata, personality=personality, networks=networks, + load_balancers=load_balancers) + + + def update_launch_metadata(self, metadata): + """ + Adds the given metadata dict to the existing metadata for this scaling + group's launch configuration. + """ + return self.manager.update_launch_metadata(self, metadata) + + + def add_policy(self, name, policy_type, cooldown, change, is_percent=False): + """ + Adds a policy with the given values to this scaling group. The + 'change' parameter is treated as an absolute amount, unless + 'is_percent' is True, in which case it is treated as a percentage. + """ + return self.manager.add_policy(self, name, policy_type, cooldown, + change, is_percent=is_percent) + + + def list_policies(self): + """ + Returns a list of all policies defined for this scaling group. + """ + return self.manager.list_policies(self) + + + def get_policy(self, policy): + """ + Gets the detail for the specified policy. + """ + return self.manager.get_policy(self, policy) + + + def update_policy(self, policy, name=None, policy_type=None, cooldown=None, + change=None, is_percent=False): + """ + Updates the specified policy. One or more of the parameters may be + specified. + """ + return self.manager.update_policy(scaling_group=self, policy=policy, + name=name, policy_type=policy_type, cooldown=cooldown, + change=change, is_percent=is_percent) + + + def execute_policy(self, policy): + """ + Executes the specified policy for this scaling group. + """ + return self.manager.execute_policy(scaling_group=self, policy=policy) + + + def delete_policy(self, policy): + """ + Deletes the specified policy from this scaling group. + """ + return self.manager.delete_policy(scaling_group=self, policy=policy) + + + def add_webhook(self, policy, name, metadata=None): + """ + Adds a webhook to the specified policy. + """ + return self.manager.add_webhook(self, policy, name, metadata=metadata) + + + def list_webhooks(self, policy): + """ + Returns a list of all webhooks for the specified policy. + """ + return self.manager.list_webhooks(self, policy) + + + def update_webhook(self, policy, webhook, name=None, metadata=None): + """ + Updates the specified webhook. One or more of the parameters may be + specified. + """ + return self.manager.update_webhook(scaling_group=self, policy=policy, + webhook=webhook, name=name, metadata=metadata) + + + def update_webhook_metadata(self, policy, webhook, metadata): + """ + Adds the given metadata dict to the existing metadata for the specified + webhook. + """ + return self.manager.update_webhook_metadata(self, policy, webhook, + metadata) + + + def delete_webhook(self, policy, webhook): + """ + Deletes the specified webhook from the specified policy. 
+ """ + return self.manager.delete_webhook(self, policy, webhook) + + + @property + def policy_count(self): + return len(self.policies) + + + ################################################################## + # The following property declarations allow access to the base attributes + # of the ScalingGroup held in the 'groupConfiguration' dict as if they + # were native attributes. + ################################################################## + @property + def name(self): + return self.groupConfiguration.get("name") + + @name.setter + def name(self, val): + self.groupConfiguration["name"] = val + + @property + def cooldown(self): + return self.groupConfiguration.get("cooldown") + + @cooldown.setter + def cooldown(self, val): + self.groupConfiguration["cooldown"] = val + + + @property + def metadata(self): + return self.groupConfiguration.get("metadata") + + @metadata.setter + def metadata(self, val): + self.groupConfiguration["metadata"] = val + + + @property + def min_entities(self): + return self.groupConfiguration.get("minEntities") + + @min_entities.setter + def min_entities(self, val): + self.groupConfiguration["minEntities"] = val + + + @property + def max_entities(self): + return self.groupConfiguration.get("maxEntities") + + @max_entities.setter + def max_entities(self, val): + self.groupConfiguration["maxEntities"] = val + ################################################################## + + + +class ScalingGroupManager(BaseManager): + def __init__(self, api, resource_class=None, response_key=None, + plural_response_key=None, uri_base=None): + super(ScalingGroupManager, self).__init__(api, + resource_class=resource_class, response_key=response_key, + plural_response_key=plural_response_key, uri_base=uri_base) + + + def get_state(self, scaling_group): + """ + Returns the current state of the specified scaling group as a + dictionary. + """ + uri = "/%s/%s/state" % (self.uri_base, utils.get_id(scaling_group)) + resp, resp_body = self.api.method_get(uri) + data = resp_body["group"] + ret = {} + ret["active"] = [itm["id"] for itm in data["active"]] + ret["active_capacity"] = data["activeCapacity"] + ret["desired_capacity"] = data["desiredCapacity"] + ret["pending_capacity"] = data["pendingCapacity"] + ret["paused"] = data["paused"] + return ret + + + def pause(self, scaling_group): + """ + Pauses all execution of the policies for the specified scaling group. + """ + uri = "/%s/%s/pause" % (self.uri_base, utils.get_id(scaling_group)) + resp, resp_body = self.api.method_post(uri) + return None + + + def resume(self, scaling_group): + """ + Resumes execution of the policies for the specified scaling group. + """ + uri = "/%s/%s/resume" % (self.uri_base, utils.get_id(scaling_group)) + resp, resp_body = self.api.method_post(uri) + return None + + + def get_configuration(self, scaling_group): + """ + Returns the scaling group's configuration in a dictionary. + """ + uri = "/%s/%s/config" % (self.uri_base, utils.get_id(scaling_group)) + resp, resp_body = self.api.method_get(uri) + return resp_body.get("groupConfiguration") + + + def update(self, scaling_group, name=None, cooldown=None, + min_entities=None, max_entities=None, metadata=None): + """ + Updates an existing ScalingGroup. One or more of the attributes can + be specified. + + NOTE: if you specify metadata, it will *replace* any existing metadata. + If you want to add to it, you either need to pass the complete dict of + metadata, or call the update_metadata() method. 
+ """ + if not isinstance(scaling_group, ScalingGroup): + scaling_group = self.get(scaling_group) + uri = "/%s/%s/config" % (self.uri_base, scaling_group.id) + if cooldown is None: + cooldown = scaling_group.cooldown + if min_entities is None: + min_entities = scaling_group.min_entities + if max_entities is None: + max_entities = scaling_group.max_entities + body = {"name": name or scaling_group.name, + "cooldown": cooldown, + "minEntities": min_entities, + "maxEntities": max_entities, + "metadata": metadata or scaling_group.metadata, + } + resp, resp_body = self.api.method_put(uri, body=body) + return None + + + def update_metadata(self, scaling_group, metadata): + """ + Adds the given metadata dict to the existing metadata for the scaling + group. + """ + if not isinstance(scaling_group, ScalingGroup): + scaling_group = self.get(scaling_group) + curr_meta = scaling_group.metadata + curr_meta.update(metadata) + return self.update(scaling_group, metadata=curr_meta) + + + def get_launch_config(self, scaling_group): + """ + Returns the launch configuration for the specified scaling group. + """ + uri = "/%s/%s/launch" % (self.uri_base, utils.get_id(scaling_group)) + resp, resp_body = self.api.method_get(uri) + ret = {} + data = resp_body.get("launchConfiguration") + ret["type"] = data.get("type") + args = data.get("args", {}) + ret["load_balancers"] = args.get("loadBalancers") + srv = args.get("server", {}) + ret["name"] = srv.get("name") + ret["flavor"] = srv.get("flavorRef") + ret["image"] = srv.get("imageRef") + ret["disk_config"] = srv.get("OS-DCF:diskConfig") + ret["metadata"] = srv.get("metadata") + ret["personality"] = srv.get("personality") + ret["networks"] = srv.get("networks") + return ret + + + def update_launch_config(self, scaling_group, server_name=None, image=None, + flavor=None, disk_config=None, metadata=None, personality=None, + networks=None, load_balancers=None): + """ + Updates the server launch configuration for an existing scaling group. + One or more of the available attributes can be specified. + + NOTE: if you specify metadata, it will *replace* any existing metadata. + If you want to add to it, you either need to pass the complete dict of + metadata, or call the update_launch_metadata() method. + """ + if not isinstance(scaling_group, ScalingGroup): + scaling_group = self.get(scaling_group) + uri = "/%s/%s/launch" % (self.uri_base, scaling_group.id) + largs = scaling_group.launchConfiguration.get("args", {}) + srv_args = largs.get("server", {}) + lb_args = largs.get("loadBalancers", {}) + body = {"type": "launch_server", + "args": { + "server": { + "name": server_name or srv_args.get("name"), + "imageRef": image or srv_args.get("imageRef"), + "flavorRef": flavor or srv_args.get("flavorRef"), + "OS-DCF:diskConfig": disk_config or + srv_args.get("OS-DCF:diskConfig"), + "personality": personality or + srv_args.get("personality"), + "networks": networks or srv_args.get("networks"), + "metadata": metadata or srv_args.get("metadata"), + }, + "loadBalancers": load_balancers or lb_args, + }, + } + resp, resp_body = self.api.method_put(uri, body=body) + return None + + + def update_launch_metadata(self, scaling_group, metadata): + """ + Adds the given metadata dict to the existing metadata for the scaling + group's launch configuration. 
+ """ + if not isinstance(scaling_group, ScalingGroup): + scaling_group = self.get(scaling_group) + curr_meta = scaling_group.launchConfiguration.get("args", {}).get( + "server", {}).get("metadata", {}) + curr_meta.update(metadata) + return self.update_launch_config(scaling_group, metadata=curr_meta) + + + def add_policy(self, scaling_group, name, policy_type, cooldown, change, + is_percent=False): + """ + Adds a policy with the given values to the specified scaling group. The + 'change' parameter is treated as an absolute amount, unless + 'is_percent' is True, in which case it is treated as a percentage. + """ + uri = "/%s/%s/policies" % (self.uri_base, utils.get_id(scaling_group)) + body = {"name": name, "cooldown": cooldown, "type": policy_type} + if is_percent: + body["changePercent"] = change + else: + body["change"] = change + # "body" needs to be a list + body = [body] + resp, resp_body = self.api.method_post(uri, body=body) + pol_info = resp_body.get("policies")[0] + return AutoScalePolicy(self, pol_info, scaling_group) + + + def list_policies(self, scaling_group): + """ + Returns a list of all policies defined for the specified scaling group. + """ + uri = "/%s/%s/policies" % (self.uri_base, utils.get_id(scaling_group)) + resp, resp_body = self.api.method_get(uri) + return [AutoScalePolicy(self, data, scaling_group) + for data in resp_body.get("policies", [])] + + + def get_policy(self, scaling_group, policy): + """ + Gets the detail for the specified policy. + """ + uri = "/%s/%s/policies/%s" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy)) + resp, resp_body = self.api.method_get(uri) + data = resp_body.get("policy") + return AutoScalePolicy(self, data, scaling_group) + + + def update_policy(self, scaling_group, policy, name=None, policy_type=None, + cooldown=None, change=None, is_percent=False): + """ + Updates the specified policy. One or more of the parameters may be + specified. + """ + uri = "/%s/%s/policies/%s" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy)) + if not isinstance(policy, AutoScalePolicy): + # Received an ID + policy = self.get_policy(scaling_group, policy) + body = {"name": name or policy.name, + "type": policy_type or policy.type, + "cooldown": cooldown or policy.cooldown, + } + if is_percent: + body["changePercent"] = change or policy.changePercent + else: + body["change"] = change or policy.change + resp, resp_body = self.api.method_put(uri, body=body) + return None + + + def execute_policy(self, scaling_group, policy): + """ + Executes the specified policy for this scaling group. + """ + uri = "/%s/%s/policies/%s/execute" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy)) + resp, resp_body = self.api.method_post(uri) + return None + + + def delete_policy(self, scaling_group, policy): + """ + Deletes the specified policy from the scaling group. + """ + uri = "/%s/%s/policies/%s" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy)) + resp, resp_body = self.api.method_delete(uri) + + + def add_webhook(self, scaling_group, policy, name, metadata=None): + """ + Adds a webhook to the specified policy. 
+ """ + uri = "/%s/%s/policies/%s/webhooks" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy)) + body = {"name": name} + if metadata is not None: + body["metadata"] = metadata + # "body" needs to be a list + body = [body] + resp, resp_body = self.api.method_post(uri, body=body) + data = resp_body.get("webhooks")[0] + return AutoScaleWebhook(self, data, policy) + + + def list_webhooks(self, scaling_group, policy): + """ + Returns a list of all webhooks for the specified policy. + """ + uri = "/%s/%s/policies/%s/webhooks" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy)) + resp, resp_body = self.api.method_get(uri) + return [AutoScaleWebhook(self, data, policy) + for data in resp_body.get("webhooks", [])] + + + def get_webhook(self, scaling_group, policy, webhook): + """ + Gets the detail for the specified webhook. + """ + uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy), + utils.get_id(webhook)) + resp, resp_body = self.api.method_get(uri) + data = resp_body.get("webhook") + return AutoScaleWebhook(self, data, policy) + + + def update_webhook(self, scaling_group, policy, webhook, name=None, + metadata=None): + """ + Updates the specified webhook. One or more of the parameters may be + specified. + """ + uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy), + utils.get_id(webhook)) + if not isinstance(webhook, AutoScaleWebhook): + # Received an ID + webhook = self.get_webhook(scaling_group, policy, webhook) + body = {"name": name or webhook.name, + "metadata": metadata or webhook.metadata, + } + resp, resp_body = self.api.method_put(uri, body=body) + webhook.reload() + return webhook + + + def update_webhook_metadata(self, scaling_group, policy, webhook, metadata): + """ + Adds the given metadata dict to the existing metadata for the specified + webhook. + """ + if not isinstance(webhook, AutoScaleWebhook): + webhook = self.get_webhook(scaling_group, policy, webhook) + curr_meta = webhook.metadata or {} + curr_meta.update(metadata) + return self.update_webhook(scaling_group, policy, webhook, + metadata=curr_meta) + + + def delete_webhook(self, scaling_group, policy, webhook): + """ + Deletes the specified webhook from the specified policy. + """ + uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base, + utils.get_id(scaling_group), utils.get_id(policy), + utils.get_id(webhook)) + resp, resp_body = self.api.method_delete(uri) + return None + + + +class AutoScalePolicy(BaseResource): + def __init__(self, manager, info, scaling_group, *args, **kwargs): + super(AutoScalePolicy, self).__init__(manager, info, *args, **kwargs) + if not isinstance(scaling_group, ScalingGroup): + scaling_group = manager.get(scaling_group) + self.scaling_group = scaling_group + self._non_display = ["links", "scaling_group"] + + + def get(self): + """ + Gets the details for this policy. + """ + return self.manager.get_policy(self.scaling_group, self) + reload = get + + + def delete(self): + """ + Deletes this policy. + """ + return self.manager.delete_policy(self.scaling_group, self) + + + def update(self, name=None, policy_type=None, cooldown=None, change=None, + is_percent=False): + """ + Updates this policy. One or more of the parameters may be + specified. 
+ """ + return self.manager.update_policy(scaling_group=self.scaling_group, + policy=self, name=name, policy_type=policy_type, + cooldown=cooldown, change=change, is_percent=is_percent) + + + def execute(self): + """ + Executes this policy. + """ + return self.manager.execute_policy(self.scaling_group, self) + + + def add_webhook(self, name, metadata=None): + """ + Adds a webhook to this policy. + """ + return self.manager.add_webhook(self.scaling_group, self, name, + metadata=metadata) + + + def list_webhooks(self): + """ + Returns a list of all webhooks for this policy. + """ + return self.manager.list_webhooks(self.scaling_group, self) + + + def get_webhook(self, webhook): + """ + Gets the detail for the specified webhook. + """ + return self.manager.get_webhook(self.scaling_group, self, webhook) + + + def update_webhook(self, webhook, name=None, metadata=None): + """ + Updates the specified webhook. One or more of the parameters may be + specified. + """ + return self.manager.update_webhook(self.scaling_group, policy=self, + webhook=webhook, name=name, metadata=metadata) + + + def update_webhook_metadata(self, webhook, metadata): + """ + Adds the given metadata dict to the existing metadata for the specified + webhook. + """ + return self.manager.update_webhook_metadata(self.scaling_group, self, + webhook, metadata) + + + def delete_webhook(self, webhook): + """ + Deletes the specified webhook from this policy. + """ + return self.manager.delete_webhook(self.scaling_group, self, webhook) + + + +class AutoScaleWebhook(BaseResource): + def __init__(self, manager, info, policy, *args, **kwargs): + super(AutoScaleWebhook, self).__init__(manager, info, *args, **kwargs) + if not isinstance(policy, AutoScalePolicy): + policy = manager.get_policy(policy) + self.policy = policy + self._non_display = ["links", "policy"] + + + def get(self): + return self.policy.get_webhook(self) + reload = get + + + def update(self, name=None, metadata=None): + """ + Updates this webhook. One or more of the parameters may be specified. + """ + return self.policy.update_webhook(self, name=name, metadata=metadata) + + + def update_metadata(self, metadata): + """ + Adds the given metadata dict to the existing metadata for this webhook. + """ + return self.policy.update_webhook_metadata(self, metadata) + + + def delete(self): + """ + Deletes this webhook. + """ + return self.policy.delete_webhook(self) + + + +class AutoScaleClient(BaseClient): + """ + This is the primary class for interacting with AutoScale. + """ + name = "Autoscale" + + def _configure_manager(self): + """ + Creates a manager to handle autoscale operations. + """ + self._manager = ScalingGroupManager(self, + resource_class=ScalingGroup, response_key="group", + uri_base="groups") + + + def get_state(self, scaling_group): + """ + Returns the current state of the specified scaling group. + """ + return self._manager.get_state(scaling_group) + + + def pause(self, scaling_group): + """ + Pauses all execution of the policies for the specified scaling group. + """ + #NOTE: This is not yet implemented. The code is based on the docs, + # so it should either work or be pretty close. + return self._manager.pause(scaling_group) + + + def resume(self, scaling_group): + """ + Resumes execution of the policies for the specified scaling group. + """ + #NOTE: This is not yet implemented. The code is based on the docs, + # so it should either work or be pretty close. 
+ return self._manager.resume(scaling_group) + + + def update(self, scaling_group, name=None, cooldown=None, + min_entities=None, max_entities=None, metadata=None): + """ + Updates an existing ScalingGroup. One or more of the attributes can + be specified. + + NOTE: if you specify metadata, it will *replace* any existing metadata. + If you want to add to it, you either need to pass the complete dict of + metadata, or call the update_metadata() method. + """ + return self._manager.update(scaling_group, name=name, cooldown=cooldown, + min_entities=min_entities, max_entities=max_entities, + metadata=metadata) + + + def update_metadata(self, scaling_group, metadata): + """ + Adds the given metadata dict to the existing metadata for the scaling + group. + """ + return self._manager.update_metadata(scaling_group, metadata) + + + def get_configuration(self, scaling_group): + """ + Returns the scaling group's configuration in a dictionary. + """ + return self._manager.get_configuration(scaling_group) + + + def get_launch_config(self, scaling_group): + """ + Returns the launch configuration for the specified scaling group. + """ + return self._manager.get_launch_config(scaling_group) + + + def update_launch_config(self, scaling_group, server_name=None, image=None, + flavor=None, disk_config=None, metadata=None, personality=None, + networks=None, load_balancers=None): + """ + Updates the server launch configuration for an existing scaling group. + One or more of the available attributes can be specified. + + NOTE: if you specify metadata, it will *replace* any existing metadata. + If you want to add to it, you either need to pass the complete dict of + metadata, or call the update_launch_metadata() method. + """ + return self._manager.update_launch_config(scaling_group, + server_name=server_name, image=image, flavor=flavor, + disk_config=disk_config, metadata=metadata, + personality=personality, networks=networks, + load_balancers=load_balancers) + + + def update_launch_metadata(self, scaling_group, metadata): + """ + Adds the given metadata dict to the existing metadata for the scaling + group's launch configuration. + """ + return self._manager.update_launch_metadata(scaling_group, metadata) + + + def add_policy(self, scaling_group, name, policy_type, cooldown, change, + is_percent=False): + """ + Adds a policy with the given values to the specified scaling group. The + 'change' parameter is treated as an absolute amount, unless + 'is_percent' is True, in which case it is treated as a percentage. + """ + return self._manager.add_policy(scaling_group, name, policy_type, + cooldown, change, is_percent=is_percent) + + + def list_policies(self, scaling_group): + """ + Returns a list of all policies defined for the specified scaling group. + """ + return self._manager.list_policies(scaling_group) + + + def get_policy(self, scaling_group, policy): + """ + Gets the detail for the specified policy. + """ + return self._manager.get_policy(scaling_group, policy) + + + def update_policy(self, scaling_group, policy, name=None, policy_type=None, + cooldown=None, change=None, is_percent=False): + """ + Updates the specified policy. One or more of the parameters may be + specified. + """ + return self._manager.update_policy(scaling_group=scaling_group, + policy=policy, name=name, policy_type=policy_type, + cooldown=cooldown, change=change, is_percent=is_percent) + + + def execute_policy(self, scaling_group, policy): + """ + Executes the specified policy for the scaling group. 
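+
+        For example (the names are illustrative):
+
+            clt.execute_policy(group, policy)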
+ """ + return self._manager.execute_policy(scaling_group=scaling_group, + policy=policy) + + + def delete_policy(self, scaling_group, policy): + """ + Deletes the specified policy from the scaling group. + """ + return self._manager.delete_policy(scaling_group=scaling_group, + policy=policy) + + + def add_webhook(self, scaling_group, policy, name, metadata=None): + """ + Adds a webhook to the specified policy. + """ + return self._manager.add_webhook(scaling_group, policy, name, + metadata=metadata) + + + def list_webhooks(self, scaling_group, policy): + """ + Returns a list of all webhooks defined for the specified policy. + """ + return self._manager.list_webhooks(scaling_group, policy) + + + def get_webhook(self, scaling_group, policy, webhook): + """ + Gets the detail for the specified webhook. + """ + return self._manager.get_webhook(scaling_group, policy, webhook) + + + def update_webhook(self, scaling_group, policy, webhook, name=None, + metadata=None): + """ + Updates the specified webhook. One or more of the parameters may be + specified. + """ + return self._manager.update_webhook(scaling_group=scaling_group, + policy=policy, webhook=webhook, name=name, metadata=metadata) + + + def update_webhook_metadata(self, scaling_group, policy, webhook, metadata): + """ + Adds the given metadata dict to the existing metadata for the specified + webhook. + """ + return self._manager.update_webhook_metadata(scaling_group, policy, + webhook, metadata) + + + def delete_webhook(self, scaling_group, policy, webhook): + """ + Deletes the specified webhook from the policy. + """ + return self._manager.delete_webhook(scaling_group, policy, webhook) + + + def _resolve_lbs(self, load_balancers): + """ + Takes either a single LB reference or a list of references and returns + the dictionary required for creating a Scaling Group. + + References can be either a dict that matches the structure required by + the autoscale API, a CloudLoadBalancer instance, or the ID of the load + balancer. 
+ """ + lb_args = [] + lbs = utils.coerce_string_to_list(load_balancers) + for lb in lbs: + if isinstance(lb, dict): + lb_args.append(lb) + elif isinstance(lb, CloudLoadBalancer): + lb_args.append({ + "loadBalancerId": lb.id, + "port": lb.port, + }) + else: + # See if it's an ID for a Load Balancer + try: + instance = pyrax.cloud_loadbalancers.get(lb) + except Exception: + raise exc.InvalidLoadBalancer("Received an invalid " + "specification for a Load Balancer: '%s'" % lb) + lb_args.append({ + "loadBalancerId": instance.id, + "port": instance.port, + }) + return lb_args + + + def _create_body(self, name, cooldown, min_entities, max_entities, + launch_config_type, server_name, image, flavor, disk_config=None, + metadata=None, personality=None, networks=None, + load_balancers=None, scaling_policies=None): + """ + Used to create the dict required to create any of the following: + A Scaling Group + """ + if disk_config is None: + disk_config = "AUTO" + if metadata is None: + metadata = {} + if personality is None: + personality = [] + if networks is None: + # Default to ServiceNet only + networks = [{"uuid": SERVICE_NET_ID}] + if load_balancers is None: + load_balancers = [] + if scaling_policies is None: + scaling_policies = [] + server_args = { + "flavorRef": flavor, + "name": server_name, + "imageRef": utils.get_id(image), + } + if metadata is not None: + server_args["metadata"] = metadata + if personality is not None: + server_args["personality"] = personality + if networks is not None: + server_args["networks"] = networks + if disk_config is not None: + server_args["OS-DCF:diskConfig"] = disk_config + load_balancer_args = self._resolve_lbs(load_balancers) + body = {"groupConfiguration": { + "name": name, + "cooldown": cooldown, + "minEntities": min_entities, + "maxEntities": max_entities, + }, + "launchConfiguration": { + "type": launch_config_type, + "args": { + "server": server_args, + "loadBalancers": load_balancer_args, + }, + }, + "scalingPolicies": scaling_policies, + } + body + return body diff --git a/awx/lib/site-packages/pyrax/base_identity.py b/awx/lib/site-packages/pyrax/base_identity.py new file mode 100644 index 0000000000..5ed23ebb14 --- /dev/null +++ b/awx/lib/site-packages/pyrax/base_identity.py @@ -0,0 +1,635 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import ConfigParser +import datetime +import json +import os +import re +import requests +import urlparse + +import pyrax +import pyrax.exceptions as exc +from pyrax.resource import BaseResource +import pyrax.utils as utils + + +_pat = r""" + (\d{4})-(\d{2})-(\d{2}) # YYYY-MM-DD + T # Separator + (\d{2}):(\d{2}):(\d{2}) # HH:MM:SS + \.\d+ # Decimal and fractional seconds + ([\-\+])(\d{2}):(\d{2}) # TZ offset, in ±HH:00 format + """ +_utc_pat = r""" + (\d{4})-(\d{2})-(\d{2}) # YYYY-MM-DD + T # Separator + (\d{2}):(\d{2}):(\d{2}) # HH:MM:SS + \.?\d* # Decimal and fractional seconds + Z # UTC indicator + """ +API_DATE_PATTERN = re.compile(_pat, re.VERBOSE) +UTC_API_DATE_PATTERN = re.compile(_utc_pat, re.VERBOSE) +DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + + +class Tenant(BaseResource): + pass + + +class User(BaseResource): + pass + + +class BaseAuth(object): + """ + This class handles all of the basic authentication requirements for working + with an OpenStack Cloud system. 
+ """ + username = "" + password = "" + token = "" + expires = "" + tenant_id = "" + tenant_name = "" + authenticated = False + user_agent = "pyrax" + http_log_debug = False + _default_region = None + + + def __init__(self, username=None, password=None, token=None, + credential_file=None, region=None, timeout=None, verify_ssl=True): + + self.username = username + self.password = password + self.token = token + self.region = region + self._creds_file = credential_file + self._timeout = timeout + self.services = {} + self.regions = set() + self.verify_ssl = verify_ssl + + + @property + def auth_token(self): + """Simple alias to self.token.""" + return self.token + + + @property + def auth_endpoint(self): + """Abstracts out the logic for connecting to different auth endpoints.""" + return self._get_auth_endpoint() + + + def _get_auth_endpoint(self): + """Each subclass will have to implement its own method.""" + raise NotImplementedError("The _get_auth_endpoint() method must be " + "defined in Auth subclasses.") + + + def get_default_region(self): + """ + In cases where the region has not been specified, return the value to + use. Subclasses may use information in the service catalog to determine + the appropriate default value. + """ + return self._default_region + + + def set_credentials(self, username, password=None, region=None, + tenant_id=None, authenticate=False): + """Sets the username and password directly.""" + self.username = username + self.password = password + self.tenant_id = tenant_id + if region: + self.region = region + if authenticate: + self.authenticate() + + + def set_credential_file(self, credential_file, region=None, + tenant_id=tenant_id, authenticate=False): + """ + Reads in the credentials from the supplied file. It should be + a standard config file in the format: + + [keystone] + username = myusername + password = top_secret + tenant_id = my_id + + """ + self._creds_file = credential_file + cfg = ConfigParser.SafeConfigParser() + try: + if not cfg.read(credential_file): + # If the specified file does not exist, the parser will + # return an empty list + raise exc.FileNotFound("The specified credential file '%s' " + "does not exist" % credential_file) + except ConfigParser.MissingSectionHeaderError as e: + # The file exists, but doesn't have the correct format. + raise exc.InvalidCredentialFile(e) + try: + self._read_credential_file(cfg) + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e: + raise exc.InvalidCredentialFile(e) + if region: + self.region = region + if authenticate: + self.authenticate() + + + def auth_with_token(self, token, tenant_id=None, tenant_name=None): + """ + If a valid token is already known, this call will use it to generate + the service catalog. 
+ """ + resp = self._call_token_auth(token, tenant_id, tenant_name) + resp_body = resp.json() + self._parse_response(resp_body) + self.authenticated = True + + + def _call_token_auth(self, token, tenant_id, tenant_name): + if not any((tenant_id, tenant_name)): + raise exc.MissingAuthSettings("You must supply either the tenant " + "name or tenant ID") + if tenant_id: + key = "tenantId" + val = tenant_id + else: + key = "tenantName" + val = tenant_name + body = {"auth": { + key: val, + "token": {"id": token}, + }} + headers = {"Content-Type": "application/json", + "Accept": "application/json", + } + resp = self.method_post("tokens", data=body, headers=headers, + std_headers=False) + if resp.status_code == 401: + # Invalid authorization + raise exc.AuthenticationFailed("Incorrect/unauthorized " + "credentials received") + elif resp.status_code > 299: + msg_dict = resp.json() + msg = msg_dict[msg_dict.keys()[0]]["message"] + raise exc.AuthenticationFailed("%s - %s." % (resp.reason, msg)) + return resp + + + def _read_credential_file(self, cfg): + """ + Implements the default (keystone) behavior. + """ + self.username = cfg.get("keystone", "username") + self.password = cfg.get("keystone", "password", raw=True) + self.tenant_id = cfg.get("keystone", "tenant_id") + + + def _get_credentials(self): + """ + Returns the current credentials in the format expected by + the authentication service. + """ + tenant_name = self.tenant_name or self.username + tenant_id = self.tenant_id or self.username + return {"auth": {"passwordCredentials": + {"username": self.username, + "password": self.password, + }, + "tenantId": tenant_id}} + + + # The following method_* methods wrap the _call() method. + def method_get(self, uri, admin=False, data=None, headers=None, + std_headers=True): + return self._call(requests.get, uri, admin, data, headers, std_headers) + + def method_head(self, uri, admin=False, data=None, headers=None, + std_headers=True): + return self._call(requests.head, uri, admin, data, headers, std_headers) + + def method_post(self, uri, admin=False, data=None, headers=None, + std_headers=True): + return self._call(requests.post, uri, admin, data, headers, std_headers) + + def method_put(self, uri, admin=False, data=None, headers=None, + std_headers=True): + return self._call(requests.put, uri, admin, data, headers, std_headers) + + def method_delete(self, uri, admin=False, data=None, headers=None, + std_headers=True): + return self._call(requests.delete, uri, admin, data, headers, + std_headers) + + + def _call(self, mthd, uri, admin, data, headers, std_headers): + """ + Handles all the common functionality required for API calls. Returns + the resulting response object. + """ + if not uri.startswith("http"): + uri = "/".join((self.auth_endpoint.rstrip("/"), uri)) + if admin: + # Admin calls use a different port + uri = re.sub(r":\d+/", ":35357/", uri) + if std_headers: + hdrs = self._standard_headers() + else: + hdrs = {} + if headers: + hdrs.update(headers) + jdata = json.dumps(data) if data else None + if self.http_log_debug: + print "REQ:", mthd.func_name.upper(), uri + print "HDRS:", hdrs + if data: + print "DATA", jdata + print + return mthd(uri, data=jdata, headers=hdrs, verify=self.verify_ssl) + + + def authenticate(self): + """ + Using the supplied credentials, connects to the specified + authentication endpoint and attempts to log in. If successful, + records the token information. 
+ """ + creds = self._get_credentials() + headers = {"Content-Type": "application/json", + "Accept": "application/json", + } + resp = self.method_post("tokens", data=creds, headers=headers, + std_headers=False) + + if resp.status_code == 401: + # Invalid authorization + raise exc.AuthenticationFailed("Incorrect/unauthorized " + "credentials received") + elif resp.status_code > 299: + msg_dict = resp.json() + try: + msg = msg_dict[msg_dict.keys()[0]]["message"] + except KeyError: + msg = None + if msg: + err = "%s - %s." % (resp.reason, msg) + else: + err = "%s." % resp.reason + raise exc.AuthenticationFailed(err) + resp_body = resp.json() + self._parse_response(resp_body) + self.authenticated = True + + + def _parse_response(self, resp): + """Gets the authentication information from the returned JSON.""" + access = resp["access"] + token = access.get("token") + self.token = token["id"] + self.tenant_id = token["tenant"]["id"] + self.tenant_name = token["tenant"]["name"] + self.expires = self._parse_api_time(token["expires"]) + svc_cat = access.get("serviceCatalog") + self.services = {} + for svc in svc_cat: + # Replace any dashes with underscores. + # Also, some service types have RAX-specific identifiers; strip them. + typ = svc["type"].replace("-", "_").replace("rax:", "") + if typ == "compute": + if svc["name"].lower() == "cloudservers": + # First-generation Rackspace cloud servers + continue + self.services[typ] = dict(name=svc["name"], endpoints={}) + svc_ep = self.services[typ]["endpoints"] + for ep in svc["endpoints"]: + rgn = ep.get("region", "ALL") + self.regions.add(rgn) + svc_ep[rgn] = {} + svc_ep[rgn]["public_url"] = ep["publicURL"] + try: + svc_ep[rgn]["internal_url"] = ep["internalURL"] + except KeyError: + pass + self.regions.discard("ALL") + pyrax.regions = tuple(self.regions) + pyrax.services = tuple(self.services.keys()) + user = access["user"] + self.user = {} + self.user["id"] = user["id"] + self.username = self.user["name"] = user["name"] + self.user["roles"] = user["roles"] + + + def unauthenticate(self): + """ + Clears all authentication information. + """ + self.token = self.expires = self.tenant_id = self.tenant_name = "" + self.authenticated = False + self.services = {} + + + def _standard_headers(self): + """ + Returns a dict containing the standard headers for API calls. + """ + return {"Content-Type": "application/json", + "Accept": "application/json", + "X-Auth-Token": self.token, + "X-Auth-Project-Id": self.tenant_id, + } + + + def get_extensions(self): + """ + Returns a list of extensions enabled on this service. + """ + resp = self.method_get("extensions") + return resp.json().get("extensions", {}).get("values") + + + def get_token(self, force=False): + """Returns the auth token, if it is valid. If not, calls the auth endpoint + to get a new token. Passing 'True' to 'force' will force a call for a new + token, even if there already is a valid token. + """ + self.authenticated = self._has_valid_token() + if force or not self.authenticated: + self.authenticate() + return self.token + + + def _has_valid_token(self): + """ + This only checks the token's existence and expiration. If it has been + invalidated on the server, this method may indicate that the token is + valid when it might actually not be. + """ + return bool(self.token and (self.expires > datetime.datetime.now())) + + + def list_tokens(self): + """ + ADMIN ONLY. Returns a dict containing tokens, endpoints, user info, and + role metadata. 
+ """ + resp = self.method_get("tokens/%s" % self.token, admin=True) + if resp.status_code in (401, 403): + raise exc.AuthorizationFailure("You must be an admin to make this " + "call.") + token_dct = resp.json() + return token_dct.get("access") + + + def check_token(self, token=None): + """ + ADMIN ONLY. Returns True or False, depending on whether the current + token is valid. + """ + if token is None: + token = self.token + resp = self.method_head("tokens/%s" % token, admin=True) + if resp.status_code in (401, 403): + raise exc.AuthorizationFailure("You must be an admin to make this " + "call.") + return 200 <= resp.status_code < 300 + + + def get_token_endpoints(self): + """ + ADMIN ONLY. Returns a list of all endpoints for the current auth token. + """ + resp = self.method_get("tokens/%s/endpoints" % self.token, admin=True) + if resp.status_code in (401, 403, 404): + raise exc.AuthorizationFailure("You are not authorized to list " + "token endpoints.") + token_dct = resp.json() + return token_dct.get("access", {}).get("endpoints") + + + def list_users(self): + """ + ADMIN ONLY. Returns a list of objects for all users for the tenant + (account) if this request is issued by a user holding the admin role + (identity:user-admin). + """ + resp = self.method_get("users", admin=True) + if resp.status_code in (401, 403, 404): + raise exc.AuthorizationFailure("You are not authorized to list " + "users.") + users = resp.json() + # The API is inconsistent; if only one user exists, it will not return + # a list. + if "users" in users: + users = users["users"] + else: + users = [users] + # The returned values may contain password data. Strip that out. + for user in users: + bad_keys = [key for key in user.keys() + if "password" in key.lower()] + for bad_key in bad_keys: + user.pop(bad_key) + return [User(self, user) for user in users] + + + def create_user(self, name, email, password=None, enabled=True): + """ + ADMIN ONLY. Creates a new user for this tenant (account). The username + and email address must be supplied. You may optionally supply the + password for this user; if not, the API server will generate a password + and return it in the 'password' attribute of the resulting User object. + NOTE: this is the ONLY time the password will be returned; after the + initial user creation, there is NO WAY to retrieve the user's password. + + You may also specify that the user should be created but not active by + passing False to the enabled parameter. + """ + # NOTE: the OpenStack docs say that the name key in the following dict + # is supposed to be 'username', but the service actually expects 'name'. + data = {"user": { + "name": name, + "email": email, + "enabled": enabled, + }} + if password: + data["user"]["OS-KSADM:password"] = password + resp = self.method_post("users", data=data, admin=True) + if resp.status_code == 201: + jresp = resp.json() + return User(self, jresp) + elif resp.status_code in (401, 403, 404): + raise exc.AuthorizationFailure("You are not authorized to create " + "users.") + elif resp.status_code == 409: + raise exc.DuplicateUser("User '%s' already exists." % name) + + + # Can we really update the ID? Docs seem to say we can + def update_user(self, user, email=None, username=None, + uid=None, enabled=None): + """ + ADMIN ONLY. Updates the user attributes with the supplied values. 
+ """ + user_id = utils.get_id(user) + uri = "users/%s" % user_id + upd = {"id": user_id} + if email is not None: + upd["email"] = email + if username is not None: + upd["username"] = username + if enabled is not None: + upd["enabled"] = enabled + data = {"user": upd} + resp = self.method_put(uri, data=data) + if resp.status_code in (401, 403, 404): + raise exc.AuthorizationFailure("You are not authorized to update " + "users.") + return User(self, resp.json()) + + + def delete_user(self, user): + """ + ADMIN ONLY. Removes the user from the system. There is no 'undo' + available, so you should be certain that the user specified is the user + you wish to delete. + """ + user_id = utils.get_id(user) + uri = "users/%s" % user_id + resp = self.method_delete(uri) + if resp.status_code == 404: + raise exc.UserNotFound("User '%s' does not exist." % user) + elif resp.status_code in (401, 403): + raise exc.AuthorizationFailure("You are not authorized to delete " + "users.") + + + def list_roles_for_user(self, user): + """ + ADMIN ONLY. Returns a list of roles for the specified user. Each role + will be a 3-tuple, consisting of (role_id, role_name, + role_description). + """ + user_id = utils.get_id(user) + uri = "users/%s/roles" % user_id + resp = self.method_get(uri) + if resp.status_code in (401, 403): + raise exc.AuthorizationFailure("You are not authorized to list " + "user roles.") + roles = resp.json().get("roles") + return roles + + + def get_tenant(self): + """ + Returns the tenant for the current user. + """ + tenants = self._list_tenants(admin=False) + if tenants: + return tenants[0] + return None + + + def list_tenants(self): + """ + ADMIN ONLY. Returns a list of all tenants. + """ + return self._list_tenants(admin=True) + + + def _list_tenants(self, admin): + """ + Returns either a list of all tenants (admin=True), or the tenant for + the currently-authenticated user (admin=False). + """ + resp = self.method_get("tenants", admin=admin) + if 200 <= resp.status_code < 300: + tenants = resp.json().get("tenants", []) + return [Tenant(self, tenant) for tenant in tenants] + elif resp.status_code in (401, 403): + raise exc.AuthorizationFailure("You are not authorized to list " + "tenants.") + else: + raise exc.TenantNotFound("Could not get a list of tenants.") + + + def create_tenant(self, name, description=None, enabled=True): + """ + ADMIN ONLY. Creates a new tenant. + """ + data = {"tenant": { + "name": name, + "enabled": enabled, + }} + if description: + data["tenant"]["description"] = description + resp = self.method_post("tenants", data=data) + return Tenant(self, resp.json()) + + + def update_tenant(self, tenant, name=None, description=None, enabled=True): + """ + ADMIN ONLY. Updates an existing tenant. + """ + tenant_id = utils.get_id(tenant) + data = {"tenant": { + "enabled": enabled, + }} + if name: + data["tenant"]["name"] = name + if description: + data["tenant"]["description"] = description + resp = self.method_put("tenants/%s" % tenant_id, data=data) + return Tenant(self, resp.json()) + + + def delete_tenant(self, tenant): + """ + ADMIN ONLY. Removes the tenant from the system. There is no 'undo' + available, so you should be certain that the tenant specified is the + tenant you wish to delete. + """ + tenant_id = utils.get_id(tenant) + uri = "tenants/%s" % tenant_id + resp = self.method_delete(uri) + if resp.status_code == 404: + raise exc.TenantNotFound("Tenant '%s' does not exist." 
% tenant) + + + @staticmethod + def _parse_api_time(timestr): + """ + Typical expiration times returned from the auth server are in this format: + 2012-05-02T14:27:40.000-05:00 + They can also be returned as a UTC value in this format: + 2012-05-02T14:27:40.000Z + This method returns a proper datetime object from either of these formats. + """ + try: + reg_groups = API_DATE_PATTERN.match(timestr).groups() + yr, mth, dy, hr, mn, sc, off_sign, off_hr, off_mn = reg_groups + except AttributeError: + # UTC dates don't show offsets. + utc_groups = UTC_API_DATE_PATTERN.match(timestr).groups() + yr, mth, dy, hr, mn, sc = utc_groups + off_sign = "+" + off_hr = off_mn = 0 + base_dt = datetime.datetime(int(yr), int(mth), int(dy), int(hr), + int(mn), int(sc)) + delta = datetime.timedelta(hours=int(off_hr), minutes=int(off_mn)) + if off_sign == "+": + # Time is greater than UTC + ret = base_dt - delta + else: + ret = base_dt + delta + return ret diff --git a/awx/lib/site-packages/pyrax/cf_wrapper/__init__.py b/awx/lib/site-packages/pyrax/cf_wrapper/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/pyrax/cf_wrapper/client.py b/awx/lib/site-packages/pyrax/cf_wrapper/client.py new file mode 100644 index 0000000000..1c5638ab42 --- /dev/null +++ b/awx/lib/site-packages/pyrax/cf_wrapper/client.py @@ -0,0 +1,1350 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import datetime +from functools import wraps +import hashlib +import hmac +# Use eventlet if available +try: + import eventlet.green.httplib as httplib +except ImportError: + import httplib +import locale +import math +import os +import re +import socket +import threading +import time +import urllib +import urlparse +import uuid +import mimetypes + +from swiftclient import client as _swift_client +import pyrax +from pyrax.cf_wrapper.container import Container +from pyrax.cf_wrapper.storage_object import StorageObject +import pyrax.utils as utils +import pyrax.exceptions as exc + + +EARLY_DATE_STR = "1900-01-01T00:00:00" +DATE_FORMAT = "%Y-%m-%dT%H:%M:%S" +HEAD_DATE_FORMAT = "%a, %d %b %Y %H:%M:%S %Z" +CONNECTION_TIMEOUT = 20 +CONNECTION_RETRIES = 5 +AUTH_ATTEMPTS = 2 + +no_such_container_pattern = re.compile( + r"Container (?:GET|HEAD) failed: .+/(.+) 404") +no_such_object_pattern = re.compile(r"Object (?:GET|HEAD) failed: .+/(.+) 404") +etag_fail_pat = r"Object PUT failed: .+/([^/]+)/(\S+) 422 Unprocessable Entity" +etag_failed_pattern = re.compile(etag_fail_pat) + + +def handle_swiftclient_exception(fnc): + @wraps(fnc) + def _wrapped(self, *args, **kwargs): + attempts = 0 + clt_url = self.connection.url + + def close_swiftclient_conn(conn): + """Swiftclient often leaves the connection open.""" + try: + conn.http_conn[1].close() + except Exception: + pass + + while attempts < AUTH_ATTEMPTS: + attempts += 1 + try: + close_swiftclient_conn(self.connection) + ret = fnc(self, *args, **kwargs) + return ret + except _swift_client.ClientException as e: + if attempts < AUTH_ATTEMPTS: + # Assume it is an auth failure. Re-auth and retry. + ### NOTE: This is a hack to get around an apparent bug + ### in python-swiftclient when using Rackspace auth. 
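+                    ### The calls below re-authenticate, then patch the
+                    ### swiftclient connection's token and URL via
+                    ### plug_hole_in_swiftclient_auth() before retrying.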
+                    pyrax.authenticate(connect=False)
+                    if pyrax.identity.authenticated:
+                        pyrax.plug_hole_in_swiftclient_auth(self, clt_url)
+                    continue
+                str_error = "%s" % e
+                bad_container = no_such_container_pattern.search(str_error)
+                if bad_container:
+                    raise exc.NoSuchContainer("Container '%s' doesn't exist" %
+                            bad_container.groups()[0])
+                bad_object = no_such_object_pattern.search(str_error)
+                if bad_object:
+                    raise exc.NoSuchObject("Object '%s' doesn't exist" %
+                            bad_object.groups()[0])
+                failed_upload = etag_failed_pattern.search(str_error)
+                if failed_upload:
+                    cont, fname = failed_upload.groups()
+                    raise exc.UploadFailed("Upload of file '%(fname)s' to "
+                            "container '%(cont)s' failed." % locals())
+                if e.http_status == 404:
+                    raise exc.NoSuchObject("The requested object/container "
+                            "does not exist.")
+                # Not handled; re-raise
+                raise
+    return _wrapped
+
+
+
+class CFClient(object):
+    """
+    Wraps the calls to swiftclient with objects representing Containers
+    and StorageObjects.
+
+    These classes allow a developer to work with regular Python objects
+    instead of calling functions that return primitive types.
+    """
+    # Constants used in metadata headers
+    account_meta_prefix = "X-Account-Meta-"
+    container_meta_prefix = "X-Container-Meta-"
+    object_meta_prefix = "X-Object-Meta-"
+    cdn_meta_prefix = "X-Cdn-"
+    # Defaults for CDN
+    cdn_enabled = False
+    default_cdn_ttl = 86400
+    _container_cache = {}
+    # Upload size limit
+    max_file_size = 5368709119  # 5GB - 1
+    # Folder upload status dict. Each upload will generate its own UUID key.
+    # The app can use that key to query the status of the upload. This dict
+    # will also be used to hold the flag to interrupt uploads in progress.
+    folder_upload_status = {}
+
+
+    def __init__(self, auth_endpoint, username, api_key=None, password=None,
+            tenant_name=None, preauthurl=None, preauthtoken=None,
+            auth_version="2", os_options=None, verify_ssl=True,
+            http_log_debug=False):
+        self.connection = None
+        self.cdn_connection = None
+        self.http_log_debug = http_log_debug
+        self._http_log = _swift_client.http_log
+        os.environ["SWIFTCLIENT_DEBUG"] = "True" if http_log_debug else ""
+        self._make_connections(auth_endpoint, username, api_key, password,
+                tenant_name=tenant_name, preauthurl=preauthurl,
+                preauthtoken=preauthtoken, auth_version=auth_version,
+                os_options=os_options, verify_ssl=verify_ssl,
+                http_log_debug=http_log_debug)
+
+
+    def _make_connections(self, auth_endpoint, username, api_key, password,
+            tenant_name=None, preauthurl=None, preauthtoken=None,
+            auth_version="2", os_options=None, verify_ssl=True,
+            http_log_debug=None):
+        cdn_url = os_options.pop("object_cdn_url", None)
+        pw_key = api_key or password
+        insecure = not verify_ssl
+        self.connection = Connection(authurl=auth_endpoint, user=username,
+                key=pw_key, tenant_name=tenant_name, preauthurl=preauthurl,
+                preauthtoken=preauthtoken, auth_version=auth_version,
+                os_options=os_options, insecure=insecure,
+                http_log_debug=http_log_debug)
+        if cdn_url:
+            self.connection._make_cdn_connection(cdn_url)
+
+
+    def _massage_metakeys(self, dct, prfx):
+        """
+        Returns a copy of the supplied dictionary, prefixing any keys that do
+        not begin with the specified prefix accordingly. Also lowercases all of
+        the keys since that's what is returned by the API.
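+
+        For example:
+
+            _massage_metakeys({"Color": "blue"}, "X-Account-Meta-")
+
+        returns {"x-account-meta-color": "blue"}.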
+ """ + lowprefix = prfx.lower() + ret = {} + for k, v in dct.iteritems(): + if not k.lower().startswith(lowprefix): + k = "%s%s" % (prfx, k) + ret[k.lower()] = v + return ret + + + def _resolve_name(self, val): + return val if isinstance(val, basestring) else val.name + + + @handle_swiftclient_exception + def get_account_metadata(self): + headers = self.connection.head_account() + prfx = self.account_meta_prefix.lower() + ret = {} + for hkey, hval in headers.iteritems(): + if hkey.lower().startswith(prfx): + ret[hkey] = hval + return ret + + + @handle_swiftclient_exception + def set_account_metadata(self, metadata, clear=False, + extra_info=None): + """ + Accepts a dictionary of metadata key/value pairs and updates + the specified account metadata with them. + + If 'clear' is True, any existing metadata is deleted and only + the passed metadata is retained. Otherwise, the values passed + here update the account's metadata. + + extra_info is an optional dictionary which will be + populated with 'status', 'reason', and 'headers' keys from the + underlying swiftclient call. + """ + # Add the metadata prefix, if needed. + massaged = self._massage_metakeys(metadata, self.account_meta_prefix) + new_meta = {} + if clear: + curr_meta = self.get_account_metadata() + for ckey in curr_meta: + new_meta[ckey] = "" + new_meta.update(massaged) + self.connection.post_account(new_meta, + response_dict=extra_info) + + + @handle_swiftclient_exception + def get_temp_url_key(self): + """ + Returns the current TempURL key, or None if it has not been set. + """ + key = "%stemp-url-key" % self.account_meta_prefix.lower() + meta = self.get_account_metadata().get(key) + return meta + + + @handle_swiftclient_exception + def set_temp_url_key(self, key=None): + """ + Sets the key for the Temporary URL for the account. It should be a key + that is secret to the owner. + + If no key is provided, a UUID value will be generated and used. It can + later be obtained by calling get_temp_url_key(). + """ + if key is None: + key = uuid.uuid4().hex + meta = {"Temp-Url-Key": key} + self.set_account_metadata(meta) + + + def get_temp_url(self, container, obj, seconds, method="GET"): + """ + Given a storage object in a container, returns a URL that can be used + to access that object. The URL will expire after `seconds` seconds. + + The only methods supported are GET and PUT. Anything else will raise + an InvalidTemporaryURLMethod exception. + """ + cname = self._resolve_name(container) + oname = self._resolve_name(obj) + mod_method = method.upper().strip() + if mod_method not in ("GET", "PUT"): + raise exc.InvalidTemporaryURLMethod("Method must be either 'GET' " + "or 'PUT'; received '%s'." % method) + key = self.get_temp_url_key() + if not key: + raise exc.MissingTemporaryURLKey("You must set the key for " + "Temporary URLs before you can generate them. 
This is " + "done via the `set_temp_url_key()` method.") + conn_url = self.connection.url + v1pos = conn_url.index("/v1/") + base_url = conn_url[:v1pos] + path_parts = (conn_url[v1pos:], cname, oname) + cleaned = (part.strip("/\\") for part in path_parts) + pth = "/%s" % "/".join(cleaned) + if isinstance(pth, unicode): + pth = pth.encode(pyrax.get_encoding()) + expires = int(time.time() + int(seconds)) + hmac_body = "%s\n%s\n%s" % (mod_method, expires, pth) + try: + sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest() + except TypeError as e: + raise exc.UnicodePathError("Due to a bug in Python, the TempURL " + "function only works with ASCII object paths.") + temp_url = "%s%s?temp_url_sig=%s&temp_url_expires=%s" % (base_url, pth, + sig, expires) + return temp_url + + + def delete_object_in_seconds(self, cont, obj, seconds, + extra_info=None): + """ + Sets the object in the specified container to be deleted after the + specified number of seconds. + """ + cname = self._resolve_name(cont) + oname = self._resolve_name(obj) + headers = {"X-Delete-After": seconds} + self.connection.post_object(cname, oname, headers=headers, + response_dict=extra_info) + + + @handle_swiftclient_exception + def get_container_metadata(self, container): + """Returns a dictionary containing the metadata for the container.""" + cname = self._resolve_name(container) + headers = self.connection.head_container(cname) + prfx = self.container_meta_prefix.lower() + ret = {} + for hkey, hval in headers.iteritems(): + if hkey.lower().startswith(prfx): + ret[hkey] = hval + return ret + + + @handle_swiftclient_exception + def set_container_metadata(self, container, metadata, clear=False, + extra_info=None): + """ + Accepts a dictionary of metadata key/value pairs and updates + the specified container metadata with them. + + If 'clear' is True, any existing metadata is deleted and only + the passed metadata is retained. Otherwise, the values passed + here update the container's metadata. + + extra_info is an optional dictionary which will be + populated with 'status', 'reason', and 'headers' keys from the + underlying swiftclient call. + """ + # Add the metadata prefix, if needed. + massaged = self._massage_metakeys(metadata, self.container_meta_prefix) + cname = self._resolve_name(container) + new_meta = {} + if clear: + curr_meta = self.get_container_metadata(cname) + for ckey in curr_meta: + new_meta[ckey] = "" + new_meta.update(massaged) + self.connection.post_container(cname, new_meta, + response_dict=extra_info) + + + @handle_swiftclient_exception + def remove_container_metadata_key(self, container, key, + extra_info=None): + """ + Removes the specified key from the container's metadata. If the key + does not exist in the metadata, nothing is done. + """ + meta_dict = {key: ""} + # Add the metadata prefix, if needed. + massaged = self._massage_metakeys(meta_dict, self.container_meta_prefix) + cname = self._resolve_name(container) + self.connection.post_container(cname, massaged, + response_dict=extra_info) + + + @handle_swiftclient_exception + def get_container_cdn_metadata(self, container): + """ + Returns a dictionary containing the CDN metadata for the container. + """ + cname = self._resolve_name(container) + response = self.connection.cdn_request("HEAD", [cname]) + headers = response.getheaders() + # Read the response to force it to close for the next request. + response.read() + # headers is a list of 2-tuples instead of a dict. 
+ return dict(headers) + + + @handle_swiftclient_exception + def set_container_cdn_metadata(self, container, metadata): + """ + Accepts a dictionary of metadata key/value pairs and updates + the specified container metadata with them. + + NOTE: arbitrary metadata headers are not allowed. The only metadata + you can update are: X-Log-Retention, X-CDN-enabled, and X-TTL. + """ + ct = self.get_container(container) + allowed = ("x-log-retention", "x-cdn-enabled", "x-ttl") + hdrs = {} + bad = [] + for mkey, mval in metadata.iteritems(): + if mkey.lower() not in allowed: + bad.append(mkey) + continue + hdrs[mkey] = str(mval) + if bad: + raise exc.InvalidCDNMetadata("The only CDN metadata you can " + "update are: X-Log-Retention, X-CDN-enabled, and X-TTL. " + "Received the following illegal item(s): %s" % + ", ".join(bad)) + response = self.connection.cdn_request("POST", [ct.name], hdrs=hdrs) + response.close() + + + @handle_swiftclient_exception + def get_object_metadata(self, container, obj): + """Retrieves any metadata for the specified object.""" + cname = self._resolve_name(container) + oname = self._resolve_name(obj) + headers = self.connection.head_object(cname, oname) + prfx = self.object_meta_prefix.lower() + ret = {} + for hkey, hval in headers.iteritems(): + if hkey.lower().startswith(prfx): + ret[hkey] = hval + return ret + + + @handle_swiftclient_exception + def set_object_metadata(self, container, obj, metadata, clear=False, + extra_info=None): + """ + Accepts a dictionary of metadata key/value pairs and updates + the specified object metadata with them. + + If 'clear' is True, any existing metadata is deleted and only + the passed metadata is retained. Otherwise, the values passed + here update the object's metadata. + + extra_info is an optional dictionary which will be + populated with 'status', 'reason', and 'headers' keys from the + underlying swiftclient call. + """ + # Add the metadata prefix, if needed. + massaged = self._massage_metakeys(metadata, self.object_meta_prefix) + cname = self._resolve_name(container) + oname = self._resolve_name(obj) + new_meta = {} + # Note that the API for object POST is the opposite of that for + # container POST: for objects, all current metadata is deleted, + # whereas for containers you need to set the values to an empty + # string to delete them. + if not clear: + obj_meta = self.get_object_metadata(cname, oname) + new_meta = self._massage_metakeys(obj_meta, self.object_meta_prefix) + new_meta.update(massaged) + # Remove any empty values, since the object metadata API will + # store them. + to_pop = [] + for key, val in new_meta.iteritems(): + if not val: + to_pop.append(key) + for key in to_pop: + new_meta.pop(key) + self.connection.post_object(cname, oname, new_meta, + response_dict=extra_info) + + + @handle_swiftclient_exception + def remove_object_metadata_key(self, container, obj, key): + """ + Removes the specified key from the storage object's metadata. If the key + does not exist in the metadata, nothing is done. + """ + self.set_object_metadata(container, obj, {key: ""}) + + + @handle_swiftclient_exception + def create_container(self, name, extra_info=None): + """Creates a container with the specified name. + + extra_info is an optional dictionary which will be + populated with 'status', 'reason', and 'headers' keys from the + underlying swiftclient call. 
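+
+        A minimal usage sketch (hypothetical names; `clt` is an
+        authenticated CFClient):
+
+            cont = clt.create_container("photos")
+            # `cont` is a Container instance; cont.name == "photos"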
+ """ + name = self._resolve_name(name) + self.connection.put_container(name, response_dict=extra_info) + return self.get_container(name) + + + @handle_swiftclient_exception + def delete_container(self, container, del_objects=False, extra_info=None): + """ + Deletes the specified container. This will fail if the container + still has objects stored in it; if that's the case and you want + to delete the container anyway, set del_objects to True, and + the container's objects will be deleted before the container is + deleted. + + extra_info is an optional dictionary which will be + populated with 'status', 'reason', and 'headers' keys from the + underlying swiftclient call. + """ + self._remove_container_from_cache(container) + cname = self._resolve_name(container) + if del_objects: + objs = self.get_container_object_names(cname) + for obj in objs: + self.delete_object(cname, obj) + self.connection.delete_container(cname, response_dict=extra_info) + return True + + + def _remove_container_from_cache(self, container): + """Removes the container from the cache.""" + nm = self._resolve_name(container) + self._container_cache.pop(nm, None) + + + @handle_swiftclient_exception + def delete_object(self, container, name, extra_info=None): + """Deletes the specified object from the container. + + extra_info is an optional dictionary which will be + populated with 'status', 'reason', and 'headers' keys from the + underlying swiftclient call. + """ + ct = self.get_container(container) + ct.remove_from_cache(name) + oname = self._resolve_name(name) + self.connection.delete_object(ct.name, oname, + response_dict=extra_info) + return True + + + @handle_swiftclient_exception + def get_object(self, container, obj): + """Returns a StorageObject instance for the object in the container.""" + cname = self._resolve_name(container) + oname = self._resolve_name(obj) + obj_info = self.connection.head_object(cname, oname) + # Need to convert last modified time to a datetime object. + # Times are returned in default locale format, so we need to read + # them as such, no matter what the locale setting may be. + lm_str = obj_info["last-modified"] + orig_locale = locale.getlocale(locale.LC_TIME) + locale.setlocale(locale.LC_TIME, (None, None)) + tm_tuple = time.strptime(lm_str, HEAD_DATE_FORMAT) + locale.setlocale(locale.LC_TIME, orig_locale) + dttm = datetime.datetime.fromtimestamp(time.mktime(tm_tuple)) + # Now convert it back to the format returned by GETting the object. + dtstr = dttm.strftime(DATE_FORMAT) + obj = StorageObject(self, self.get_container(container), + name=oname, content_type=obj_info["content-type"], + total_bytes=int(obj_info["content-length"]), + last_modified=dtstr, etag=obj_info["etag"]) + return obj + + + @handle_swiftclient_exception + def store_object(self, container, obj_name, data, content_type=None, + etag=None, content_encoding=None, ttl=None, return_none=False, + extra_info=None): + """ + Creates a new object in the specified container, and populates it with + the given data. A StorageObject reference to the uploaded file + will be returned, unless 'return_none' is set to True. + + extra_info is an optional dictionary which will be + populated with 'status', 'reason', and 'headers' keys from the + underlying swiftclient call. 
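+
+        A minimal usage sketch (hypothetical names; `clt` is an
+        authenticated CFClient):
+
+            obj = clt.store_object("photos", "notes.txt", "some text",
+                    content_type="text/plain")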
+ """ + cont = self.get_container(container) + headers = {} + if content_encoding is not None: + headers["Content-Encoding"] = content_encoding + if ttl is not None: + headers["X-Delete-After"] = ttl + with utils.SelfDeletingTempfile() as tmp: + with open(tmp, "wb") as tmpfile: + try: + tmpfile.write(data) + except UnicodeEncodeError: + udata = data.encode("utf-8") + tmpfile.write(udata) + with open(tmp, "rb") as tmpfile: + self.connection.put_object(cont.name, obj_name, + contents=tmpfile, content_type=content_type, etag=etag, + headers=headers, response_dict=extra_info) + if return_none: + return None + else: + return self.get_object(container, obj_name) + + + @handle_swiftclient_exception + def copy_object(self, container, obj, new_container, new_obj_name=None, + extra_info=None): + """ + Copies the object to the new container, optionally giving it a new name. + If you copy to the same container, you must supply a different name. + """ + cont = self.get_container(container) + obj = self.get_object(cont, obj) + new_cont = self.get_container(new_container) + if new_obj_name is None: + new_obj_name = obj.name + hdrs = {"X-Copy-From": "/%s/%s" % (cont.name, obj.name)} + return self.connection.put_object(new_cont.name, new_obj_name, + contents=None, headers=hdrs, response_dict=extra_info) + + + @handle_swiftclient_exception + def move_object(self, container, obj, new_container, new_obj_name=None): + """ + Works just like copy_object, except that the source object is deleted + after a successful copy. + """ + new_obj_etag = self.copy_object(container, obj, new_container, + new_obj_name=new_obj_name) + if new_obj_etag: + # Copy succeeded; delete the original. + self.delete_object(container, obj) + return new_obj_etag + + @handle_swiftclient_exception + def change_object_content_type(self, container, obj, new_ctype, + guess=False, extra_info=None): + """ + Copies object to itself, but applies a new content-type. The guess + feature requires the container to be CDN-enabled. If not then the + content-type must be supplied. If using guess with a CDN-enabled + container, new_ctype can be set to None. + Failure during the put will result in a swift exception. + """ + cont = self.get_container(container) + obj = self.get_object(cont, obj) + if guess and cont.cdn_enabled: + #Test against the CDN url to guess the content-type. + obj_url = "%s/%s" % (cont.cdn_uri, obj.name) + new_ctype = mimetypes.guess_type(obj_url)[0] + hdrs = {"X-Copy-From": "/%s/%s" % (cont.name, obj.name)} + self.connection.put_object(cont.name, obj.name, contents=None, + headers=hdrs, content_type=new_ctype, + response_dict=extra_info) + cont.remove_from_cache(obj.name) + return + + @handle_swiftclient_exception + def upload_file(self, container, file_or_path, obj_name=None, + content_type=None, etag=None, return_none=False, + content_encoding=None, ttl=None, extra_info=None, + content_length=None): + """ + Uploads the specified file to the container. If no name is supplied, + the file's name will be used. Either a file path or an open file-like + object may be supplied. A StorageObject reference to the uploaded file + will be returned, unless 'return_none' is set to True. + + You may optionally set the `content_type` and `content_encoding` + parameters; pyrax will create the appropriate headers when the object + is stored. + + If the size of the file is known, it can be passed as `content_length`. + + If you wish for the object to be temporary, specify the time it should + be stored in seconds in the `ttl` parameter. 
If this is specified, the
+        object will be deleted after that number of seconds.
+        """
+        # TODO-BC: response_dict when looping? as a list of them?
+        cont = self.get_container(container)
+
+        def get_file_size(fileobj):
+            """Returns the size of a file-like object."""
+            currpos = fileobj.tell()
+            fileobj.seek(0, 2)
+            total_size = fileobj.tell()
+            fileobj.seek(currpos)
+            return total_size
+
+        def upload(fileobj, content_type, etag, headers):
+            if isinstance(fileobj, basestring):
+                # This is an empty directory file
+                fsize = 0
+            else:
+                if content_length is None:
+                    fsize = get_file_size(fileobj)
+                else:
+                    fsize = content_length
+            if fsize < self.max_file_size:
+                # We can just upload it as-is.
+                return self.connection.put_object(cont.name, obj_name,
+                        contents=fileobj, content_type=content_type,
+                        etag=etag, headers=headers, response_dict=extra_info)
+            # Files larger than self.max_file_size must be segmented
+            # and uploaded separately.
+            num_segments = int(math.ceil(float(fsize) / self.max_file_size))
+            digits = int(math.log10(num_segments)) + 1
+            # NOTE: This could be greatly improved with threading or other
+            # async design.
+            for segment in xrange(num_segments):
+                sequence = str(segment + 1).zfill(digits)
+                seg_name = "%s.%s" % (fname, sequence)
+                with utils.SelfDeletingTempfile() as tmpname:
+                    with open(tmpname, "wb") as tmp:
+                        tmp.write(fileobj.read(self.max_file_size))
+                    with open(tmpname, "rb") as tmp:
+                        # We have to calculate the etag for each segment
+                        etag = utils.get_checksum(tmp)
+                        self.connection.put_object(cont.name, seg_name,
+                                contents=tmp, content_type=content_type,
+                                etag=etag, headers=headers,
+                                response_dict=extra_info)
+            # Upload the manifest
+            headers["X-Object-Meta-Manifest"] = "%s." % fname
+            return self.connection.put_object(cont.name, fname,
+                    contents=None, headers=headers,
+                    response_dict=extra_info)
+
+        ispath = isinstance(file_or_path, basestring)
+        if ispath:
+            # Make sure it exists
+            if not os.path.exists(file_or_path):
+                raise exc.FileNotFound("The file '%s' does not exist" %
+                        file_or_path)
+            fname = os.path.basename(file_or_path)
+        else:
+            try:
+                fname = file_or_path.name
+            except AttributeError:
+                fname = None
+        if not obj_name:
+            obj_name = fname
+        if not obj_name:
+            raise exc.InvalidUploadID("No filename provided and/or it cannot "
+                    "be inferred from context")
+
+        headers = {}
+        if content_encoding is not None:
+            headers["Content-Encoding"] = content_encoding
+        if ttl is not None:
+            headers["X-Delete-After"] = ttl
+
+        if ispath and os.path.isfile(file_or_path):
+            # Need to wrap the call in a context manager
+            with open(file_or_path, "rb") as ff:
+                upload(ff, content_type, etag, headers)
+        else:
+            upload(file_or_path, content_type, etag, headers)
+        if return_none:
+            return None
+        else:
+            return self.get_object(container, obj_name)
+
+
+    def upload_folder(self, folder_path, container=None, ignore=None, ttl=None):
+        """
+        Convenience method for uploading an entire folder, including any
+        sub-folders, to Cloud Files.
+
+        All files will be uploaded to objects with the same name as the file.
+        In the case of nested folders, files will be named with the full path
+        relative to the base folder. E.g., if the folder you specify contains a
+        folder named 'docs', and 'docs' contains a file named 'install.html',
+        that file will be uploaded to an object named 'docs/install.html'.
+
+        If 'container' is specified, the folder's contents will be uploaded to
+        that container.
If it is not specified, a new container with the same + name as the specified folder will be created, and the files uploaded to + this new container. + + You can selectively ignore files by passing either a single pattern or + a list of patterns; these will be applied to the individual folder and + file names, and any names that match any of the 'ignore' patterns will + not be uploaded. The patterns should be standard *nix-style shell + patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as + 'program.pyc' and 'abcpyc'. + + The upload will happen asynchronously; in other words, the call to + upload_folder() will generate a UUID and return a 2-tuple of (UUID, + total_bytes) immediately. Uploading will happen in the background; your + app can call get_uploaded(uuid) to get the current status of the + upload. When the upload is complete, the value returned by + get_uploaded(uuid) will match the total_bytes for the upload. + + If you start an upload and need to cancel it, call + cancel_folder_upload(uuid), passing the uuid returned by the initial + call. It will then be up to you to either keep or delete the + partially-uploaded content. + + If you specify a `ttl` parameter, the uploaded files will be deleted + after that number of seconds. + """ + if not os.path.isdir(folder_path): + raise exc.FolderNotFound("No such folder: '%s'" % folder_path) + + ignore = utils.coerce_string_to_list(ignore) + total_bytes = utils.folder_size(folder_path, ignore) + upload_key = str(uuid.uuid4()) + self.folder_upload_status[upload_key] = {"continue": True, + "total_bytes": total_bytes, + "uploaded": 0, + } + self._upload_folder_in_background(folder_path, container, ignore, + upload_key, ttl) + return (upload_key, total_bytes) + + + def _upload_folder_in_background(self, folder_path, container, ignore, + upload_key, ttl=None): + """Runs the folder upload in the background.""" + uploader = FolderUploader(folder_path, container, ignore, upload_key, + self, ttl=ttl) + uploader.start() + + + def sync_folder_to_container(self, folder_path, container, delete=False, + include_hidden=False, ignore=None, ignore_timestamps=False): + """ + Compares the contents of the specified folder, and checks to make sure + that the corresponding object is present in the specified container. If + there is no remote object matching the local file, it is created. If a + matching object exists, the etag is examined to determine if the object + in the container matches the local file; if they differ, the container + is updated with the local file if the local file is newer when + `ignore_timestamps' is False (default). If `ignore_timestamps` is True, + the object is overwritten with the local file contents whenever the + etags differ. NOTE: the timestamp of a remote object is the time it was + uploaded, not the original modification time of the file stored in that + object. Unless 'include_hidden' is True, files beginning with an + initial period are ignored. + + If the 'delete' option is True, any objects in the container that do + not have corresponding files in the local folder are deleted. + + You can selectively ignore files by passing either a single pattern or + a list of patterns; these will be applied to the individual folder and + file names, and any names that match any of the 'ignore' patterns will + not be uploaded. The patterns should be standard *nix-style shell + patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as + 'program.pyc' and 'abcpyc'. 
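+
+        A minimal usage sketch (hypothetical paths and names; skips any
+        '*.log' files and prunes remote objects with no local match):
+
+            clt.sync_folder_to_container("/tmp/site", "web", delete=True,
+                    ignore="*.log")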
""" + cont = self.get_container(container) + self._local_files = [] + self._sync_folder_to_container(folder_path, cont, prefix="", + delete=delete, include_hidden=include_hidden, ignore=ignore, + ignore_timestamps=ignore_timestamps) + + + def _sync_folder_to_container(self, folder_path, cont, prefix, delete, + include_hidden, ignore, ignore_timestamps): + """ + This is the internal method that is called recursively to handle + nested folder structures. + """ + fnames = os.listdir(folder_path) + ignore = utils.coerce_string_to_list(ignore) + if not include_hidden: + ignore.append(".*") + for fname in fnames: + if utils.match_pattern(fname, ignore): + continue + pth = os.path.join(folder_path, fname) + if os.path.isdir(pth): + subprefix = fname + if prefix: + subprefix = "%s/%s" % (prefix, subprefix) + self._sync_folder_to_container(pth, cont, prefix=subprefix, + delete=delete, include_hidden=include_hidden, + ignore=ignore, ignore_timestamps=ignore_timestamps) + continue + self._local_files.append(os.path.join(prefix, fname)) + local_etag = utils.get_checksum(pth) + fullname = fname + if prefix: + fullname = "%s/%s" % (prefix, fname) + try: + obj = cont.get_object(fullname) + obj_etag = obj.etag + except exc.NoSuchObject: + obj = None + obj_etag = None + if local_etag != obj_etag: + if not ignore_timestamps: + if obj: + obj_time_str = obj.last_modified[:19] + else: + obj_time_str = EARLY_DATE_STR + local_mod = datetime.datetime.utcfromtimestamp( + os.stat(pth).st_mtime) + local_mod_str = local_mod.isoformat() + if obj_time_str >= local_mod_str: + # Remote object is newer + continue + cont.upload_file(pth, obj_name=fullname, etag=local_etag, + return_none=True) + if delete and not prefix: + self._delete_objects_not_in_list(cont) + + + def _delete_objects_not_in_list(self, cont): + """ + Finds all the objects in the specified container that are not present + in the self._local_files list, and deletes them. + """ + for obj in cont.get_objects(full_listing=True): + objname = obj.name + if isinstance(objname, unicode): + objname = objname.encode(pyrax.encoding) + if objname not in self._local_files: + obj.delete() + + + def _valid_upload_key(fnc): + def wrapped(self, upload_key, *args, **kwargs): + try: + self.folder_upload_status[upload_key] + except KeyError: + raise exc.InvalidUploadID("There is no folder upload with the " + "key '%s'." % upload_key) + return fnc(self, upload_key, *args, **kwargs) + return wrapped + + + @_valid_upload_key + def _update_progress(self, upload_key, size): + self.folder_upload_status[upload_key]["uploaded"] += size + + + @_valid_upload_key + def get_uploaded(self, upload_key): + """Returns the number of bytes uploaded for the specified process.""" + return self.folder_upload_status[upload_key]["uploaded"] + + + @_valid_upload_key + def cancel_folder_upload(self, upload_key): + """ + Cancels any folder upload happening in the background. If there is no + such upload in progress, calling this method has no effect. + """ + self.folder_upload_status[upload_key]["continue"] = False + + + @_valid_upload_key + def _should_abort_folder_upload(self, upload_key): + """ + Returns True if the user has canceled upload; returns False otherwise. + """ + return not self.folder_upload_status[upload_key]["continue"] + + + @handle_swiftclient_exception + def fetch_object(self, container, obj, include_meta=False, + chunk_size=None, extra_info=None): + """ + Fetches the object from storage. + + If 'include_meta' is False, only the bytes representing the + file is returned. 
+ + Note: if 'chunk_size' is defined, you must fully read the object's + contents before making another request. + + When 'include_meta' is True, what is returned from this method is a + 2-tuple: + Element 0: a dictionary containing metadata about the file. + Element 1: a stream of bytes representing the object's contents. + + extra_info is an optional dictionary which will be + populated with 'status', 'reason', and 'headers' keys from the + underlying swiftclient call. + """ + cname = self._resolve_name(container) + oname = self._resolve_name(obj) + (meta, data) = self.connection.get_object(cname, oname, + resp_chunk_size=chunk_size, response_dict=extra_info) + if include_meta: + return (meta, data) + else: + return data + + + @handle_swiftclient_exception + def download_object(self, container, obj, directory, structure=True): + """ + Fetches the object from storage, and writes it to the specified + directory. The directory must exist before calling this method. + + If the object name represents a nested folder structure, such as + "foo/bar/baz.txt", that folder structure will be created in the target + directory by default. If you do not want the nested folders to be + created, pass `structure=False` in the parameters. + """ + if not os.path.isdir(directory): + raise exc.FolderNotFound("The directory '%s' does not exist." % + directory) + obj_name = self._resolve_name(obj) + path, fname = os.path.split(obj_name) + if structure: + fullpath = os.path.join(directory, path) + if not os.path.exists(fullpath): + os.makedirs(fullpath) + target = os.path.join(fullpath, fname) + else: + target = os.path.join(directory, fname) + with open(target, "wb") as dl: + dl.write(self.fetch_object(container, obj)) + + + @handle_swiftclient_exception + def get_all_containers(self, limit=None, marker=None, **parms): + hdrs, conts = self.connection.get_container("") + ret = [Container(self, name=cont["name"], object_count=cont["count"], + total_bytes=cont["bytes"]) for cont in conts] + return ret + + + @handle_swiftclient_exception + def get_container(self, container): + cname = self._resolve_name(container) + if not cname: + raise exc.MissingName("No container name specified") + cont = self._container_cache.get(cname) + if not cont: + hdrs = self.connection.head_container(cname) + cont = Container(self, name=cname, + object_count=hdrs.get("x-container-object-count"), + total_bytes=hdrs.get("x-container-bytes-used")) + self._container_cache[cname] = cont + return cont + + + @handle_swiftclient_exception + def get_container_objects(self, container, marker=None, limit=None, + prefix=None, delimiter=None, full_listing=False): + """ + Return a list of StorageObjects representing the objects in the + container. You can use the marker and limit params to handle pagination, + and the prefix and delimiter params to filter the objects returned. + Also, by default only the first 10,000 objects are returned; if you set + full_listing to True, all objects in the container are returned. 
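+
+        A pagination sketch (hypothetical container name):
+
+            batch = clt.get_container_objects("photos", limit=100)
+            rest = clt.get_container_objects("photos", limit=100,
+                    marker=batch[-1].name)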
+        """
+        cname = self._resolve_name(container)
+        hdrs, objs = self.connection.get_container(cname, marker=marker,
+                limit=limit, prefix=prefix, delimiter=delimiter,
+                full_listing=full_listing)
+        cont = self.get_container(cname)
+        return [StorageObject(self, container=cont, attdict=obj) for obj in objs
+                if "name" in obj]
+
+
+    @handle_swiftclient_exception
+    def get_container_object_names(self, container, marker=None, limit=None,
+            prefix=None, delimiter=None, full_listing=False):
+        cname = self._resolve_name(container)
+        hdrs, objs = self.connection.get_container(cname, marker=marker,
+                limit=limit, prefix=prefix, delimiter=delimiter,
+                full_listing=full_listing)
+        cont = self.get_container(cname)
+        return [obj["name"] for obj in objs]
+
+
+    @handle_swiftclient_exception
+    def get_info(self):
+        """
+        Returns a tuple for the number of containers and total bytes in
+        the account.
+        """
+        hdrs = self.connection.head_container("")
+        return (hdrs["x-account-container-count"], hdrs["x-account-bytes-used"])
+
+
+    @handle_swiftclient_exception
+    def list_containers(self, limit=None, marker=None, **parms):
+        """Returns a list of all container names as strings."""
+        hdrs, conts = self.connection.get_container("")
+        ret = [cont["name"] for cont in conts]
+        return ret
+
+
+    @handle_swiftclient_exception
+    def list_containers_info(self, limit=None, marker=None, **parms):
+        """Returns a list of info on Containers.
+
+        For each container, a dict containing the following keys is returned:
+            name - the name of the container
+            count - the number of objects in the container
+            bytes - the total bytes in the container
+        """
+        hdrs, conts = self.connection.get_container("")
+        return conts
+
+
+    @handle_swiftclient_exception
+    def list_public_containers(self):
+        """Returns a list of all CDN-enabled containers."""
+        response = self.connection.cdn_request("GET", [""])
+        status = response.status
+        if not 200 <= status < 300:
+            raise exc.CDNFailed("Bad response: (%s) %s" % (status,
+                    response.reason))
+        return response.read().splitlines()
+
+
+    def make_container_public(self, container, ttl=None):
+        """Enables CDN access for the specified container."""
+        return self._cdn_set_access(container, ttl, True)
+
+
+    def make_container_private(self, container):
+        """
+        Disables CDN access to a container. It may still appear public until
+        its TTL expires.
+        """
+        return self._cdn_set_access(container, None, False)
+
+
+    def _cdn_set_access(self, container, ttl, enabled):
+        """Used to enable or disable CDN access on a container."""
+        if ttl is None:
+            ttl = self.default_cdn_ttl
+        ct = self.get_container(container)
+        mthd = "PUT"
+        hdrs = {"X-CDN-Enabled": "%s" % enabled}
+        if enabled:
+            hdrs["X-TTL"] = str(ttl)
+        response = self.connection.cdn_request(mthd, [ct.name], hdrs=hdrs)
+        status = response.status
+        if not 200 <= status < 300:
+            raise exc.CDNFailed("Bad response: (%s) %s" % (status,
+                    response.reason))
+        ct.cdn_ttl = ttl
+        for hdr in response.getheaders():
+            if hdr[0].lower() == "x-cdn-uri":
+                ct.cdn_uri = hdr[1]
+                break
+        self._remove_container_from_cache(container)
+        # Read the response to force it to close for the next request.
+        response.read()
+
+
+    def set_cdn_log_retention(self, container, enabled):
+        """
+        Defer the logic to the container. It will end up calling
+        _set_cdn_log_retention() to change it on Cloud Files.
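+
+        A minimal sketch (hypothetical container name):
+
+            clt.set_cdn_log_retention("photos", True)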
+ """ + cont = self.get_container(container) + cont.cdn_log_retention = enabled + + + def _set_cdn_log_retention(self, container, enabled): + """This does the actual call to the Cloud Files API.""" + hdrs = {"X-Log-Retention": "%s" % enabled} + cname = self._resolve_name(container) + response = self.connection.cdn_request("POST", [cname], hdrs=hdrs) + status = response.status + if not 200 <= status < 300: + raise exc.CDNFailed("Bad response: (%s) %s" % (status, + response.reason)) + # Read the response to force it to close for the next request. + response.read() + + + def get_container_streaming_uri(self, container): + """ + Returns the URI for streaming content, or None if CDN is not enabled. + """ + cont = self.get_container(container) + return cont.cdn_streaming_uri + + + def get_container_ios_uri(self, container): + """Returns the iOS URI, or None if CDN is not enabled.""" + cont = self.get_container(container) + return cont.cdn_ios_uri + + + def set_container_web_index_page(self, container, page): + """ + Sets the header indicating the index page in a container + when creating a static website. + + Note: the container must be CDN-enabled for this to have + any effect. + """ + hdr = {"X-Container-Meta-Web-Index": page} + return self.set_container_metadata(container, hdr, clear=False) + + + def set_container_web_error_page(self, container, page): + """ + Sets the header indicating the error page in a container + when creating a static website. + + Note: the container must be CDN-enabled for this to have + any effect. + """ + hdr = {"X-Container-Meta-Web-Error": page} + return self.set_container_metadata(container, hdr, clear=False) + + + @handle_swiftclient_exception + def purge_cdn_object(self, container, name, email_addresses=None): + ct = self.get_container(container) + oname = self._resolve_name(name) + if not ct.cdn_enabled: + raise exc.NotCDNEnabled("The object '%s' is not in a " + "CDN-enabled container." % oname) + hdrs = {} + if email_addresses: + if not isinstance(email_addresses, (list, tuple)): + email_addresses = [email_addresses] + emls = ", ".join(email_addresses) + hdrs = {"X-Purge-Email": emls} + response = self.connection.cdn_request("DELETE", [ct.name, oname], + hdrs=hdrs) + # Read the response to force it to close for the next request. 
+ response.read() + return True + + + def _get_user_agent(self): + return self.connection.user_agent + + def _set_user_agent(self, val): + self.connection.user_agent = val + + user_agent = property(_get_user_agent, _set_user_agent) + + + def _get_http_log_debug(self): + return self._http_log_debug + + def _set_http_log_debug(self, val): + self._http_log_debug = val + if val: + os.environ["SWIFTCLIENT_DEBUG"] = "True" + else: + os.environ.pop("SWIFTCLIENT_DEBUG", False) + + http_log_debug = property(_get_http_log_debug, _set_http_log_debug, None, + "Determines if all http traffic is logged to the display " + "for debugging.") + + + +class Connection(_swift_client.Connection): + """This class wraps the swiftclient connection, adding support for CDN""" + def __init__(self, *args, **kwargs): + self.http_log_debug = kwargs.pop("http_log_debug", False) + self._http_log = _swift_client.http_log + self.url = None + super(Connection, self).__init__(*args, **kwargs) + # Add the user_agent, if not defined + try: + self.user_agent + except AttributeError: + self.user_agent = "swiftclient" + self.cdn_connection = None + + + def _make_cdn_connection(self, cdn_url=None): + if cdn_url is not None: + self.cdn_url = cdn_url + parsed = urlparse.urlparse(self.cdn_url) + is_ssl = parsed.scheme == "https" + + # Verify hostnames are valid and parse a port spec (if any) + match = re.match(r"([a-zA-Z0-9\-\.]+):?([0-9]{2,5})?", parsed.netloc) + if match: + (host, port) = match.groups() + else: + host = parsed.netloc + port = None + if not port: + port = 443 if is_ssl else 80 + port = int(port) + path = parsed.path.strip("/") + conn_class = httplib.HTTPSConnection if is_ssl else httplib.HTTPConnection + self.cdn_connection = conn_class(host, port, timeout=CONNECTION_TIMEOUT) + self.cdn_connection.is_ssl = is_ssl + + + def cdn_request(self, method, path=[], data="", hdrs=None): + """ + Given a method (i.e. GET, PUT, POST, etc.), a path, data, header and + metadata dicts, performs an http request against the CDN service. + + Taken directly from the cloudfiles library and modified for use here. + """ + def quote(val): + if isinstance(val, unicode): + val = val.encode("utf-8") + return urllib.quote(val) + + pth = "/".join([quote(elem) for elem in path]) + uri_path = urlparse.urlparse(self.uri).path + path = "%s/%s" % (uri_path.rstrip("/"), pth) + headers = {"Content-Length": str(len(data)), + "User-Agent": self.user_agent, + "X-Auth-Token": self.token} + if isinstance(hdrs, dict): + headers.update(hdrs) + + attempt = 0 + response = None + while attempt < CONNECTION_RETRIES: + if attempt: + # Last try failed; re-create the connection + self._make_cdn_connection() + try: + self.cdn_connection.request(method, path, data, headers) + response = self.cdn_connection.getresponse() + except (socket.error, IOError, httplib.HTTPException) as e: + response = None + if response: + if response.status == 401: + pyrax.identity.authenticate() + headers["X-Auth-Token"] = pyrax.identity.token + else: + break + attempt += 1 + if self.http_log_debug: + self._http_log((path, method), {"headers": headers, "data": data}, + response, "") + return response + + + @property + def uri(self): + return self.url + + + +class FolderUploader(threading.Thread): + """ + Threading class to allow for uploading multiple files in the background. 
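+
+    A sketch of the public flow that drives this class (hypothetical
+    values; see CFClient.upload_folder()):
+
+        key, total = clt.upload_folder("/tmp/site", container="web")
+        done_so_far = clt.get_uploaded(key)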
+ """ + def __init__(self, root_folder, container, ignore, upload_key, client, + ttl=None): + self.root_folder = root_folder.rstrip("/") + if container: + self.container = client.create_container(container) + else: + self.container = None + self.ignore = utils.coerce_string_to_list(ignore) + self.upload_key = upload_key + self.ttl = ttl + self.client = client + threading.Thread.__init__(self) + + def folder_name_from_path(self, pth): + """Convenience method that first strips trailing path separators.""" + return os.path.basename(pth.rstrip(os.sep)) + + def upload_files_in_folder(self, arg, dirname, fnames): + """Handles the iteration across files within a folder.""" + if utils.match_pattern(dirname, self.ignore): + return False + for fname in (nm for nm in fnames + if not utils.match_pattern(nm, self.ignore)): + if self.client._should_abort_folder_upload(self.upload_key): + return + full_path = os.path.join(dirname, fname) + if os.path.isdir(full_path): + # Skip folders; os.walk will include them in the next pass. + continue + obj_name = os.path.relpath(full_path, self.base_path) + obj_size = os.stat(full_path).st_size + self.client.upload_file(self.container, full_path, + obj_name=obj_name, return_none=True, ttl=self.ttl) + self.client._update_progress(self.upload_key, obj_size) + + def run(self): + """Starts the uploading thread.""" + root_path, folder_name = os.path.split(self.root_folder) + self.base_path = os.path.join(root_path, folder_name) + os.path.walk(self.root_folder, self.upload_files_in_folder, None) diff --git a/awx/lib/site-packages/pyrax/cf_wrapper/container.py b/awx/lib/site-packages/pyrax/cf_wrapper/container.py new file mode 100644 index 0000000000..42cf8aca3e --- /dev/null +++ b/awx/lib/site-packages/pyrax/cf_wrapper/container.py @@ -0,0 +1,364 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import pyrax +from pyrax import exceptions as exc + +# Used to indicate values that are lazy-loaded +class Fault(object): + def __nonzero__(self): + return False + +FAULT = Fault() + + +class Container(object): + """Represents a CloudFiles container.""" + def __init__(self, client, name, object_count=None, total_bytes=None): + self.client = client + self.name = name + self.object_count = int(object_count) + self.total_bytes = int(total_bytes) + self._cdn_uri = FAULT + self._cdn_ttl = FAULT + self._cdn_ssl_uri = FAULT + self._cdn_streaming_uri = FAULT + self._cdn_ios_uri = FAULT + self._cdn_log_retention = FAULT + self._object_cache = {} + + + def _set_cdn_defaults(self): + """Sets all the CDN-related attributes to default values.""" + self._cdn_uri = None + self._cdn_ttl = self.client.default_cdn_ttl + self._cdn_ssl_uri = None + self._cdn_streaming_uri = None + self._cdn_ios_uri = None + self._cdn_log_retention = False + + + def _fetch_cdn_data(self): + """Fetches the object's CDN data from the CDN service""" + response = self.client.connection.cdn_request("HEAD", [self.name]) + if 200 <= response.status < 300: + # Set defaults in case not all headers are present. + self._set_cdn_defaults() + for hdr in response.getheaders(): + low_hdr = hdr[0].lower() + if low_hdr == "x-cdn-uri": + self._cdn_uri = hdr[1] + elif low_hdr == "x-ttl": + self._cdn_ttl = int(hdr[1]) + elif low_hdr == "x-cdn-ssl-uri": + self._cdn_ssl_uri = hdr[1] + elif low_hdr == "x-cdn-streaming-uri": + self._cdn_streaming_uri = hdr[1] + elif low_hdr == "x-cdn-ios-uri": + self._cdn_ios_uri = hdr[1] + elif low_hdr == "x-log-retention": + self._cdn_log_retention = (hdr[1] == "True") + elif response.status == 404: + # Not CDN enabled; set the defaults. + self._set_cdn_defaults() + # We need to read the response in order to clear it for + # the next call + response.read() + + + def get_objects(self, marker=None, limit=None, prefix=None, delimiter=None, + full_listing=False): + """ + Returns a list of StorageObjects representing the objects in the + container. You can use the marker and limit params to handle pagination, + and the prefix and delimiter params to filter the objects returned. + Also, by default only the first 10,000 objects are returned; if you set + full_listing to True, all objects in the container are returned. + """ + objs = self.client.get_container_objects(self.name, marker=marker, + limit=limit, prefix=prefix, delimiter=delimiter, + full_listing=full_listing) + return objs + + + def get_object(self, name): + """ + Return the StorageObject in this container with the + specified name. + """ + if isinstance(name, str): + name = name.decode(pyrax.get_encoding()) + ret = self._object_cache.get(name) + if not ret: + ret = self.client.get_object(self, name) + self._object_cache[name] = ret + return ret + + + def get_object_names(self, marker=None, limit=None, prefix=None, + delimiter=None, full_listing=False): + """ + Returns a list of the names of all the objects in this container. The + same pagination parameters apply as in self.get_objects(). + """ + objs = self.get_objects(marker=marker, limit=limit, prefix=prefix, + delimiter=delimiter, full_listing=full_listing) + return [obj.name for obj in objs] + + + def store_object(self, obj_name, data, content_type=None, etag=None, + content_encoding=None, ttl=None): + """ + Creates a new object in this container, and populates it with + the given data. 
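+
+        A minimal usage sketch (hypothetical names; `cont` is a Container):
+
+            cont.store_object("hello.txt", "Hello, world!",
+                    content_type="text/plain")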
+        """
+        return self.client.store_object(self, obj_name, data,
+                content_type=content_type, etag=etag,
+                content_encoding=content_encoding, ttl=ttl)
+
+
+    def upload_file(self, file_or_path, obj_name=None, content_type=None,
+            etag=None, return_none=False, content_encoding=None, ttl=None,
+            content_length=None):
+        """
+        Uploads the specified file to this container. If no name is supplied,
+        the file's name will be used. Either a file path or an open file-like
+        object may be supplied. A StorageObject reference to the uploaded file
+        will be returned, unless 'return_none' is set to True.
+        """
+        return self.client.upload_file(self, file_or_path, obj_name=obj_name,
+                content_type=content_type, etag=etag, return_none=return_none,
+                content_encoding=content_encoding, ttl=ttl,
+                content_length=content_length)
+
+
+    def delete_object(self, obj):
+        """Deletes the specified object from this container."""
+        self.remove_from_cache(obj)
+        return self.client.delete_object(self, obj)
+
+
+    def delete_all_objects(self):
+        """Deletes all objects from this container."""
+        for obj_name in self.client.get_container_object_names(self):
+            self.client.delete_object(self, obj_name)
+
+
+    def remove_from_cache(self, obj):
+        """Removes the object from the cache."""
+        nm = self.client._resolve_name(obj)
+        self._object_cache.pop(nm, None)
+
+
+    def delete(self, del_objects=False):
+        """
+        Deletes this Container. If the container contains objects, the
+        command will fail unless 'del_objects' is passed as True. In that
+        case, each object will be deleted first, and then the container.
+        """
+        return self.client.delete_container(self.name, del_objects=del_objects)
+
+
+    def fetch_object(self, obj_name, include_meta=False, chunk_size=None):
+        """
+        Fetches the object from storage.
+
+        If 'include_meta' is False, only the bytes representing the
+        file are returned.
+
+        Note: if 'chunk_size' is defined, you must fully read the object's
+        contents before making another request.
+
+        When 'include_meta' is True, what is returned from this method is
+        a 2-tuple:
+            Element 0: a dictionary containing metadata about the file.
+            Element 1: a stream of bytes representing the object's contents.
+        """
+        return self.client.fetch_object(self, obj_name,
+                include_meta=include_meta, chunk_size=chunk_size)
+
+
+    def download_object(self, obj_name, directory, structure=True):
+        """
+        Fetches the object from storage, and writes it to the specified
+        directory. The directory must exist before calling this method.
+
+        If the object name represents a nested folder structure, such as
+        "foo/bar/baz.txt", that folder structure will be created in the target
+        directory by default. If you do not want the nested folders to be
+        created, pass `structure=False` in the parameters.
+        """
+        return self.client.download_object(self, obj_name, directory,
+                structure=structure)
+
+
+    def get_metadata(self):
+        return self.client.get_container_metadata(self)
+
+
+    def set_metadata(self, metadata, clear=False):
+        return self.client.set_container_metadata(self, metadata, clear=clear)
+
+
+    def remove_metadata_key(self, key):
+        """
+        Removes the specified key from the container's metadata. If the key
+        does not exist in the metadata, nothing is done.
+        """
+        return self.client.remove_container_metadata_key(self, key)
+
+
+    def set_web_index_page(self, page):
+        """
+        Sets the header indicating the index page for this container
+        when creating a static website.
+
+        Note: the container must be CDN-enabled for this to have
+        any effect.
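+
+        A minimal sketch (hypothetical page name):
+
+            cont.set_web_index_page("index.html")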
+ """ + return self.client.set_container_web_index_page(self, page) + + + def set_web_error_page(self, page): + """ + Sets the header indicating the error page for this container + when creating a static website. + + Note: the container must be CDN-enabled for this to have + any effect. + """ + return self.client.set_container_web_error_page(self, page) + + + def make_public(self, ttl=None): + """Enables CDN access for the specified container.""" + return self.client.make_container_public(self, ttl) + + + def make_private(self): + """ + Disables CDN access to this container. It may still appear public until + its TTL expires. + """ + return self.client.make_container_private(self) + + + def change_object_content_type(self, obj, new_ctype, guess=False): + """ + Copies object to itself, but applies a new content-type. The guess + feature requires the container to be CDN-enabled. If not then the + content-type must be supplied. If using guess with a CDN-enabled + container, new_ctype can be set to None. + Failure during the put will result in a swift exception. + """ + self.client.change_object_content_type(self, obj, new_ctype=new_ctype, + guess=guess) + + + def get_temp_url(self, obj, seconds, method="GET"): + """ + Returns a URL that can be used to access the specified object in this + container. The URL will expire after `seconds` seconds. + + The only methods supported are GET and PUT. Anything else will raise + an InvalidTemporaryURLMethod exception. + """ + return self.client.get_temp_url(self, obj, seconds=seconds, + method=method) + + + def delete_object_in_seconds(self, obj, seconds): + """ + Sets the object to be deleted after the specified number of seconds. + """ + self.client.delete_object_in_seconds(self, obj, seconds) + + + def __repr__(self): + return "<Container '%s'>" % self.name + + + ## BEGIN - CDN property definitions ## + @property + def cdn_enabled(self): + return bool(self.cdn_uri) + + def _get_cdn_log_retention(self): + if self._cdn_log_retention is FAULT: + self._fetch_cdn_data() + return self._cdn_log_retention + + def _set_cdn_log_retention(self, val): + self.client._set_cdn_log_retention(self, val) + self._cdn_log_retention = val + + + def _get_cdn_uri(self): + if self._cdn_uri is FAULT: + self._fetch_cdn_data() + return self._cdn_uri + + def _set_cdn_uri(self, val): + self._cdn_uri = val + + + def _get_cdn_ttl(self): + if self._cdn_ttl is FAULT: + self._fetch_cdn_data() + return self._cdn_ttl + + def _set_cdn_ttl(self, val): + self._cdn_ttl = val + + + def _get_cdn_ssl_uri(self): + if self._cdn_ssl_uri is FAULT: + self._fetch_cdn_data() + return self._cdn_ssl_uri + + def _set_cdn_ssl_uri(self, val): + self._cdn_ssl_uri = val + + + def _get_cdn_streaming_uri(self): + if self._cdn_streaming_uri is FAULT: + self._fetch_cdn_data() + return self._cdn_streaming_uri + + def _set_cdn_streaming_uri(self, val): + self._cdn_streaming_uri = val + + + def _get_cdn_ios_uri(self): + if self._cdn_ios_uri is FAULT: + self._fetch_cdn_data() + return self._cdn_ios_uri + + def _set_cdn_ios_uri(self, val): + self._cdn_ios_uri = val + + + cdn_log_retention = property(_get_cdn_log_retention, _set_cdn_log_retention) + cdn_uri = property(_get_cdn_uri, _set_cdn_uri) + cdn_ttl = property(_get_cdn_ttl, _set_cdn_ttl) + cdn_ssl_uri = property(_get_cdn_ssl_uri, _set_cdn_ssl_uri) + cdn_streaming_uri = property(_get_cdn_streaming_uri, _set_cdn_streaming_uri) + cdn_ios_uri = property(_get_cdn_ios_uri, _set_cdn_ios_uri) + ## END - CDN property definitions ## diff --git 
a/awx/lib/site-packages/pyrax/cf_wrapper/storage_object.py b/awx/lib/site-packages/pyrax/cf_wrapper/storage_object.py new file mode 100644 index 0000000000..125135235b --- /dev/null +++ b/awx/lib/site-packages/pyrax/cf_wrapper/storage_object.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class StorageObject(object): + """Represents a CloudFiles storage object.""" + def __init__(self, client, container, name=None, total_bytes=None, + content_type=None, last_modified=None, etag=None, attdict=None): + """ + The object can either be initialized with individual params, or by + passing the dict that is returned by swiftclient. + """ + self.client = client + if isinstance(container, basestring): + self.container = self.client.get_container(container) + else: + self.container = container + self.name = name + self.total_bytes = total_bytes + self.content_type = content_type + self.last_modified = last_modified + self.etag = etag + if attdict: + self._read_attdict(attdict) + + + def _read_attdict(self, dct): + """ + Populates the object attributes using the dict returned by swiftclient. + """ + self.name = dct.get("name") + if not self.name: + # Could be a pseudo-subdirectory + self.name = dct.get("subdir").rstrip("/") + self.content_type = "pseudo/subdir" + else: + self.content_type = dct.get("content_type") + self.total_bytes = dct.get("bytes") + self.last_modified = dct.get("last_modified") + self.etag = dct.get("hash") + + + def get(self, include_meta=False, chunk_size=None): + """ + Fetches the object from storage. + + If 'include_meta' is False, only the bytes representing the + file is returned. + + Note: if 'chunk_size' is defined, you must fully read the object's + contents before making another request. + + When 'include_meta' is True, what is returned from this method is a + 2-tuple: + Element 0: a dictionary containing metadata about the file. + Element 1: a stream of bytes representing the object's contents. + """ + return self.client.fetch_object(container=self.container.name, + obj=self, include_meta=include_meta, chunk_size=chunk_size) + # Changing the name of this method to 'fetch', as 'get' is overloaded. + fetch = get + + + def download(self, directory, structure=True): + """ + Fetches the object from storage, and writes it to the specified + directory. The directory must exist before calling this method. + + If the object name represents a nested folder structure, such as + "foo/bar/baz.txt", that folder structure will be created in the target + directory by default. If you do not want the nested folders to be + created, pass `structure=False` in the parameters. 
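+
+        A minimal usage sketch (hypothetical path; the directory must
+        already exist):
+
+            obj.download("/tmp/backup", structure=False)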
+ """ + return self.client.download_object(self.container, self, directory, + structure=structure) + + + def delete(self): + """Deletes the object from storage.""" + self.client.delete_object(container=self.container.name, name=self.name) + + + def purge(self, email_addresses=[]): + """ + Purges the object from the CDN network, sending an optional + email confirmation. + """ + self.client.purge_cdn_object(container=self.container.name, + name=self.name, email_addresses=email_addresses) + + + def get_metadata(self): + """Returns this object's metadata.""" + return self.client.get_object_metadata(self.container, self) + + + def set_metadata(self, metadata, clear=False): + """ + Sets this object's metadata, optionally clearing existing metadata. + """ + self.client.set_object_metadata(self.container, self, metadata, + clear=clear) + + + def remove_metadata_key(self, key): + """ + Removes the specified key from the storage object's metadata. If the + key does not exist in the metadata, nothing is done. + """ + self.client.remove_object_metadata_key(self.container, self, key) + + + def change_content_type(self, new_ctype, guess=False): + """ + Copies object to itself, but applies a new content-type. The guess + feature requires the container to be CDN-enabled. If not then the + content-type must be supplied. If using guess with a CDN-enabled + container, new_ctype can be set to None. + Failure during the put will result in a swift exception. + """ + self.client.change_object_content_type(self.container, self, + new_ctype=new_ctype, guess=guess) + + + def get_temp_url(self, seconds, method="GET"): + """ + Returns a URL that can be used to access this object. The URL will + expire after `seconds` seconds. + + The only methods supported are GET and PUT. Anything else will raise + an InvalidTemporaryURLMethod exception. + """ + return self.client.get_temp_url(self.container, self, seconds=seconds, + method=method) + + + def delete_in_seconds(self, seconds): + """ + Sets the object to be deleted after the specified number of seconds. + """ + self.client.delete_object_in_seconds(self.container, self, seconds) + + + def __repr__(self): + return "<Object '%s' (%s)>" % (self.name, self.content_type) diff --git a/awx/lib/site-packages/pyrax/client.py b/awx/lib/site-packages/pyrax/client.py new file mode 100644 index 0000000000..2a167daeda --- /dev/null +++ b/awx/lib/site-packages/pyrax/client.py @@ -0,0 +1,286 @@ +# Copyright 2010 Jacob Kaplan-Moss +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Piston Cloud Computing, Inc. +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +OpenStack Client interface. Handles the REST calls and responses. 
+""" + +import httplib2 +import json +import logging +import os +import pkg_resources +import time +from urllib import quote +import urlparse + +from manager import BaseManager +from resource import BaseResource +import pyrax +import pyrax.exceptions as exc +import pyrax.service_catalog as service_catalog +import pyrax.utils as utils + + +class BaseClient(httplib2.Http): + """ + The base class for all pyrax clients. + """ + # This will get set by pyrax when the service is started. + user_agent = None + # Each client subclass should set their own name. + name = "base" + + def __init__(self, region_name=None, endpoint_type="publicURL", + management_url=None, service_type=None, service_name=None, + timings=False, verify_ssl=True, http_log_debug=False, + timeout=None): + super(BaseClient, self).__init__(timeout=timeout) + self.version = "v1.1" + self.region_name = region_name + self.endpoint_type = endpoint_type + self.service_type = service_type + self.service_name = service_name + self.management_url = management_url + self.timings = timings + self.verify_ssl = verify_ssl + self.http_log_debug = http_log_debug + self.times = [] # [("item", starttime, endtime), ...] + + # httplib2 overrides + self.force_exception_to_status_code = True + self.disable_ssl_certificate_validation = not verify_ssl + + self._logger = logging.getLogger(self.__class__.__name__) + ch = logging.StreamHandler() + self._logger.setLevel(logging.DEBUG) + self._logger.addHandler(ch) + self._manager = None + # Hook method for subclasses to create their manager instance + # without having to override __init__(). + self._configure_manager() + + + def _configure_manager(self): + """ + This must be overridden in base classes to create + the required manager class and configure it as needed. + """ + raise NotImplementedError + + + # The next 6 methods are simple pass-through to the manager. + def list(self, limit=None, marker=None): + """Returns a list of all resources.""" + return self._manager.list(limit=limit, marker=marker) + + + def get(self, item): + """Gets a specific resource.""" + return self._manager.get(item) + + + def create(self, *args, **kwargs): + """Creates a new resource.""" + return self._manager.create(*args, **kwargs) + + + def delete(self, item): + """Deletes a specific resource.""" + return self._manager.delete(item) + + + def find(self, **kwargs): + """ + Finds a single item with attributes matching ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. + """ + return self._manager.find(**kwargs) + + + def findall(self, **kwargs): + """ + Finds all items with attributes matching ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. + """ + return self._manager.findall(**kwargs) + + + def unauthenticate(self): + """Clears all of our authentication information.""" + pyrax.identity.unauthenticate() + + + def get_timings(self): + """Returns a list of all execution timings.""" + return self.times + + + def reset_timings(self): + """Clears the timing history.""" + self.times = [] + + + def http_log_req(self, args, kwargs): + """ + When self.http_log_debug is True, outputs the equivalent `curl` + command for the API request being made. 
+ """ + if not self.http_log_debug: + return + + string_parts = ["curl -i"] + for element in args: + if element in ("GET", "POST", "PUT", "DELETE", "HEAD"): + string_parts.append(" -X %s" % element) + else: + string_parts.append(" %s" % element) + + for element in kwargs["headers"]: + header = " -H '%s: %s'" % (element, kwargs["headers"][element]) + string_parts.append(header) + + self._logger.debug("\nREQ: %s\n" % "".join(string_parts)) + if "body" in kwargs: + self._logger.debug("REQ BODY: %s\n" % (kwargs["body"])) + + + def http_log_resp(self, resp, body): + """ + When self.http_log_debug is True, outputs the response received + from the API request. + """ + if not self.http_log_debug: + return + self._logger.debug("RESP: %s %s\n", resp, body) + + + def request(self, *args, **kwargs): + """ + Formats the request into a dict representing the headers + and body that will be used to make the API call. + """ + kwargs.setdefault("headers", kwargs.get("headers", {})) + kwargs["headers"]["User-Agent"] = self.user_agent + kwargs["headers"]["Accept"] = "application/json" + if "body" in kwargs: + kwargs["headers"]["Content-Type"] = "application/json" + kwargs["body"] = json.dumps(kwargs["body"]) + self.http_log_req(args, kwargs) + resp, body = super(BaseClient, self).request(*args, **kwargs) + self.http_log_resp(resp, body) + + if body: + try: + body = json.loads(body) + except ValueError: + pass + else: + body = None + + if resp.status >= 400: + raise exc.from_response(resp, body) + + return resp, body + + def _time_request(self, uri, method, **kwargs): + """Wraps the request call and records the elapsed time.""" + start_time = time.time() + resp, body = self.request(uri, method, **kwargs) + self.times.append(("%s %s" % (method, uri), + start_time, time.time())) + return resp, body + + def _api_request(self, uri, method, **kwargs): + """ + Manages the request by adding any auth information, and retries + the request after authenticating if the initial request returned + and Unauthorized exception. + """ + id_svc = pyrax.identity + if not all((self.management_url, id_svc.token, id_svc.tenant_id)): + id_svc.authenticate() + + if not self.management_url: + # We've authenticated but no management_url has been set. This + # indicates that the service is not available. + raise exc.ServiceNotAvailable("The '%s' service is not available." + % self) + # Perform the request once. If we get a 401 back then it + # might be because the auth token expired, so try to + # re-authenticate and try again. If it still fails, bail. 
+ try: + kwargs.setdefault("headers", {})["X-Auth-Token"] = id_svc.token + if id_svc.tenant_id: + kwargs["headers"]["X-Auth-Project-Id"] = id_svc.tenant_id + resp, body = self._time_request(self.management_url + + quote(uri, safe="/.?&="), method, **kwargs) + return resp, body + except exc.Unauthorized as ex: + try: + id_svc.authenticate() + kwargs["headers"]["X-Auth-Token"] = id_svc.token + resp, body = self._time_request(self.management_url + uri, + method, **kwargs) + return resp, body + except exc.Unauthorized: + raise ex + + + def method_get(self, uri, **kwargs): + """Method used to make GET requests.""" + return self._api_request(uri, "GET", **kwargs) + + + def method_post(self, uri, **kwargs): + """Method used to make POST requests.""" + return self._api_request(uri, "POST", **kwargs) + + + def method_put(self, uri, **kwargs): + """Method used to make PUT requests.""" + return self._api_request(uri, "PUT", **kwargs) + + + def method_delete(self, uri, **kwargs): + """Method used to make DELETE requests.""" + return self._api_request(uri, "DELETE", **kwargs) + + + def authenticate(self): + """ + Handles all aspects of authentication against the cloud provider. + Currently this has only been tested with Rackspace auth; if you wish + to use this library with a different OpenStack provider, you may have + to modify this method. Please post your findings on GitHub so that + others can benefit. + """ + return pyrax.identity.authenticate() + + + @property + def projectid(self): + """ + The older parts of this code used 'projectid'; this wraps that + reference. + """ + return pyrax.identity.tenant_id diff --git a/awx/lib/site-packages/pyrax/cloudblockstorage.py b/awx/lib/site-packages/pyrax/cloudblockstorage.py new file mode 100644 index 0000000000..6dbe04bf4c --- /dev/null +++ b/awx/lib/site-packages/pyrax/cloudblockstorage.py @@ -0,0 +1,390 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from functools import wraps +import time + +import pyrax +from pyrax.client import BaseClient +import pyrax.exceptions as exc +from pyrax.manager import BaseManager +from pyrax.resource import BaseResource +import pyrax.utils as utils + + +MIN_SIZE = 100 +MAX_SIZE = 1024 +RETRY_INTERVAL = 5 + + +def _resolve_id(val): + """Takes an object or an ID and returns the ID.""" + return val if isinstance(val, basestring) else val.id + + +def _resolve_name(val): + """Takes an object or a name and returns the name.""" + return val if isinstance(val, basestring) else val.name + + +def assure_volume(fnc): + """ + Converts a volumeID passed as the volume to a CloudBlockStorageVolume object. 
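+
+    Methods decorated this way therefore accept either form; e.g. (the ID
+    value is an illustrative placeholder)::
+
+        client.delete_volume(volume_obj)
+        client.delete_volume("6f0f9a7b...")    # resolved via the manager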
+ """ + @wraps(fnc) + def _wrapped(self, volume, *args, **kwargs): + if not isinstance(volume, CloudBlockStorageVolume): + # Must be the ID + volume = self._manager.get(volume) + return fnc(self, volume, *args, **kwargs) + return _wrapped + + +def assure_snapshot(fnc): + """ + Converts a snapshot ID passed as the snapshot to a CloudBlockStorageSnapshot + object. + """ + @wraps(fnc) + def _wrapped(self, snapshot, *args, **kwargs): + if not isinstance(snapshot, CloudBlockStorageSnapshot): + # Must be the ID + snapshot = self._snaps_manager.get(snapshot) + return fnc(self, snapshot, *args, **kwargs) + return _wrapped + + +class CloudBlockStorageSnapshot(BaseResource): + """ + This class represents a Snapshot (copy) of a Block Storage Volume. + """ + def delete(self): + """ + Adds a check to make sure that the snapshot is able to be deleted. + """ + if not self.status in ("available", "error"): + raise exc.SnapshotNotAvailable("Snapshot must be in 'available' " + "or 'error' status before deleting. Current status: %s" % + self.status) + # When there are more thann one snapshot for a given volume, attempting to + # delete them all will throw a 409 exception. This will help by retrying + # such an error once after a RETRY_INTERVAL second delay. + try: + super(CloudBlockStorageSnapshot, self).delete() + except exc.ClientException as e: + if "Request conflicts with in-progress 'DELETE" in str(e): + time.sleep(RETRY_INTERVAL) + # Try again; if it fails, oh, well... + super(CloudBlockStorageSnapshot, self).delete() + + + def _get_name(self): + return self.display_name + + def _set_name(self, val): + self.display_name = val + + name = property(_get_name, _set_name, None, + "Convenience for referencing the display_name.") + + def _get_description(self): + return self.display_description + + def _set_description(self, val): + self.display_description = val + + description = property(_get_description, _set_description, None, + "Convenience for referencing the display_description.") + + +class CloudBlockStorageVolumeType(BaseResource): + """ + This class represents a Block Storage Volume Type. + """ + pass + + +class CloudBlockStorageVolume(BaseResource): + """ + This class represents a Block Storage volume. + """ + def __init__(self, *args, **kwargs): + super(CloudBlockStorageVolume, self).__init__(*args, **kwargs) + try: + region = self.manager.api.region_name + self._nova_volumes = pyrax.connect_to_cloudservers(region).volumes + except AttributeError: + # This will happen in unit testing, where the full pyrax + # namespace is not exposed. In that situation, there is + # no need for the reference anyway + pass + self._snapshot_manager = BaseManager(self.manager.api, + resource_class=CloudBlockStorageSnapshot, + response_key="snapshot", uri_base="snapshots") + + + def attach_to_instance(self, instance, mountpoint): + """ + Attaches this volume to the cloud server instance at the + specified mountpoint. This requires a call to the cloud servers + API; it cannot be done directly. + """ + instance_id = _resolve_id(instance) + try: + resp = self._nova_volumes.create_server_volume(instance_id, + self.id, mountpoint) + except Exception as e: + raise exc.VolumeAttachmentFailed("%s" % e) + + + def detach(self): + """ + Detaches this volume from any device it may be attached to. If it + is not attached, nothing happens. 
+ """ + attachments = self.attachments + if not attachments: + # Not attached; no error needed, just return + return + # A volume can only be attached to one device at a time, but for some + # reason this is a list instead of a singular value + att = attachments[0] + instance_id = att["server_id"] + attachment_id = att["id"] + try: + self._nova_volumes.delete_server_volume(instance_id, attachment_id) + except Exception as e: + raise exc.VolumeDetachmentFailed("%s" % e) + + + def delete(self, force=False): + """ + Volumes cannot be deleted if either a) they are attached to a device, or + b) they have any snapshots. This method overrides the base delete() + method to both better handle these failures, and also to offer a 'force' + option. When 'force' is True, the volume is detached, and any dependent + snapshots are deleted before calling the volume's delete. + """ + if force: + self.detach() + self.delete_all_snapshots() + try: + super(CloudBlockStorageVolume, self).delete() + except exc.VolumeNotAvailable: + # Notify the user? Record it somewhere? + # For now, just re-raise + raise + + + def create_snapshot(self, name=None, description=None, force=False): + """ + Creates a snapshot of this volume, with an optional name and + description. + + Normally snapshots will not happen if the volume is attached. To + override this default behavior, pass force=True. + """ + name = name or "" + description = description or "" + # Note that passing in non-None values is required for the _create_body + # method to distinguish between this and the request to create and + # instance. + try: + snap = self._snapshot_manager.create(volume=self, name=name, + description=description, force=force) + except exc.BadRequest as e: + msg = str(e) + if "Invalid volume: must be available" in msg: + # The volume for the snapshot was attached. + raise exc.VolumeNotAvailable("Cannot create a snapshot from an " + "attached volume. Detach the volume before trying again, " + "or pass 'force=True' to the create_snapshot() call.") + else: + # Some other error + raise + except exc.ClientException as e: + if e.code == 409: + if "Request conflicts with in-progress" in str(e): + raise exc.VolumeNotAvailable("The volume is current " + "creating a snapshot. You must wait until that " + "completes before attempting to create an " + "additional snapshot.") + else: + raise + else: + raise + return snap + + + def list_snapshots(self): + """ + Returns a list of all snapshots of this volume. + """ + return [snap for snap in self._snapshot_manager.list() + if snap.volume_id == self.id] + + + def delete_all_snapshots(self): + """ + Locates all snapshots of this volume and deletes them. + """ + for snap in self.list_snapshots(): + snap.delete() + + + def _get_name(self): + return self.display_name + + def _set_name(self, val): + self.display_name = val + + name = property(_get_name, _set_name, None, + "Convenience for referencing the display_name.") + + def _get_description(self): + return self.display_description + + def _set_description(self, val): + self.display_description = val + + description = property(_get_description, _set_description, None, + "Convenience for referencing the display_description.") + + +class CloudBlockStorageClient(BaseClient): + """ + This is the primary class for interacting with Cloud Block Storage. + """ + name = "Cloud Block Storage" + + def _configure_manager(self): + """ + Create the manager to handle the instances, and also another + to handle flavors. 
+ """ + self._manager = BaseManager(self, + resource_class=CloudBlockStorageVolume, response_key="volume", + uri_base="volumes") + self._types_manager = BaseManager(self, + resource_class=CloudBlockStorageVolumeType, + response_key="volume_type", uri_base="types") + self._snaps_manager = BaseManager(self, + resource_class=CloudBlockStorageSnapshot, + response_key="snapshot", uri_base="snapshots") + + + def create(self, name="", size=None, volume_type=None, description=None, + metadata=None, snapshot_id=None, availability_zone=None): + """ + Makes sure that the size is passed and is within allowed values. + """ + if not isinstance(size, (int, long)) or not ( + MIN_SIZE <= size <= MAX_SIZE): + raise exc.InvalidSize("Volume sizes must be integers between " + "%s and %s." % (MIN_SIZE, MAX_SIZE)) + return super(CloudBlockStorageClient, self).create(name, size=size, + volume_type=volume_type, description=description, + metadata=metadata, snapshot_id=snapshot_id, + availability_zone=availability_zone) + + + def list_types(self): + """Returns a list of all available volume types.""" + return self._types_manager.list() + + + def list_snapshots(self): + """Returns a list of all snapshots.""" + return self._snaps_manager.list() + + + def _create_body(self, name, size=None, volume_type=None, description=None, + metadata=None, snapshot_id=None, availability_zone=None, + volume=None, force=False): + """ + Used to create the dict required to create any of the following: + A new volume + A new snapshot + """ + if size is not None: + # Creating a volume + if not isinstance(size, (int, long)) or not ( + MIN_SIZE <= size <= MAX_SIZE): + raise exc.InvalidSize("Volume sizes must be integers between " + "%s and %s." % (MIN_SIZE, MAX_SIZE)) + if volume_type is None: + volume_type = "SATA" + if description is None: + description = "" + if metadata is None: + metadata = {} + body = {"volume": { + "size": size, + "snapshot_id": snapshot_id, + "display_name": name, + "display_description": description, + "volume_type": volume_type, + "metadata": metadata, + "availability_zone": availability_zone, + }} + else: + # Creating a snapshot + body = {"snapshot": { + "display_name": name, + "display_description": description, + "volume_id": volume.id, + "force": str(force).lower(), + }} + return body + + + @assure_volume + def attach_to_instance(self, volume, instance, mountpoint): + """Attaches the volume to the specified instance at the mountpoint.""" + return volume.attach_to_instance(instance, mountpoint) + + + @assure_volume + def detach(self, volume): + """Detaches the volume from whatever device it is attached to.""" + return volume.detach() + + + @assure_volume + def delete_volume(self, volume, force=False): + """Deletes the volume.""" + return volume.delete(force=force) + + + @assure_volume + def create_snapshot(self, volume, name=None, description=None, force=False): + """ + Creates a snapshot of the volume, with an optional name and description. + + Normally snapshots will not happen if the volume is attached. To + override this default behavior, pass force=True. 
+ """ + return volume.create_snapshot(name=name, description=description, + force=force) + + + @assure_snapshot + def delete_snapshot(self, snapshot): + """Deletes the snapshot.""" + return snapshot.delete() diff --git a/awx/lib/site-packages/pyrax/clouddatabases.py b/awx/lib/site-packages/pyrax/clouddatabases.py new file mode 100644 index 0000000000..98dc760b6c --- /dev/null +++ b/awx/lib/site-packages/pyrax/clouddatabases.py @@ -0,0 +1,698 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from functools import wraps + +from pyrax.client import BaseClient +import pyrax.exceptions as exc +from pyrax.manager import BaseManager +from pyrax.resource import BaseResource +import pyrax.utils as utils + + +def assure_instance(fnc): + @wraps(fnc) + def _wrapped(self, instance, *args, **kwargs): + if not isinstance(instance, CloudDatabaseInstance): + # Must be the ID + instance = self._manager.get(instance) + return fnc(self, instance, *args, **kwargs) + return _wrapped + + + +class CloudDatabaseVolume(object): + instance = None + size = None + used = None + + def __init__(self, instance, info): + self.instance = instance + for key, val in info.items(): + setattr(self, key, val) + + def resize(self, size): + self.instance.resize_volume(size) + self.size = size + + def get(self, att): + # For backwards compatibility + return getattr(self, att) + + + +class CloudDatabaseManager(BaseManager): + """ + This class manages communication with Cloud Database resources. + """ + def get(self, item): + """ + This additional code is necessary to properly return the 'volume' + attribute of the instance as a CloudDatabaseVolume object instead of + a raw dict. + """ + resource = super(CloudDatabaseManager, self).get(item) + resource.volume = CloudDatabaseVolume(resource, resource.volume) + return resource + + +class CloudDatabaseUserManager(BaseManager): + """ + This class handles operations on the users in a Cloud Database. + """ + def _get_db_names(self, dbs, strict=True): + """ + Accepts a single db (name or object) or a list of dbs, and returns a + list of database names. If any of the supplied dbs do not exist, a + NoSuchDatabase exception will be raised, unless you pass strict=False. + """ + dbs = utils.coerce_string_to_list(dbs) + db_names = [utils.get_name(db) for db in dbs] + if strict: + good_dbs = self.instance.list_databases() + good_names = [utils.get_name(good_db) for good_db in good_dbs] + bad_names = [db_name for db_name in db_names + if db_name not in good_names] + if bad_names: + bad = ", ".join(bad_names) + raise exc.NoSuchDatabase("The following database(s) were not " + "found: %s" % bad) + return db_names + + + def change_user_password(self, user, new_pass): + """ + Changes the password for the user to the supplied value. + + Returns None upon success; raises PasswordChangeFailed if the call + does not complete successfully. 
+ """ + user = utils.get_name(user) + body = {"users": [{"name": user, "password": new_pass}]} + uri = "/%s" % self.uri_base + resp, resp_body = self.api.method_put(uri, body=body) + if resp.status > 299: + raise exc.PasswordChangeFailed("Password for '%s' was not changed" + % user) + return None + + + def list_user_access(self, user): + """ + Returns a list of all database names for which the specified user + has access rights. + """ + user = utils.get_name(user) + uri = "/%s/%s/databases" % (self.uri_base, user) + try: + resp, resp_body = self.api.method_get(uri) + except exc.NotFound as e: + raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user) + dbs = resp_body.get("databases", {}) + return [CloudDatabaseDatabase(self, db) for db in dbs] + + + def grant_user_access(self, user, db_names, strict=True): + """ + Gives access to the databases listed in `db_names` to the user. You may + pass in either a single db or a list of dbs. + + If any of the databases do not exist, a NoSuchDatabase exception will + be raised, unless you specify `strict=False` in the call. + """ + user = utils.get_name(user) + uri = "/%s/%s/databases" % (self.uri_base, user) + db_names = self._get_db_names(db_names, strict=strict) + dbs = [{"name": db_name} for db_name in db_names] + body = {"databases": dbs} + try: + resp, resp_body = self.api.method_put(uri, body=body) + except exc.NotFound as e: + raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user) + + + def revoke_user_access(self, user, db_names, strict=True): + """ + Revokes access to the databases listed in `db_names` for the user. + + If any of the databases do not exist, a NoSuchDatabase exception will + be raised, unless you specify `strict=False` in the call. + """ + user = utils.get_name(user) + db_names = self._get_db_names(db_names, strict=strict) + bad_names = [] + for db_name in db_names: + uri = "/%s/%s/databases/%s" % (self.uri_base, user, db_name) + resp, resp_body = self.api.method_delete(uri) + + + +class CloudDatabaseInstance(BaseResource): + """ + This class represents a MySQL instance in the cloud. + """ + def __init__(self, *args, **kwargs): + super(CloudDatabaseInstance, self).__init__(*args, **kwargs) + self._database_manager = CloudDatabaseManager(self.manager.api, + resource_class=CloudDatabaseDatabase, response_key="database", + uri_base="instances/%s/databases" % self.id) + self._user_manager = CloudDatabaseUserManager(self.manager.api, + resource_class=CloudDatabaseUser, response_key="user", + uri_base="instances/%s/users" % self.id) + # Add references to the parent instance to the managers. + self._database_manager.instance = self._user_manager.instance = self + # Remove the lazy load + if not self.loaded: + self.get() + + + def get(self): + """ + Need to override the default get() behavior by making the 'volume' + attribute into a CloudDatabaseVolume object instead of the raw dict. + """ + super(CloudDatabaseInstance, self).get() + # Make the volume into an accessible object instead of a dict + self.volume = CloudDatabaseVolume(self, self.volume) + + + def list_databases(self): + """Returns a list of the names of all databases for this instance.""" + return self._database_manager.list() + + + def list_users(self): + """Returns a list of the names of all users for this instance.""" + return self._user_manager.list() + + + def get_user(self, name): + """ + Finds the user in this instance with the specified name, and + returns a CloudDatabaseUser object. 
If no match is found, a + NoSuchDatabaseUser exception is raised. + """ + try: + return self._user_manager.get(name) + except exc.NotFound: + raise exc.NoSuchDatabaseUser("No user by the name '%s' exists." % + name) + + + def get_database(self, name): + """ + Finds the database in this instance with the specified name, and + returns a CloudDatabaseDatabase object. If no match is found, a + NoSuchDatabase exception is raised. + """ + try: + return [db for db in self.list_databases() + if db.name == name][0] + except IndexError: + raise exc.NoSuchDatabase("No database by the name '%s' exists." % + name) + + + def create_database(self, name, character_set=None, collate=None): + """ + Creates a database with the specified name. If a database with + that name already exists, a BadRequest (400) exception will + be raised. + """ + if character_set is None: + character_set = "utf8" + if collate is None: + collate = "utf8_general_ci" + # Note that passing in non-None values is required for the _create_body + # method to distinguish between this and the request to create and + # instance. + self._database_manager.create(name=name, character_set=character_set, + collate=collate, return_none=True) + # Since the API doesn't return the info for creating the database + # object, we have to do it manually. + return self._database_manager.find(name=name) + + + def create_user(self, name, password, database_names): + """ + Creates a user with the specified name and password, and gives that + user access to the specified database(s). + + If a user with + that name already exists, a BadRequest (400) exception will + be raised. + """ + if not isinstance(database_names, list): + database_names = [database_names] + # The API only accepts names, not DB objects + database_names = [db if isinstance(db, basestring) else db.name + for db in database_names] + # Note that passing in non-None values is required for the create_body + # method to distinguish between this and the request to create and + # instance. + self._user_manager.create(name=name, password=password, + database_names=database_names, return_none=True) + # Since the API doesn't return the info for creating the user object, we + # have to do it manually. + return self._user_manager.find(name=name) + + + def delete_database(self, name_or_obj): + """ + Deletes the specified database. If no database by that name + exists, no exception will be raised; instead, nothing at all + is done. + """ + name = utils.get_name(name_or_obj) + self._database_manager.delete(name) + + + def change_user_password(self, user, new_pass): + """ + Changes the password for the user to the supplied value. + + Returns None upon success; raises PasswordChangeFailed if the call + does not complete successfully. + """ + return self._user_manager.change_user_password(user, new_pass) + + + def list_user_access(self, user): + """ + Returns a list of all database names for which the specified user + has access rights. + """ + return self._user_manager.list_user_access(user) + + + def grant_user_access(self, user, db_names, strict=True): + """ + Gives access to the databases listed in `db_names` to the user. + """ + return self._user_manager.grant_user_access(user, db_names, + strict=strict) + + + def revoke_user_access(self, user, db_names, strict=True): + """ + Revokes access to the databases listed in `db_names` for the user. + """ + return self._user_manager.revoke_user_access(user, db_names, + strict=strict) + + + def delete_user(self, user): + """ + Deletes the specified user. 
If no user by that name + exists, no exception will be raised; instead, nothing at all + is done. + """ + name = utils.get_name(user) + self._user_manager.delete(name) + + + def enable_root_user(self): + """ + Enables login from any host for the root user and provides + the user with a generated root password. + """ + uri = "/instances/%s/root" % self.id + resp, body = self.manager.api.method_post(uri) + return body["user"]["password"] + + + def root_user_status(self): + """ + Returns True or False, depending on whether the root user + for this instance has been enabled. + """ + uri = "/instances/%s/root" % self.id + resp, body = self.manager.api.method_get(uri) + return body["rootEnabled"] + + + def restart(self): + """Restarts this instance.""" + self.manager.action(self, "restart") + + + def resize(self, flavor): + """Set the size of this instance to a different flavor.""" + # We need the flavorRef, not the flavor or size. + flavorRef = self.manager.api._get_flavor_ref(flavor) + body = {"flavorRef": flavorRef} + self.manager.action(self, "resize", body=body) + + + def resize_volume(self, size): + """Changes the size of the volume for this instance.""" + curr_size = self.volume.size + if size <= curr_size: + raise exc.InvalidVolumeResize("The new volume size must be larger " + "than the current volume size of '%s'." % curr_size) + body = {"volume": {"size": size}} + self.manager.action(self, "resize", body=body) + + + def _get_flavor(self): + try: + ret = self._flavor + except AttributeError: + ret = self._flavor = CloudDatabaseFlavor( + self.manager.api._flavor_manager, {}) + return ret + + def _set_flavor(self, flavor): + if isinstance(flavor, dict): + self._flavor = CloudDatabaseFlavor(self.manager.api._flavor_manager, + flavor) + else: + # Must be an instance + self._flavor = flavor + + flavor = property(_get_flavor, _set_flavor) + + +class CloudDatabaseDatabase(BaseResource): + """ + This class represents a database on a CloudDatabaseInstance. It is not + a true cloud entity, but a convenience object for dealing with databases + on instances. + """ + get_details = True + + def delete(self): + """This class doesn't have an 'id', so pass the name.""" + self.manager.delete(self.name) + + +class CloudDatabaseUser(BaseResource): + """ + This class represents a user on a CloudDatabaseInstance. It is not + a true cloud entity, but a convenience object for dealing with users + for instances. + """ + get_details = False + + def delete(self): + """This class doesn't have an 'id', so pass the name.""" + self.manager.delete(self.name) + + + def change_password(self, new_pass): + """ + Changes the password for this user to the supplied value. + + Returns None upon success; raises PasswordChangeFailed if the call + does not complete successfully. + """ + self.manager.change_user_password(self, new_pass) + + + def list_user_access(self): + """ + Returns a list of all database names for which the specified user + has access rights. + """ + return self.manager.list_user_access(self) + + + def grant_user_access(self, db_names, strict=True): + """ + Gives access to the databases listed in `db_names` to the user. + """ + return self.manager.grant_user_access(self, db_names, strict=strict) + + + def revoke_user_access(self, db_names, strict=True): + """ + Revokes access to the databases listed in `db_names` for the user. 
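+
+        Either a single database name or a list of names is accepted;
+        e.g. (the names are illustrative)::
+
+            user.grant_user_access(["sales", "hr"])
+            user.revoke_user_access("hr", strict=False)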
+ """ + return self.manager.revoke_user_access(self, db_names, strict=strict) + + + +class CloudDatabaseFlavor(BaseResource): + """ + This class represents the available instance configurations, or 'flavors', + which you use to define the memory and CPU size of your instance. These + objects are read-only. + """ + get_details = True + _non_display = ["links"] + + + +class CloudDatabaseClient(BaseClient): + """ + This is the primary class for interacting with Cloud Databases. + """ + name = "Cloud Databases" + + def _configure_manager(self): + """ + Creates a manager to handle the instances, and another + to handle flavors. + """ + self._manager = CloudDatabaseManager(self, + resource_class=CloudDatabaseInstance, response_key="instance", + uri_base="instances") + self._flavor_manager = BaseManager(self, + resource_class=CloudDatabaseFlavor, response_key="flavor", + uri_base="flavors") + + + @assure_instance + def list_databases(self, instance): + """Returns all databases for the specified instance.""" + return instance.list_databases() + + + @assure_instance + def create_database(self, instance, name, character_set=None, + collate=None): + """Creates a database with the specified name on the given instance.""" + return instance.create_database(name, character_set=character_set, + collate=collate) + + + @assure_instance + def get_database(self, instance, name): + """ + Finds the database in the given instance with the specified name, and + returns a CloudDatabaseDatabase object. If no match is found, a + NoSuchDatabase exception is raised. + """ + return instance.get_database(name) + + + @assure_instance + def delete_database(self, instance, name): + """Deletes the database by name on the given instance.""" + return instance.delete_database(name) + + + @assure_instance + def list_users(self, instance): + """Returns all users for the specified instance.""" + return instance.list_users() + + + @assure_instance + def create_user(self, instance, name, password, database_names): + """ + Creates a user with the specified name and password, and gives that + user access to the specified database(s). + """ + return instance.create_user(name=name, password=password, + database_names=database_names) + + + @assure_instance + def get_user(self, instance, name): + """ + Finds the user in the given instance with the specified name, and + returns a CloudDatabaseUser object. If no match is found, a + NoSuchUser exception is raised. + """ + return instance.get_user(name) + + + @assure_instance + def delete_user(self, instance, name): + """Deletes the user by name on the given instance.""" + return instance.delete_user(name) + + + @assure_instance + def change_user_password(self, instance, user, new_pass): + """ + Changes the password for the user of the specified instance to the + supplied value. + + Returns None upon success; raises PasswordChangeFailed if the call + does not complete successfully. + """ + return instance.change_user_password(user, new_pass) + + + @assure_instance + def list_user_access(self, instance, user): + """ + Returns a list of all database names for which the specified user + has access rights on the specified instance. + """ + return instance.list_user_access(user) + + + @assure_instance + def grant_user_access(self, instance, user, db_names, strict=True): + """ + Gives access to the databases listed in `db_names` to the user + on the specified instance. 
+ """ + return instance.grant_user_access(user, db_names, strict=strict) + + + @assure_instance + def revoke_user_access(self, instance, user, db_names, strict=True): + """ + Revokes access to the databases listed in `db_names` for the user + on the specified instance. + """ + return instance.revoke_user_access(user, db_names, strict=strict) + + + @assure_instance + def enable_root_user(self, instance): + """ + This enables login from any host for the root user and provides + the user with a generated root password. + """ + return instance.enable_root_user() + + + @assure_instance + def root_user_status(self, instance): + """Returns True if the given instance is root-enabled.""" + return instance.root_user_status() + + + @assure_instance + def restart(self, instance): + """Restarts the instance.""" + return instance.restart() + + + @assure_instance + def resize(self, instance, flavor): + """Sets the size of the instance to a different flavor.""" + return instance.resize(flavor) + + + def list_flavors(self): + """Returns a list of all available Flavors.""" + return self._flavor_manager.list() + + + def get_flavor(self, flavor_id): + """Returns a specific Flavor object by ID.""" + return self._flavor_manager.get(flavor_id) + + + def _get_flavor_ref(self, flavor): + """ + Flavors are odd in that the API expects an href link, not an ID, as with + nearly every other resource. This method takes either a + CloudDatabaseFlavor object, a flavor ID, a RAM size, or a flavor name, + and uses that to determine the appropriate href. + """ + flavor_obj = None + if isinstance(flavor, CloudDatabaseFlavor): + flavor_obj = flavor + elif isinstance(flavor, int): + # They passed an ID or a size + try: + flavor_obj = self.get_flavor(flavor) + except exc.NotFound: + # Must be either a size or bad ID, which will + # be handled below + pass + if flavor_obj is None: + # Try flavor name + flavors = self.list_flavors() + try: + flavor_obj = [flav for flav in flavors + if flav.name == flavor][0] + except IndexError: + # No such name; try matching RAM + try: + flavor_obj = [flav for flav in flavors + if flav.ram == flavor][0] + except IndexError: + raise exc.FlavorNotFound("Could not determine flavor from " + "'%s'." % flavor) + # OK, we have a Flavor object. 
Get the href + href = [link["href"] for link in flavor_obj.links + if link["rel"] == "self"][0] + return href + + + def _create_body(self, name, flavor=None, volume=None, databases=None, + users=None, character_set=None, collate=None, password=None, + database_names=None): + """ + Used to create the dict required to create any of the following: + A database instance + A database for an instance + A user for an instance + """ + if character_set is not None: + # Creating a database + body = {"databases": [ + {"name": name, + "character_set": character_set, + "collate": collate, + }]} + elif password is not None: + # Creating a user + db_dicts = [{"name": db} for db in database_names] + body = {"users": [ + {"name": name, + "password": password, + "databases": db_dicts, + }]} + else: + if flavor is None: + flavor = 1 + flavor_ref = self._get_flavor_ref(flavor) + if volume is None: + volume = 1 + if databases is None: + databases = [] + if users is None: + users = [] + body = {"instance": { + "name": name, + "flavorRef": flavor_ref, + "volume": {"size": volume}, + "databases": databases, + "users": users, + }} + return body diff --git a/awx/lib/site-packages/pyrax/clouddns.py b/awx/lib/site-packages/pyrax/clouddns.py new file mode 100644 index 0000000000..d5f7ebc70c --- /dev/null +++ b/awx/lib/site-packages/pyrax/clouddns.py @@ -0,0 +1,1394 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from functools import wraps +import json +import re +import time + +import pyrax +from pyrax.client import BaseClient +import pyrax.exceptions as exc +from pyrax.manager import BaseManager +from pyrax.resource import BaseResource +import pyrax.utils as utils + +# How long (in seconds) to wait for a response from async operations +DEFAULT_TIMEOUT = 5 +# How long (in seconds) to wait in between checks for async completion +DEFAULT_DELAY = 0.5 + + +def assure_domain(fnc): + @wraps(fnc) + def _wrapped(self, domain, *args, **kwargs): + if not isinstance(domain, CloudDNSDomain): + # Must be the ID + domain = self._manager.get(domain) + return fnc(self, domain, *args, **kwargs) + return _wrapped + + + +class CloudDNSRecord(BaseResource): + """ + This class represents a domain record. + """ + GET_DETAILS = False + # Initialize the supported attributes. + type = None + name = None + data = None + priority = None + ttl = None + comment = None + + + def update(self, data=None, priority=None, ttl=None, comment=None): + """ + Modifies this record. + """ + return self.manager.update_record(self.domain_id, self, data=data, + priority=priority, ttl=ttl, comment=comment) + + + def get(self): + """ + Gets the full information for an existing record for this domain. + """ + return self.manager.get_record(self.domain_id, self) + + + def delete(self): + """ + Deletes an existing record for this domain. 
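+
+        Records are typically fetched from their domain and then acted on
+        directly; e.g. (the record values are illustrative)::
+
+            rec = dom.find_record("A", name="www.example.com")
+            rec.update(ttl=600)
+            rec.delete()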
+ """ + return self.manager.delete_record(self.domain_id, self) + + + +class CloudDNSDomain(BaseResource): + """ + This class represents a DNS domain. + """ + def delete(self, delete_subdomains=False): + """ + Deletes this domain and all of its resource records. If this domain has + subdomains, each subdomain will now become a root domain. If you wish to + also delete any subdomains, pass True to 'delete_subdomains'. + """ + self.manager.delete(self, delete_subdomains=delete_subdomains) + + + def changes_since(self, date_or_datetime): + """ + Gets the changes for this domain since the specified date/datetime. + The date can be one of: + - a Python datetime object + - a Python date object + - a string in the format 'YYYY-MM-YY HH:MM:SS' + - a string in the format 'YYYY-MM-YY' + + It returns a list of dicts, whose keys depend on the specific change + that was made. A simple example of such a change dict: + + {u'accountId': 000000, + u'action': u'update', + u'changeDetails': [{u'field': u'serial_number', + u'newValue': u'1354038941', + u'originalValue': u'1354038940'}, + {u'field': u'updated_at', + u'newValue': u'Tue Nov 27 17:55:41 UTC 2012', + u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}], + u'domain': u'example.com', + u'targetId': 00000000, + u'targetType': u'Domain'} + """ + return self.manager.changes_since(self, date_or_datetime) + + + def export(self): + """ + Provides the BIND (Berkeley Internet Name Domain) 9 formatted contents + of the requested domain. This call is for a single domain only, and as such, + does not provide subdomain information. + + Sample export: + {u'accountId': 000000, + u'contentType': u'BIND_9', + u'contents': u'example.com.\t3600\tIN\tSOA\tns.rackspace.com. ' + 'foo@example.com. 1354202974 21600 3600 1814400 500' + 'example.com.\t3600\tIN\tNS\tdns1.stabletransit.com.' + 'example.com.\t3600\tIN\tNS\tdns2.stabletransit.com.', + u'id': 1111111} + """ + return self.manager.export_domain(self) + + + def update(self, emailAddress=None, ttl=None, comment=None): + """ + Provides a way to modify the following attributes of a domain + entry: + - email address + - ttl setting + - comment + """ + return self.manager.update_domain(self, emailAddress=emailAddress, + ttl=ttl, comment=comment) + + + def list_subdomains(self, limit=None, offset=None): + """ + Returns a list of all subdomains for this domain. + """ + return self.manager.list_subdomains(self, limit=limit, offset=offset) + + + def list_records(self, limit=None, offset=None): + """ + Returns a list of all records configured for this domain. + """ + return self.manager.list_records(self, limit=limit, offset=offset) + + + def search_records(self, record_type, name=None, data=None): + """ + Returns a list of all records configured for this domain that match + the supplied search criteria. + """ + return self.manager.search_records(self, record_type=record_type, + name=name, data=data) + + + def find_record(self, record_type, name=None, data=None): + """ + Returns a single record for this domain that matches the supplied + search criteria. + + If no record matches, a DomainRecordNotFound exception will be raised. + If more than one matches, a DomainRecordNotUnique exception will + be raised. + """ + matches = self.manager.search_records(self, record_type=record_type, + name=name, data=data) + if not matches: + raise exc.DomainRecordNotFound + elif len(matches) > 1: + raise exc.DomainRecordNotUnique + return matches[0] + + + def add_records(self, records): + """ + Adds the records to this domain. 
Each record should be a dict with the + following keys: + - type (required) + - name (required) + - data (required) + - ttl (optional) + - comment (optional) + - priority (required for MX and SRV records; forbidden otherwise) + """ + return self.manager.add_records(self, records) + + # Create an alias, so that adding a single record is more intuitive + add_record = add_records + + + def get_record(self, record): + """ + Gets the full information for an existing record for this domain. + """ + return self.manager.get_record(self, record) + + + def update_record(self, record, data=None, priority=None, + ttl=None, comment=None): + """ + Modifies an existing record for this domain. + """ + return self.manager.update_record(self, record, data=data, + priority=priority, ttl=ttl, comment=comment) + + + def delete_record(self, record): + """ + Deletes an existing record for this domain. + """ + return self.manager.delete_record(self, record) + + +class CloudDNSPTRRecord(object): + """ + This represents a Cloud DNS PTR record (reverse DNS). + """ + def __init__(self, data=None, device=None): + self.type = self.id = self.data = self.name = None + self.ttl = self.comment = None + if data: + for key, val in data.items(): + setattr(self, key, val) + self.device = device + + + def delete(self): + """ + Deletes this PTR record from its device. + """ + return pyrax.cloud_dns.delete_ptr_records(self.device, self.data) + + + def __repr__(self): + reprkeys = ("id", "data", "name", "ttl") + info = ", ".join("%s=%s" % (key, getattr(self, key)) for key in reprkeys) + return "<%s %s>" % (self.__class__.__name__, info) + + + +class CloudDNSManager(BaseManager): + def __init__(self, api, resource_class=None, response_key=None, + plural_response_key=None, uri_base=None): + super(CloudDNSManager, self).__init__(api, resource_class=resource_class, + response_key=response_key, plural_response_key=plural_response_key, + uri_base=uri_base) + self._paging = {"domain": {}, "subdomain": {}, "record": {}} + self._reset_paging(service="all") + self._timeout = DEFAULT_TIMEOUT + self._delay = DEFAULT_DELAY + + + def _set_timeout(self, timeout): + """ + Changes the duration for which the program will wait for a response from + the DNS system. Setting the timeout to zero will make that program wait + an indefinite amount of time. + """ + self._timeout = timeout + + + def _set_delay(self, delay): + """ + Changes the interval that the program will pause in between attempts to + see if a request has completed. + """ + self._delay = delay + + + def _reset_paging(self, service, body=None): + """ + Resets the internal attributes when there is no current paging request. 
+ """ + if service == "all": + for svc in self._paging.keys(): + svc_dct = self._paging[svc] + svc_dct["next_uri"] = svc_dct["prev_uri"] = None + svc_dct["total_entries"] = None + return + svc_dct = self._paging[service] + svc_dct["next_uri"] = svc_dct["prev_uri"] = None + svc_dct["total_entries"] = None + if not body: + return + svc_dct["total_entries"] = body.get("totalEntries") + links = body.get("links") + uri_base = self.uri_base + if links: + for link in links: + href = link["href"] + pos = href.index(uri_base) + page_uri = href[pos - 1:] + if link["rel"] == "next": + svc_dct["next_uri"] = page_uri + elif link["rel"] == "previous": + svc_dct["prev_uri"] = page_uri + + + def _get_pagination_qs(self, limit, offset): + pagination_items = [] + if limit is not None: + pagination_items.append("limit=%s" % limit) + if offset is not None: + pagination_items.append("offset=%s" % offset) + qs = "&".join(pagination_items) + qs = "?%s" % qs if qs else "" + return qs + + + def list(self, limit=None, offset=None): + """Gets a list of all domains, or optionally a page of domains.""" + uri = "/%s%s" % (self.uri_base, self._get_pagination_qs(limit, offset)) + return self._list(uri) + + + def _list(self, uri, obj_class=None, list_all=False): + """ + Handles the communication with the API when getting + a full listing of the resources managed by this class. + """ + resp, resp_body = self.api.method_get(uri) + if obj_class is None: + obj_class = self.resource_class + + data = resp_body[self.plural_response_key] + ret = [obj_class(self, res, loaded=False) + for res in data if res] + self._reset_paging("domain", resp_body) + if list_all: + dom_paging = self._paging.get("domain", {}) + while dom_paging.get("next_uri"): + next_uri = dom_paging.get("next_uri") + ret.extend(self._list(uri=next_uri, obj_class=obj_class, + list_all=False)) + return ret + + + def list_previous_page(self): + """ + When paging through results, this will return the previous page, using + the same limit. If there are no more results, a NoMoreResults exception + will be raised. + """ + uri = self._paging.get("domain", {}).get("prev_uri") + if uri is None: + raise exc.NoMoreResults("There are no previous pages of domains " + "to list.") + return self._list(uri) + + + def list_next_page(self): + """ + When paging through results, this will return the next page, using the + same limit. If there are no more results, a NoMoreResults exception + will be raised. + """ + uri = self._paging.get("domain", {}).get("next_uri") + if uri is None: + raise exc.NoMoreResults("There are no more pages of domains to " + "list.") + return self._list(uri) + + + def _get(self, uri): + """ + Handles the communication with the API when getting + a specific resource managed by this class. + + Because DNS returns a different format for the body, + the BaseManager method must be overridden here. + """ + uri = "%s?showRecords=false&showSubdomains=false" % uri + resp, body = self.api.method_get(uri) + body["records"] = [] + return self.resource_class(self, body, loaded=True) + + + def _async_call(self, uri, body=None, method="GET", error_class=None, + has_response=True, *args, **kwargs): + """ + Handles asynchronous call/responses for the DNS API. + + Returns the response headers and body if the call was successful. + If an error status is returned, and the 'error_class' parameter is + specified, that class of error will be raised with the details from + the response. 
If no error class is specified, the response headers + and body will be returned to the calling method, which will have + to handle the result. + """ + api_methods = { + "GET": self.api.method_get, + "POST": self.api.method_post, + "PUT": self.api.method_put, + "DELETE": self.api.method_delete, + } + api_method = api_methods[method] + if body is None: + resp, resp_body = api_method(uri, *args, **kwargs) + else: + resp, resp_body = api_method(uri, body=body, *args, **kwargs) + callbackURL = resp_body["callbackUrl"].split("/status/")[-1] + massagedURL = "/status/%s?showDetails=true" % callbackURL + start = time.time() + timed_out = False + while (resp_body["status"] == "RUNNING") and not timed_out: + resp, resp_body = self.api.method_get(massagedURL) + if self._timeout: + timed_out = ((time.time() - start) > self._timeout) + time.sleep(self._delay) + if error_class and (resp_body["status"] == "ERROR"): + # This call will handle raising the error. + self._process_async_error(resp_body, error_class) + if timed_out: + raise exc.DNSCallTimedOut("The API call to '%s' did not complete " + "after %s seconds." % (uri, self._timeout)) + if has_response: + ret = resp, resp_body["response"] + else: + ret = resp, resp_body + try: + resp_body = json.loads(resp_body) + except Exception: + pass + return ret + + + def _process_async_error(self, resp_body, error_class): + """ + The DNS API does not return a consistent format for their error + messages. This abstracts out the differences in order to present + a single unified message in the exception to be raised. + """ + def _fmt_error(err): + # Remove the cumbersome Java-esque message + details = err["details"].replace("\n", " ") + if not details: + details = err["message"] + return "%s (%s)" % (details, err["code"]) + + error = resp_body["error"] + if "failedItems" in error: + # Multi-error response + faults = error["failedItems"]["faults"] + msgs = [_fmt_error(fault) for fault in faults] + msg = "\n".join(msgs) + else: + msg = _fmt_error(error) + raise error_class(msg) + + + def _create(self, uri, body, records=None, subdomains=None, + return_none=False, return_raw=False, **kwargs): + """ + Handles the communication with the API when creating a new + resource managed by this class. + + Since DNS works completely differently for create() than the other + APIs, this method overrides the default BaseManager behavior. + + If 'records' are supplied, they should be a list of dicts. Each + record dict should have the following format: + + {"name": "example.com", + "type": "A", + "data": "192.0.2.17", + "ttl": 86400} + + If 'subdomains' are supplied, they should be a list of dicts. Each + subdomain dict should have the following format: + + {"name": "sub1.example.com", + "comment": "1st sample subdomain", + "emailAddress": "sample@rackspace.com"} + """ + self.run_hooks("modify_body_for_create", body, **kwargs) + resp, resp_body = self._async_call(uri, body=body, method="POST", + error_class=exc.DomainCreationFailed) + response_body = resp_body[self.response_key][0] + return self.resource_class(self, response_body) + + + def delete(self, domain, delete_subdomains=False): + """ + Deletes the specified domain and all of its resource records. If the + domain has subdomains, each subdomain will now become a root domain. If + you wish to also delete any subdomains, pass True to 'delete_subdomains'. 
+ """ + uri = "/%s/%s" % (self.uri_base, utils.get_id(domain)) + if delete_subdomains: + uri = "%s?deleteSubdomains=true" % uri + resp, resp_body = self._async_call(uri, method="DELETE", + error_class=exc.DomainDeletionFailed, has_response=False) + + + def findall(self, **kwargs): + """ + Finds all items with attributes matching ``**kwargs``. + + Normally this isn't very efficient, since the default action is to + load the entire list and then filter on the Python side, but the DNS + API provides a more efficient search option when filtering on name. + So if the filter is on name, use that; otherwise, use the default. + """ + if (len(kwargs) == 1) and ("name" in kwargs): + # Filtering on name; use the more efficient method. + nm = kwargs["name"] + uri = "/%s?name=%s" % (self.uri_base, nm) + matches = self._list(uri, list_all=True) + return [match for match in matches + if match.name == nm] + else: + return super(CloudDNSManager, self).findall(**kwargs) + + + def changes_since(self, domain, date_or_datetime): + """ + Gets the changes for a domain since the specified date/datetime. + The date can be one of: + - a Python datetime object + - a Python date object + - a string in the format 'YYYY-MM-YY HH:MM:SS' + - a string in the format 'YYYY-MM-YY' + + It returns a list of dicts, whose keys depend on the specific change + that was made. A simple example of such a change dict: + + {u'accountId': 000000, + u'action': u'update', + u'changeDetails': [{u'field': u'serial_number', + u'newValue': u'1354038941', + u'originalValue': u'1354038940'}, + {u'field': u'updated_at', + u'newValue': u'Tue Nov 27 17:55:41 UTC 2012', + u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}], + u'domain': u'example.com', + u'targetId': 00000000, + u'targetType': u'Domain'} + """ + domain_id = utils.get_id(domain) + dt = utils.iso_time_string(date_or_datetime, show_tzinfo=True) + uri = "/domains/%s/changes?since=%s" % (domain_id, dt) + resp, body = self.api.method_get(uri) + return body.get("changes", []) + + + def export_domain(self, domain): + """ + Provides the BIND (Berkeley Internet Name Domain) 9 formatted contents + of the requested domain. This call is for a single domain only, and as + such, does not provide subdomain information. + + Sample export: + {u'accountId': 000000, + u'contentType': u'BIND_9', + u'contents': u'example.com.\t3600\tIN\tSOA\tns.rackspace.com. ' + 'foo@example.com. 1354202974 21600 3600 1814400 500' + 'example.com.\t3600\tIN\tNS\tdns1.stabletransit.com.' + 'example.com.\t3600\tIN\tNS\tdns2.stabletransit.com.', + u'id': 1111111} + """ + uri = "/domains/%s/export" % utils.get_id(domain) + resp, resp_body = self._async_call(uri, method="GET", + error_class=exc.NotFound) + return resp_body.get("contents", "") + + + def import_domain(self, domain_data): + """ + Takes a string in the BIND 9 format and creates a new domain. See the + 'export_domain()' method for a description of the format. 
+ """ + uri = "/domains/import" + body = {"domains": [{ + "contentType": "BIND_9", + "contents": domain_data, + }]} + resp, resp_body = self._async_call(uri, method="POST", body=body, + error_class=exc.DomainCreationFailed) + return resp_body + + + def update_domain(self, domain, emailAddress=None, ttl=None, comment=None): + """ + Provides a way to modify the following attributes of a domain + record: + - email address + - ttl setting + - comment + """ + if not any((emailAddress, ttl, comment)): + raise exc.MissingDNSSettings( + "No settings provided to update_domain().") + uri = "/domains/%s" % utils.get_id(domain) + body = {"comment": comment, + "ttl": ttl, + "emailAddress": emailAddress, + } + none_keys = [key for key, val in body.items() + if val is None] + for none_key in none_keys: + body.pop(none_key) + resp, resp_body = self._async_call(uri, method="PUT", body=body, + error_class=exc.DomainUpdateFailed, has_response=False) + return resp_body + + + def list_subdomains(self, domain, limit=None, offset=None): + """ + Returns a list of all subdomains of the specified domain. + """ + # The commented-out uri is the official API, but it is + # horribly slow. +# uri = "/domains/%s/subdomains" % utils.get_id(domain) + uri = "/domains?name=%s" % domain.name + page_qs = self._get_pagination_qs(limit, offset) + if page_qs: + uri = "%s&%s" % (uri, page_qs[1:]) + return self._list_subdomains(uri, domain.id) + + + def _list_subdomains(self, uri, domain_id): + resp, body = self.api.method_get(uri) + self._reset_paging("subdomain", body) + subdomains = body.get("domains", []) + return [CloudDNSDomain(self, subdomain, loaded=False) + for subdomain in subdomains + if subdomain["id"] != domain_id] + + + def list_subdomains_previous_page(self): + """ + When paging through subdomain results, this will return the previous + page, using the same limit. If there are no more results, a + NoMoreResults exception will be raised. + """ + uri = self._paging.get("subdomain", {}).get("prev_uri") + if uri is None: + raise exc.NoMoreResults("There are no previous pages of subdomains " + "to list.") + return self._list_subdomains(uri) + + + def list_subdomains_next_page(self): + """ + When paging through subdomain results, this will return the next page, + using the same limit. If there are no more results, a NoMoreResults + exception will be raised. + """ + uri = self._paging.get("subdomain", {}).get("next_uri") + if uri is None: + raise exc.NoMoreResults("There are no more pages of subdomains " + "to list.") + return self._list_subdomains(uri) + + + def list_records(self, domain, limit=None, offset=None): + """ + Returns a list of all records configured for the specified domain. + """ + uri = "/domains/%s/records%s" % (utils.get_id(domain), + self._get_pagination_qs(limit, offset)) + return self._list_records(uri) + + + def _list_records(self, uri): + resp, body = self.api.method_get(uri) + self._reset_paging("record", body) + # The domain ID will be in the URL + pat = "domains/([^/]+)/records" + mtch = re.search(pat, uri) + dom_id = mtch.groups()[0] + records = body.get("records", []) + for record in records: + record["domain_id"] = dom_id + return [CloudDNSRecord(self, record, loaded=False) + for record in records if record] + + + def list_records_previous_page(self): + """ + When paging through record results, this will return the previous page, + using the same limit. If there are no more results, a NoMoreResults + exception will be raised. 
+ """ + uri = self._paging.get("record", {}).get("prev_uri") + if uri is None: + raise exc.NoMoreResults("There are no previous pages of records " + "to list.") + return self._list_records(uri) + + + def list_records_next_page(self): + """ + When paging through record results, this will return the next page, + using the same limit. If there are no more results, a NoMoreResults + exception will be raised. + """ + uri = self._paging.get("record", {}).get("next_uri") + if uri is None: + raise exc.NoMoreResults("There are no more pages of records to list.") + return self._list_records(uri) + + + def search_records(self, domain, record_type, name=None, data=None): + """ + Returns a list of all records configured for the specified domain that + match the supplied search criteria. + """ + search_params = [] + if name: + search_params.append("name=%s" % name) + if data: + search_params.append("data=%s" % data) + query_string = "&".join(search_params) + dom_id = utils.get_id(domain) + uri = "/domains/%s/records?type=%s" % (dom_id, record_type) + if query_string: + uri = "%s&%s" % (uri, query_string) + resp, body = self.api.method_get(uri) + records = body.get("records", []) + self._reset_paging("record", body) + rec_paging = self._paging.get("record", {}) + while rec_paging.get("next_uri"): + resp, body = self.api.method_get(rec_paging.get("next_uri")) + self._reset_paging("record", body) + records.extend(body.get("records", [])) + for record in records: + record["domain_id"] = dom_id + return [CloudDNSRecord(self, record, loaded=False) + for record in records if record] + + + def add_records(self, domain, records): + """ + Adds the records to this domain. Each record should be a dict with the + following keys: + - type (required) + - name (required) + - data (required) + - ttl (optional) + - comment (optional) + - priority (required for MX and SRV records; forbidden otherwise) + """ + if isinstance(records, dict): + # Single record passed + records = [records] + dom_id = utils.get_id(domain) + uri = "/domains/%s/records" % dom_id + body = {"records": records} + resp, resp_body = self._async_call(uri, method="POST", body=body, + error_class=exc.DomainRecordAdditionFailed, has_response=False) + records = resp_body.get("response", {}).get("records", []) + for record in records: + record["domain_id"] = dom_id + return [CloudDNSRecord(self, record, loaded=False) + for record in records if record] + + + def get_record(self, domain, record): + """ + Gets the full information for an existing record for this domain. + """ + rec_id = utils.get_id(record) + domain_id = utils.get_id(domain) + uri = "/domains/%s/records/%s" % (domain_id, rec_id) + resp, resp_body = self.api.method_get(uri) + resp_body['domain_id'] = domain_id + return CloudDNSRecord(self, resp_body, loaded=False) + + + def update_record(self, domain, record, data=None, priority=None, + ttl=None, comment=None): + """ + Modifies an existing record for a domain. + """ + rec_id = utils.get_id(record) + uri = "/domains/%s/records/%s" % (utils.get_id(domain), rec_id) + body = {"name": record.name} + all_opts = (("data", data), ("priority", priority), ("ttl", ttl), + ("comment", comment)) + opts = [(k, v) for k, v in all_opts if v is not None] + body.update(dict(opts)) + resp, resp_body = self._async_call(uri, method="PUT", body=body, + error_class=exc.DomainRecordUpdateFailed, has_response=False) + return resp_body + + + def delete_record(self, domain, record): + """ + Deletes an existing record for a domain. 
+ """ + uri = "/domains/%s/records/%s" % (utils.get_id(domain), + utils.get_id(record)) + resp, resp_body = self._async_call(uri, method="DELETE", + error_class=exc.DomainRecordDeletionFailed, has_response=False) + return resp_body + + + def _get_ptr_details(self, device, device_type): + """ + Takes a device and device type and returns the corresponding HREF link + and service name for use with PTR record management. + """ + if device_type.lower().startswith("load"): + ep = pyrax._get_service_endpoint("load_balancer") + svc = "loadbalancers" + svc_name = "cloudLoadBalancers" + else: + ep = pyrax._get_service_endpoint("compute") + svc = "servers" + svc_name = "cloudServersOpenStack" + href = "%s/%s/%s" % (ep, svc, utils.get_id(device)) + return (href, svc_name) + + + def _resolve_device_type(self, device): + """ + Given a device, determines if it is a CloudServer, a CloudLoadBalancer, + or an invalid device. + """ + try: + from tests.unit import fakes + server_types = (pyrax.CloudServer, fakes.FakeServer, + fakes.FakeDNSDevice) + lb_types = (pyrax.CloudLoadBalancer, fakes.FakeLoadBalancer) + except ImportError: + # Not running with tests + server_types = (pyrax.CloudServer, ) + lb_types = (pyrax.CloudLoadBalancer, ) + if isinstance(device, server_types): + device_type = "server" + elif isinstance(device, lb_types): + device_type = "loadbalancer" + else: + raise exc.InvalidDeviceType("The device '%s' must be a CloudServer " + "or a CloudLoadBalancer." % device) + return device_type + + + def list_ptr_records(self, device): + """ + Returns a list of all PTR records configured for this device. + """ + device_type = self._resolve_device_type(device) + href, svc_name = self._get_ptr_details(device, device_type) + uri = "/rdns/%s?href=%s" % (svc_name, href) + try: + resp, resp_body = self.api.method_get(uri) + except exc.NotFound: + return [] + records = [CloudDNSPTRRecord(rec, device) + for rec in resp_body.get("records", [])] + return records + + + def add_ptr_records(self, device, records): + """ + Adds one or more PTR records to the specified device. + """ + device_type = self._resolve_device_type(device) + href, svc_name = self._get_ptr_details(device, device_type) + if not isinstance(records, (list, tuple)): + records = [records] + body = {"recordsList": { + "records": records}, + "link": { + "content": "", + "href": href, + "rel": svc_name, + }} + uri = "/rdns" + # This is a necessary hack, so here's why: if you attempt to add + # PTR records to device, and you don't have rights to either the device + # or the IP address, the DNS API will return a 401 - Unauthorized. + # Unfortunately, the pyrax client interprets this as a bad auth token, + # and there is no way to distinguish this from an actual authentication + # failure. The client will attempt to re-authenticate as a result, and + # will fail, due to the DNS API not having regional endpoints. The net + # result is that an EndpointNotFound exception will be raised, which + # we catch here and then raise a more meaningful exception. + # The Rackspace DNS team is working on changing this to return a 403 + # instead; when that happens this kludge can go away. 
+        try:
+            resp, resp_body = self._async_call(uri, body=body, method="POST",
+                    error_class=exc.PTRRecordCreationFailed)
+        except exc.EndpointNotFound:
+            raise exc.InvalidPTRRecord("The domain/IP address information is not "
+                    "valid for this device.")
+        records = [CloudDNSPTRRecord(rec, device)
+                for rec in resp_body.get("records", [])]
+        return records
+
+
+    def update_ptr_record(self, device, record, domain_name, data=None,
+            ttl=None, comment=None):
+        """
+        Updates a PTR record with the supplied values.
+        """
+        device_type = self._resolve_device_type(device)
+        href, svc_name = self._get_ptr_details(device, device_type)
+        try:
+            rec_id = record.id
+        except AttributeError:
+            rec_id = record
+        rec = {"name": domain_name,
+                "id": rec_id,
+                "type": "PTR",
+                "data": data,
+                }
+        if ttl is not None:
+            # Minimum TTL is 300 seconds
+            rec["ttl"] = max(300, ttl)
+        if comment is not None:
+            # Maximum comment length is 160 chars
+            rec["comment"] = comment[:160]
+        body = {"recordsList": {
+                "records": [rec]},
+                "link": {
+                "content": "",
+                "href": href,
+                "rel": svc_name,
+                }}
+        uri = "/rdns"
+        try:
+            resp, resp_body = self._async_call(uri, body=body, method="PUT",
+                    has_response=False, error_class=exc.PTRRecordUpdateFailed)
+        except exc.EndpointNotFound:
+            raise exc.InvalidPTRRecord("The record domain/IP address "
+                    "information is not valid for this device.")
+        return resp_body.get("status") == "COMPLETED"
+
+
+    def delete_ptr_records(self, device, ip_address=None):
+        """
+        Deletes the PTR records for the specified device. If 'ip_address' is
+        supplied, only the PTR records with that IP address will be deleted.
+        """
+        device_type = self._resolve_device_type(device)
+        href, svc_name = self._get_ptr_details(device, device_type)
+        uri = "/rdns/%s?href=%s" % (svc_name, href)
+        if ip_address:
+            uri = "%s&ip=%s" % (uri, ip_address)
+        resp, resp_body = self._async_call(uri, method="DELETE",
+                has_response=False,
+                error_class=exc.PTRRecordDeletionFailed)
+        return resp_body.get("status") == "COMPLETED"
+
+
+
+class CloudDNSClient(BaseClient):
+    """
+    This is the primary class for interacting with Cloud DNS.
+    """
+    name = "Cloud DNS"
+
+    def _configure_manager(self):
+        """
+        Creates the manager to handle the domains for this client.
+        """
+        self._manager = CloudDNSManager(self, resource_class=CloudDNSDomain,
+                response_key="domains", plural_response_key="domains",
+                uri_base="domains")
+
+
+    def _create_body(self, name, emailAddress, ttl=3600, comment=None,
+            subdomains=None, records=None):
+        """
+        Creates the appropriate dict for creating a new domain.
+        """
+        if subdomains is None:
+            subdomains = []
+        if records is None:
+            records = []
+        body = {"domains": [{
+                "name": name,
+                "emailAddress": emailAddress,
+                "ttl": ttl,
+                "comment": comment,
+                "subdomains": {
+                    "domains": subdomains
+                    },
+                "recordsList": {
+                    "records": records
+                    },
+                }]}
+        return body
+
+
+    def set_timeout(self, timeout):
+        """
+        Sets the amount of time that calls will wait for a response from
+        the DNS system before timing out. Setting the timeout to zero will
+        cause execution to wait indefinitely until the call completes.
+        """
+        self._manager._set_timeout(timeout)
+
+
+    def set_delay(self, delay):
+        """
+        Changes the interval that the program will pause in between attempts to
+        see if a request has completed.
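+
+        For example (illustrative only; ``dns`` is assumed to be an
+        authenticated CloudDNSClient):
+
+            dns.set_timeout(30)  # give up on async calls after 30 seconds
+            dns.set_delay(1)     # poll the status URI once per second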
+ """ + self._manager._set_delay(delay) + + + def list(self, limit=None, offset=None): + """Returns a list of all resources.""" + return self._manager.list(limit=limit, offset=offset) + + + def list_previous_page(self): + """Returns the previous page of results.""" + return self._manager.list_previous_page() + + + def list_next_page(self): + """Returns the next page of results.""" + return self._manager.list_next_page() + + + def get_domain_iterator(self): + """ + Returns an iterator that will return each available domain. If there are + more than the limit of 100 domains, the iterator will continue to fetch + domains from the API until all domains have been returned. + """ + return DomainResultsIterator(self._manager) + + + @assure_domain + def changes_since(self, domain, date_or_datetime): + """ + Gets the changes for a domain since the specified date/datetime. + The date can be one of: + - a Python datetime object + - a Python date object + - a string in the format 'YYYY-MM-YY HH:MM:SS' + - a string in the format 'YYYY-MM-YY' + + It returns a list of dicts, whose keys depend on the specific change + that was made. A simple example of such a change dict: + + {u'accountId': 000000, + u'action': u'update', + u'changeDetails': [{u'field': u'serial_number', + u'newValue': u'1354038941', + u'originalValue': u'1354038940'}, + {u'field': u'updated_at', + u'newValue': u'Tue Nov 27 17:55:41 UTC 2012', + u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}], + u'domain': u'example.com', + u'targetId': 00000000, + u'targetType': u'Domain'} + """ + return domain.changes_since(date_or_datetime) + + + @assure_domain + def export_domain(self, domain): + """ + Provides the BIND (Berkeley Internet Name Domain) 9 formatted contents + of the requested domain. This call is for a single domain only, and as + such, does not provide subdomain information. + + Sample export: + + {u'accountId': 000000, + u'contentType': u'BIND_9', + u'contents': u'example.com.\t3600\tIN\tSOA\tns.rackspace.com. ' + 'foo@example.com. 1354202974 21600 3600 1814400 500' + 'example.com.\t3600\tIN\tNS\tdns1.stabletransit.com.' + 'example.com.\t3600\tIN\tNS\tdns2.stabletransit.com.', + u'id': 1111111} + """ + return domain.export() + + + def import_domain(self, domain_data): + """ + Takes a string in the BIND 9 format and creates a new domain. See the + 'export_domain()' method for a description of the format. + """ + return self._manager.import_domain(domain_data) + + + @assure_domain + def update_domain(self, domain, emailAddress=None, ttl=None, comment=None): + """ + Provides a way to modify the following attributes of a domain + record: + - email address + - ttl setting + - comment + """ + return domain.update(emailAddress=emailAddress, + ttl=ttl, comment=comment) + + + @assure_domain + def delete(self, domain, delete_subdomains=False): + """ + Deletes the specified domain and all of its resource records. If the + domain has subdomains, each subdomain will now become a root domain. If + you wish to also delete any subdomains, pass True to 'delete_subdomains'. + """ + domain.delete(delete_subdomains=delete_subdomains) + + + @assure_domain + def list_subdomains(self, domain, limit=None, offset=None): + """ + Returns a list of all subdomains for the specified domain. + """ + return domain.list_subdomains(limit=limit, offset=offset) + + + def get_subdomain_iterator(self, domain, limit=None, offset=None): + """ + Returns an iterator that will return each available subdomain for the + specified domain. 
If there are more than the limit of 100 subdomains, + the iterator will continue to fetch subdomains from the API until all + subdomains have been returned. + """ + return SubdomainResultsIterator(self._manager, domain=domain) + + + def list_subdomains_previous_page(self): + """Returns the previous page of subdomain results.""" + return self._manager.list_subdomains_previous_page() + + + def list_subdomains_next_page(self): + """Returns the next page of subdomain results.""" + return self._manager.list_subdomains_next_page() + + + @assure_domain + def list_records(self, domain, limit=None, offset=None): + """ + Returns a list of all records configured for the specified domain. + """ + return domain.list_records(limit=limit, offset=offset) + + + def get_record_iterator(self, domain): + """ + Returns an iterator that will return each available DNS record for the + specified domain. If there are more than the limit of 100 records, the + iterator will continue to fetch records from the API until all records + have been returned. + """ + return RecordResultsIterator(self._manager, domain=domain) + + + def list_records_previous_page(self): + """Returns the previous page of record results.""" + return self._manager.list_records_previous_page() + + + def list_records_next_page(self): + """Returns the next page of record results.""" + return self._manager.list_records_next_page() + + + @assure_domain + def search_records(self, domain, record_type, name=None, data=None): + """ + Returns a list of all records configured for the specified domain + that match the supplied search criteria. + """ + return domain.search_records(record_type=record_type, + name=name, data=data) + + + @assure_domain + def find_record(self, domain, record_type, name=None, data=None): + """ + Returns a single record for this domain that matches the supplied + search criteria. + + If no record matches, a DomainRecordNotFound exception will be raised. + If more than one matches, a DomainRecordNotUnique exception will + be raised. + """ + return domain.find_record(record_type=record_type, + name=name, data=data) + + + @assure_domain + def add_records(self, domain, records): + """ + Adds the records to this domain. Each record should be a dict with the + following keys: + - type (required) + - name (required) + - data (required) + - ttl (optional) + - comment (optional) + - priority (required for MX and SRV records; forbidden otherwise) + """ + return domain.add_records(records) + + #Create an alias, so that adding a single record is more intuitive + add_record = add_records + + + @assure_domain + def update_record(self, domain, record, data=None, priority=None, + ttl=None, comment=None): + """ + Modifies an existing record for a domain. + """ + return domain.update_record(record, data=data, + priority=priority, ttl=ttl, comment=comment) + + + @assure_domain + def delete_record(self, domain, record): + """ + Deletes an existing record for this domain. + """ + return domain.delete_record(record) + + + def list_ptr_records(self, device): + """ + Returns a list of all PTR records configured for this device. + """ + return self._manager.list_ptr_records(device) + + + def add_ptr_records(self, device, records): + """ + Adds one or more PTR records to the specified device. + """ + return self._manager.add_ptr_records(device, records) + + + def update_ptr_record(self, device, record, domain_name, data=None, + ttl=None, comment=None): + """ + Updates a PTR record with the supplied values. 
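+
+        An illustrative sketch (assumes ``dns`` is an authenticated
+        CloudDNSClient, ``server`` a CloudServer, and ``rec`` an existing
+        CloudDNSPTRRecord for that server):
+
+            dns.update_ptr_record(server, rec, "mail.example.com",
+                    data=server.accessIPv4, ttl=600)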
+ """ + return self._manager.update_ptr_record(device, record, domain_name, + data=data, ttl=ttl, comment=comment) + + + def delete_ptr_records(self, device, ip_address=None): + """ + Deletes the PTR records for the specified device. If 'ip_address' + is supplied, only the PTR records with that IP address will be deleted. + """ + return self._manager.delete_ptr_records(device, ip_address=ip_address) + + + def get_absolute_limits(self): + """ + Returns a dict with the absolute limits for the current account. + """ + resp, body = self.method_get("/limits") + absolute_limits = body.get("limits", {}).get("absolute") + return absolute_limits + + + def get_rate_limits(self): + """ + Returns a dict with the current rate limit information for domain + and status requests. + """ + resp, body = self.method_get("/limits") + rate_limits = body.get("limits", {}).get("rate") + ret = [] + for rate_limit in rate_limits: + limits = rate_limit["limit"] + uri_limits = {"uri": rate_limit["uri"], + "limits": limits} + ret.append(uri_limits) + return ret + + + +class ResultsIterator(object): + """ + This object will iterate over all the results for a given + type of listing, no matter how many items exist. + + This is an abstract class; subclasses must define the + _init_methods() method. + """ + def __init__(self, manager, domain=None): + self.manager = manager + self.domain = domain + self.domain_id = utils.get_id(domain) if domain else None + self.results = [] + self.next_uri = "" + self.extra_args = tuple() + self._init_methods() + + + def _init_methods(self): + """ + Must be implemented in subclasses. + """ + raise NotImplementedError() + + + def __iter__(self): + return self + + + def next(self): + """ + Return the next available item. If there are no more items in the + local 'results' list, check if there is a 'next_uri' value. If so, + use that to get the next page of results from the API, and return + the first item from that query. + """ + try: + return self.results.pop(0) + except IndexError: + if self.next_uri is None: + raise StopIteration() + else: + if not self.next_uri: + if self.domain: + self.results = self.list_method(self.domain) + else: + self.results = self.list_method() + else: + args = self.extra_args + self.results = self._list_method(self.next_uri, *args) + self.next_uri = self.manager._paging.get( + self.paging_service, {}).get("next_uri") + # We should have more results. + try: + return self.results.pop(0) + except IndexError: + raise StopIteration() + + +class DomainResultsIterator(ResultsIterator): + """ + ResultsIterator subclass for iterating over all domains. + """ + def _init_methods(self): + self.list_method = self.manager.list + self._list_method = self.manager._list + self.paging_service = "domain" + + +class SubdomainResultsIterator(ResultsIterator): + """ + ResultsIterator subclass for iterating over all subdomains. + """ + def _init_methods(self): + self.list_method = self.manager.list_subdomains + self._list_method = self.manager._list_subdomains + self.extra_args = (self.domain_id, ) + self.paging_service = "subdomain" + + +class RecordResultsIterator(ResultsIterator): + """ + ResultsIterator subclass for iterating over all domain records. 
+ """ + def _init_methods(self): + self.list_method = self.manager.list_records + self._list_method = self.manager._list_records + self.paging_service = "record" diff --git a/awx/lib/site-packages/pyrax/cloudloadbalancers.py b/awx/lib/site-packages/pyrax/cloudloadbalancers.py new file mode 100644 index 0000000000..638dd17963 --- /dev/null +++ b/awx/lib/site-packages/pyrax/cloudloadbalancers.py @@ -0,0 +1,1646 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +from functools import wraps + +import pyrax +from pyrax.client import BaseClient +import pyrax.exceptions as exc +from pyrax.manager import BaseManager +from pyrax.resource import BaseResource +import pyrax.utils as utils + + +def assure_parent(fnc): + @wraps(fnc) + def wrapped(self, *args, **kwargs): + lb = self.parent + if not lb: + exc_class = {Node: exc.UnattachedNode, + VirtualIP: exc.UnattachedVirtualIP}[self.__class__] + raise exc_class("No parent Load Balancer for this node could " + "be determined.") + return fnc(self, *args, **kwargs) + return wrapped + + +def assure_loadbalancer(fnc): + @wraps(fnc) + def _wrapped(self, loadbalancer, *args, **kwargs): + if not isinstance(loadbalancer, CloudLoadBalancer): + # Must be the ID + loadbalancer = self._manager.get(loadbalancer) + return fnc(self, loadbalancer, *args, **kwargs) + return _wrapped + + + +class CloudLoadBalancer(BaseResource): + """Represents a Cloud Load Balancer instance.""" + def __init__(self, *args, **kwargs): + self._connection_logging = None + self._content_caching = None + self._session_persistence = None + self._non_display = ["nodes", "virtual_ips"] + super(CloudLoadBalancer, self).__init__(*args, **kwargs) + + + def add_nodes(self, nodes): + """Adds the nodes to this load balancer.""" + return self.manager.add_nodes(self, nodes) + + + def add_virtualip(self, vip): + """Adds the virtual IP to this load balancer.""" + return self.manager.add_virtualip(self, vip) + + + def get_usage(self, start=None, end=None): + """ + Return the usage records for this load balancer. You may optionally + include a start datetime or an end datetime, or both, which will limit + the records to those on or after the start time, and those before or on + the end time. These times should be Python datetime.datetime objects, + Python datetime.date objects, or strings in the format: + "YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD". 
+ """ + return self.manager.get_usage(self, start=start, end=end) + + + def _add_details(self, info): + """Override the base behavior to add Nodes, VirtualIPs, etc.""" + for (key, val) in info.iteritems(): + if key == "nodes": + val = [Node(parent=self, **nd) for nd in val] + elif key == "sessionPersistence": + val = val['persistenceType'] + elif key == "cluster": + val = val['name'] + elif key == "virtualIps": + key = "virtual_ips" + val = [VirtualIP(parent=self, **vip) for vip in val] + setattr(self, key, val) + + + def update(self, name=None, algorithm=None, protocol=None, halfClosed=None, + port=None, timeout=None): + """ + Provides a way to modify the following attributes of a load balancer: + - name + - algorithm + - protocol + - halfClosed + - port + - timeout + """ + return self.manager.update(self, name=name, algorithm=algorithm, + protocol=protocol, halfClosed=halfClosed, port=port, + timeout=timeout) + + + def delete_node(self, node): + """Removes the node from the load balancer.""" + return self.manager.delete_node(self, node) + + + def update_node(self, node, diff=None): + """Updates the node's attributes.""" + return self.manager.update_node(node, diff=diff) + + + def delete_virtualip(self, vip): + """Deletes the VirtualIP from its load balancer.""" + return self.manager.delete_virtualip(self, vip) + + + def get_access_list(self): + """ + Returns the current access list for the load balancer. + """ + return self.manager.get_access_list(self) + + + def add_access_list(self, access_list): + """ + Adds the access list provided to the load balancer. + + The 'access_list' should be a dict in the following format: + + {"accessList": [ + {"address": "192.0.43.10", "type": "DENY"}, + {"address": "192.0.43.11", "type": "ALLOW"}, + ... + {"address": "192.0.43.99", "type": "DENY"}, + ] + } + + If no access list exists, it is created. If an access list + already exists, it is updated with the provided list. + """ + return self.manager.add_access_list(self, access_list) + + + def delete_access_list(self): + """ + Removes the access list from this load balancer. + """ + return self.manager.delete_access_list(self) + + + def delete_access_list_items(self, item_ids): + """ + Removes the item(s) from the load balancer's access list + that match the provided IDs. 'item_ids' should be one or + more access list item IDs. + """ + return self.manager.delete_access_list_items(self, item_ids) + + + def get_health_monitor(self): + """ + Returns a dict representing the health monitor for the load + balancer. If no monitor has been configured, returns an + empty dict. + """ + return self.manager.get_health_monitor(self) + + + def add_health_monitor(self, type, delay=10, timeout=10, + attemptsBeforeDeactivation=3, path="/", statusRegex=None, + bodyRegex=None, hostHeader=None): + """ + Adds a health monitor to the load balancer. If a monitor already + exists, it is updated with the supplied settings. + """ + abd = attemptsBeforeDeactivation + return self.manager.add_health_monitor(self, type=type, delay=delay, + timeout=timeout, attemptsBeforeDeactivation=abd, + path=path, statusRegex=statusRegex, bodyRegex=bodyRegex, + hostHeader=hostHeader) + + + def delete_health_monitor(self): + """ + Deletes the health monitor for the load balancer. + """ + return self.manager.delete_health_monitor(self) + + + def get_connection_throttle(self): + """ + Returns a dict representing the connection throttling information + for the load balancer. If no connection throttle has been configured, + returns an empty dict. 
+ """ + return self.manager.get_connection_throttle(self) + + + def add_connection_throttle(self, maxConnectionRate=None, + maxConnections=None, minConnections=None, rateInterval=None): + """ + Updates the connection throttling information for the load balancer with + the supplied values. At least one of the parameters must be supplied. + """ + if not any((maxConnectionRate, maxConnections, minConnections, + rateInterval)): + # Pointless call + return + return self.manager.add_connection_throttle(self, + maxConnectionRate=maxConnectionRate, maxConnections=maxConnections, + minConnections=minConnections, rateInterval=rateInterval) + + + def delete_connection_throttle(self): + """ + Deletes all connection throttling settings for the load balancer. + """ + return self.manager.delete_connection_throttle(self) + + + def get_ssl_termination(self): + """ + Returns a dict representing the SSL termination configuration + for the load balancer. If SSL termination has not been configured, + returns an empty dict. + """ + return self.manager.get_ssl_termination(self) + + + def add_ssl_termination(self, securePort, privatekey, certificate, + intermediateCertificate=None, enabled=True, + secureTrafficOnly=False): + """ + Adds SSL termination information to the load balancer. If SSL + termination has already been configured, it is updated with the + supplied settings. + """ + return self.manager.add_ssl_termination(self, securePort=securePort, + privatekey=privatekey, certificate=certificate, + intermediateCertificate=intermediateCertificate, + enabled=enabled, secureTrafficOnly=secureTrafficOnly) + + + def update_ssl_termination(self, securePort=None, enabled=None, + secureTrafficOnly=None): + """ + Updates existing SSL termination information for the load balancer + without affecting the existing certificates/keys. + """ + return self.manager.update_ssl_termination(self, securePort=securePort, + enabled=enabled, secureTrafficOnly=secureTrafficOnly) + + + def delete_ssl_termination(self): + """ + Removes SSL termination for the load balancer. + """ + return self.manager.delete_ssl_termination(self) + + + def get_metadata(self): + """ + Returns the current metadata for the load balancer. + """ + return self.manager.get_metadata(self) + + + def set_metadata(self, metadata): + """ + Sets the metadata for the load balancer to the supplied dictionary + of values. Any existing metadata is cleared. + """ + return self.manager.set_metadata(self, metadata) + + + def update_metadata(self, metadata): + """ + Updates the existing metadata for the load balancer with + the supplied dictionary. + """ + return self.manager.update_metadata(self, metadata) + + + def delete_metadata(self, keys=None): + """ + Deletes metadata items specified by the 'keys' parameter for + this load balancer. If no value for 'keys' is provided, all + metadata is deleted. + """ + return self.manager.delete_metadata(self, keys=keys) + + + def get_metadata_for_node(self, node): + """ + Returns the current metadata for the specified node. + """ + return self.manager.get_metadata(self, node=node) + + + def set_metadata_for_node(self, node, metadata): + """ + Sets the metadata for the specified node to the supplied dictionary + of values. Any existing metadata is cleared. + """ + return self.manager.set_metadata(self, metadata, node=node) + + + def update_metadata_for_node(self, node, metadata): + """ + Updates the existing metadata for the specified node with + the supplied dictionary. 
+ """ + return self.manager.update_metadata(self, metadata, node=node) + + + def delete_metadata_for_node(self, node, keys=None): + """ + Deletes metadata items specified by the 'keys' parameter for + the specified node. If no value for 'keys' is provided, all + metadata is deleted. + """ + return self.manager.delete_metadata(self, keys=keys, node=node) + + + def get_error_page(self): + """ + Returns the current error page for the load balancer. + + Load balancers all have a default error page that is shown to + an end user who is attempting to access a load balancer node + that is offline/unavailable. + """ + return self.manager.get_error_page(self) + + + def set_error_page(self, html): + """ + Sets a custom error page for the load balancer. + + A single custom error page may be added per account load balancer + with an HTTP protocol. Page updates will override existing content. + If a custom error page is deleted, or the load balancer is changed + to a non-HTTP protocol, the default error page will be restored. + """ + return self.manager.set_error_page(self, html) + + + def clear_error_page(self): + """ + Resets the error page to the default. + """ + return self.manager.clear_error_page(self) + + + ## BEGIN - property definitions ## + def _get_connection_logging(self): + if self._connection_logging is None: + self._connection_logging = self.manager.get_connection_logging(self) + return self._connection_logging + + def _set_connection_logging(self, val): + self.manager.set_connection_logging(self, val) + self._connection_logging = val + + + def _get_content_caching(self): + if self._content_caching is None: + self._content_caching = self.manager.get_content_caching(self) + return self._content_caching + + def _set_content_caching(self, val): + self.manager.set_content_caching(self, val) + self._content_caching = val + + + def _get_session_persistence(self): + if self._session_persistence is None: + self._session_persistence = self.manager.get_session_persistence(self) + return self._session_persistence + + def _set_session_persistence(self, val): + if val: + if not isinstance(val, basestring) or (val.upper() not in + ("HTTP_COOKIE", "SOURCE_IP")): + raise exc.InvalidSessionPersistenceType("Session Persistence " + "must be one of 'HTTP_COOKIE' or 'SOURCE_IP'. '%s' is " + "not a valid setting." % val) + self.manager.set_session_persistence(self, val) + self._session_persistence = val.upper() + else: + self.manager.delete_session_persistence(self) + self._session_persistence = "" + + + connection_logging = property(_get_connection_logging, + _set_connection_logging, None, "The current state of connection " + "logging. Possible values are True or False.") + content_caching = property(_get_content_caching, _set_content_caching, + None, "The current state of content caching. Possible values are " + "True or False.") + session_persistence = property(_get_session_persistence, + _set_session_persistence, None, "The current state of session " + "persistence. 
Possible values are either 'HTTP_COOKIE' or " + "'SOURCE_IP', depending on the type of load balancing.") + ## END - property definitions ## + + + +class CloudLoadBalancerManager(BaseManager): + def update(self, lb, name=None, algorithm=None, protocol=None, + halfClosed=None, port=None, timeout=None): + """ + Provides a way to modify the following attributes of a load balancer: + - name + - algorithm + - protocol + - halfClosed + - port + - timeout + """ + body = {} + if name is not None: + body["name"] = name + if algorithm is not None: + body["algorithm"] = algorithm + if protocol is not None: + body["protocol"] = protocol + if halfClosed is not None: + body["halfClosed"] = halfClosed + if port is not None: + body["port"] = port + if timeout is not None: + body["timeout"] = timeout + if not body: + # Nothing passed + return + body = {"loadBalancer": body} + uri = "/loadbalancers/%s" % utils.get_id(lb) + try: + resp, resp_body = self.api.method_put(uri, body=body) + except exc.ClientException as e: + message = e.message + details = e.details + if message and details: + errmsg = "%s - %s" % (message, details) + else: + errmsg = message + raise exc.InvalidLoadBalancerParameters(errmsg) + return resp, resp_body + + def add_nodes(self, lb, nodes): + """Adds the list of nodes to the specified load balancer.""" + if not isinstance(nodes, (list, tuple)): + nodes = [nodes] + node_dicts = [nd.to_dict() for nd in nodes] + resp, body = self.api.method_post("/loadbalancers/%s/nodes" % lb.id, + body={"nodes": node_dicts}) + return resp, body + + + def delete_node(self, loadbalancer, node): + """Removes the node from its load balancer.""" + lb = node.parent + if not lb: + raise exc.UnattachedNode("No parent Load Balancer for this node " + "could be determined.") + resp, body = self.api.method_delete("/loadbalancers/%s/nodes/%s" % + (lb.id, node.id)) + return resp, body + + + def update_node(self, node, diff=None): + """Updates the node's attributes.""" + lb = node.parent + if not lb: + raise exc.UnattachedNode("No parent Load Balancer for this node " + "could be determined.") + + if diff is None: + diff = node._diff() + req_body = {"node": diff} + resp, body = self.api.method_put("/loadbalancers/%s/nodes/%s" % + (lb.id, node.id), body=req_body) + return resp, body + + + def add_virtualip(self, lb, vip): + """Adds the VirtualIP to the specified load balancer.""" + resp, body = self.api.method_post("/loadbalancers/%s/virtualips" % lb.id, + body=vip.to_dict()) + return resp, body + + + def delete_virtualip(self, loadbalancer, vip): + """Deletes the VirtualIP from its load balancer.""" + lb = vip.parent + if not lb: + raise exc.UnattachedVirtualIP("No parent Load Balancer for this " + "VirtualIP could be determined.") + resp, body = self.api.method_delete("/loadbalancers/%s/virtualips/%s" % + (lb.id, vip.id)) + return resp, body + + + def get_access_list(self, loadbalancer): + """ + Returns the current access list for the load balancer. + """ + uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + return body.get("accessList") + + + def add_access_list(self, loadbalancer, access_list): + """ + Adds the access list provided to the load balancer. + + The 'access_list' should be a list of dicts in the following format: + + [{"address": "192.0.43.10", "type": "DENY"}, + {"address": "192.0.43.11", "type": "ALLOW"}, + ... + {"address": "192.0.43.99", "type": "DENY"}, + ] + + If no access list exists, it is created. 
If an access list + already exists, it is updated with the provided list. + """ + req_body = {"accessList": access_list} + uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer) + resp, body = self.api.method_post(uri, body=req_body) + return body + + + def delete_access_list(self, loadbalancer): + """ + Removes the access list from this load balancer. + """ + uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer) + resp, body = self.api.method_delete(uri) + return body + + + def delete_access_list_items(self, loadbalancer, item_ids): + """ + Removes the item(s) from the load balancer's access list + that match the provided IDs. 'item_ids' should be one or + more access list item IDs. + """ + if not isinstance(item_ids, (list, tuple)): + item_ids = [item_ids] + valid_ids = [itm["id"] for itm in self.get_access_list(loadbalancer)] + bad_ids = [str(itm) for itm in item_ids if itm not in valid_ids] + if bad_ids: + raise exc.AccessListIDNotFound("The following ID(s) are not valid " + "Access List items: %s" % ", ".join(bad_ids)) + items = "&".join(["id=%s" % item_id for item_id in item_ids]) + uri = "/loadbalancers/%s/accesslist?%s" % ( + utils.get_id(loadbalancer), items) + # TODO: add the item ids + resp, body = self.api.method_delete(uri) + return body + + + def get_health_monitor(self, loadbalancer): + """ + Returns a dict representing the health monitor for the load + balancer. If no monitor has been configured, returns an + empty dict. + """ + uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + return body.get("healthMonitor", {}) + + + def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10, + attemptsBeforeDeactivation=3, path="/", statusRegex=None, + bodyRegex=None, hostHeader=None): + """ + Adds a health monitor to the load balancer. If a monitor already + exists, it is updated with the supplied settings. + """ + uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer) + req_body = {"healthMonitor": { + "type": type, + "delay": delay, + "timeout": timeout, + "attemptsBeforeDeactivation": attemptsBeforeDeactivation, + }} + uptype = type.upper() + if uptype.startswith("HTTP"): + lb = self._get_lb(loadbalancer) + if uptype != lb.protocol: + raise exc.ProtocolMismatch("Cannot set the Health Monitor type " + "to '%s' when the Load Balancer's protocol is '%s'." % + (type, lb.protocol)) + if not all((path, statusRegex, bodyRegex)): + raise exc.MissingHealthMonitorSettings("When creating an HTTP(S) " + "monitor, you must provide the 'path', 'statusRegex' and " + "'bodyRegex' parameters.") + body_hm = req_body["healthMonitor"] + body_hm["path"] = path + body_hm["statusRegex"] = statusRegex + body_hm["bodyRegex"] = bodyRegex + if hostHeader: + body_hm["hostHeader"] = hostHeader + resp, body = self.api.method_put(uri, body=req_body) + return body + + + def delete_health_monitor(self, loadbalancer): + """ + Deletes the health monitor for the load balancer. + """ + uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer) + resp, body = self.api.method_delete(uri) + + + def get_connection_throttle(self, loadbalancer): + """ + Returns a dict representing the connection throttling information + for the load balancer. If no connection throttle has been configured, + returns an empty dict. 
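+
+        A sketch of the shape of the returned dict (the field names match
+        those accepted by add_connection_throttle(); the values shown are
+        samples only):
+
+            {"maxConnectionRate": 100,
+             "maxConnections": 500,
+             "minConnections": 10,
+             "rateInterval": 60}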
+ """ + uri = "/loadbalancers/%s/connectionthrottle" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + return body.get("connectionThrottle", {}) + + + def add_connection_throttle(self, loadbalancer, maxConnectionRate=None, + maxConnections=None, minConnections=None, rateInterval=None): + """ + Creates or updates the connection throttling information for the load + balancer. When first creating the connection throttle, all 4 parameters + must be supplied. When updating an existing connection throttle, at + least one of the parameters must be supplied. + """ + settings = {} + if maxConnectionRate: + settings["maxConnectionRate"] = maxConnectionRate + if maxConnections: + settings["maxConnections"] = maxConnections + if minConnections: + settings["minConnections"] = minConnections + if rateInterval: + settings["rateInterval"] = rateInterval + req_body = {"connectionThrottle": settings} + uri = "/loadbalancers/%s/connectionthrottle" % utils.get_id(loadbalancer) + resp, body = self.api.method_put(uri, body=req_body) + return body + + + def delete_connection_throttle(self, loadbalancer): + """ + Deletes all connection throttling settings for the load balancer. + """ + uri = "/loadbalancers/%s/connectionthrottle" % utils.get_id(loadbalancer) + resp, body = self.api.method_delete(uri) + + + def get_ssl_termination(self, loadbalancer): + """ + Returns a dict representing the SSL termination configuration + for the load balancer. If SSL termination has not been configured, + returns an empty dict. + """ + uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer) + try: + resp, body = self.api.method_get(uri) + except exc.NotFound: + # For some reason, instead of returning an empty dict like the + # other API GET calls, this raises a 404. + return {} + return body.get("sslTermination", {}) + + + def add_ssl_termination(self, loadbalancer, securePort, privatekey, certificate, + intermediateCertificate, enabled=True, secureTrafficOnly=False): + """ + Adds SSL termination information to the load balancer. If SSL termination + has already been configured, it is updated with the supplied settings. + """ + uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer) + req_body = {"sslTermination": { + "certificate": certificate, + "enabled": enabled, + "secureTrafficOnly": secureTrafficOnly, + "privatekey": privatekey, + "intermediateCertificate": intermediateCertificate, + "securePort": securePort, + }} + resp, body = self.api.method_put(uri, body=req_body) + return body + + + def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None, + secureTrafficOnly=None): + """ + Updates existing SSL termination information for the load balancer + without affecting the existing certificates/keys. 
+ """ + ssl_info = self.get_ssl_termination(loadbalancer) + if not ssl_info: + raise exc.NoSSLTerminationConfiguration("You must configure SSL " + "termination on this load balancer before attempting " + "to update it.") + if securePort is None: + securePort = ssl_info["securePort"] + if enabled is None: + enabled = ssl_info["enabled"] + if secureTrafficOnly is None: + secureTrafficOnly = ssl_info["secureTrafficOnly"] + uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer) + req_body = {"sslTermination": { + "enabled": enabled, + "secureTrafficOnly": secureTrafficOnly, + "securePort": securePort, + }} + resp, body = self.api.method_put(uri, body=req_body) + return body + + + def delete_ssl_termination(self, loadbalancer): + """ + Deletes the SSL Termination configuration for the load balancer. + """ + uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer) + resp, body = self.api.method_delete(uri) + + + def get_metadata(self, loadbalancer, node=None, raw=False): + """ + Returns the current metadata for the load balancer. If 'node' is + provided, returns the current metadata for that node. + """ + if node: + uri = "/loadbalancers/%s/nodes/%s/metadata" % ( + utils.get_id(loadbalancer), utils.get_id(node)) + else: + uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + meta = body.get("metadata", []) + if raw: + return meta + ret = dict([(itm["key"], itm["value"]) for itm in meta]) + return ret + + + def set_metadata(self, loadbalancer, metadata, node=None): + """ + Sets the metadata for the load balancer to the supplied dictionary + of values. Any existing metadata is cleared. If 'node' is provided, + the metadata for that node is set instead of for the load balancer. + """ + # Delete any existing metadata + self.delete_metadata(loadbalancer, node=node) + # Convert the metadata dict into the list format + metadata_list = [{"key": key, "value": val} + for key, val in metadata.items()] + if node: + uri = "/loadbalancers/%s/nodes/%s/metadata" % ( + utils.get_id(loadbalancer), utils.get_id(node)) + else: + uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer) + req_body = {"metadata": metadata_list} + resp, body = self.api.method_post(uri, body=req_body) + return body + + + def update_metadata(self, loadbalancer, metadata, node=None): + """ + Updates the existing metadata with the supplied dictionary. If + 'node' is supplied, the metadata for that node is updated instead + of for the load balancer. 
+ """ + # Get the existing metadata + md = self.get_metadata(loadbalancer, raw=True) + id_lookup = dict([(itm["key"], itm["id"]) for itm in md]) + metadata_list = [] + # Updates must be done individually + for key, val in metadata.items(): + try: + meta_id = id_lookup[key] + if node: + uri = "/loadbalancers/%s/nodes/%s/metadata/%s" % ( + utils.get_id(loadbalancer), utils.get_id(node), + meta_id) + else: + uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer) + req_body = {"meta": {"value": val}} + resp, body = self.api.method_put(uri, body=req_body) + except KeyError: + # Not an existing key; add to metadata_list + metadata_list.append({"key": key, "value": val}) + if metadata_list: + # New items; POST them + if node: + uri = "/loadbalancers/%s/nodes/%s/metadata" % ( + utils.get_id(loadbalancer), utils.get_id(node)) + else: + uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer) + req_body = {"metadata": metadata_list} + resp, body = self.api.method_post(uri, body=req_body) + + + def delete_metadata(self, loadbalancer, keys=None, node=None): + """ + Deletes metadata items specified by the 'keys' parameter. If no value + for 'keys' is provided, all metadata is deleted. If 'node' is supplied, + the metadata for that node is deleted instead of the load balancer. + """ + if keys and not isinstance(keys, (list, tuple)): + keys = [keys] + md = self.get_metadata(loadbalancer, node=node, raw=True) + if keys: + md = [dct for dct in md if dct["key"] in keys] + if not md: + # Nothing to do; log it? Raise an error? + return + id_list = "&".join(["id=%s" % itm["id"] for itm in md]) + if node: + uri = "/loadbalancers/%s/nodes/%s/metadata?%s" % ( + utils.get_id(loadbalancer), utils.get_id(node), id_list) + else: + uri = "/loadbalancers/%s/metadata?%s" % ( + utils.get_id(loadbalancer), id_list) + resp, body = self.api.method_delete(uri) + return body + + + def get_error_page(self, loadbalancer): + """ + Load Balancers all have a default error page that is shown to + an end user who is attempting to access a load balancer node + that is offline/unavailable. + """ + uri = "/loadbalancers/%s/errorpage" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + return body + + + def set_error_page(self, loadbalancer, html): + """ + A single custom error page may be added per account load balancer + with an HTTP protocol. Page updates will override existing content. + If a custom error page is deleted, or the load balancer is changed + to a non-HTTP protocol, the default error page will be restored. + """ + uri = "/loadbalancers/%s/errorpage" % utils.get_id(loadbalancer) + req_body = {"errorpage": {"content": html}} + resp, body = self.api.method_put(uri, body=req_body) + return body + + + def clear_error_page(self, loadbalancer): + """ + Resets the error page to the default. + """ + uri = "/loadbalancers/%s/errorpage" % utils.get_id(loadbalancer) + resp, body = self.api.method_delete(uri) + return body + + + def get_usage(self, loadbalancer=None, start=None, end=None): + """ + Return the load balancer usage records for this account. If 'loadbalancer' + is None, records for all load balancers are returned. You may optionally + include a start datetime or an end datetime, or both, which will limit + the records to those on or after the start time, and those before or on the + end time. These times should be Python datetime.datetime objects, Python + datetime.date objects, or strings in the format: "YYYY-MM-DD HH:MM:SS" or + "YYYY-MM-DD". 
+ """ + if start is end is None: + period = None + else: + parts = [] + startStr = utils.iso_time_string(start) + if startStr: + parts.append("startTime=%s" % startStr) + endStr = utils.iso_time_string(end) + if endStr: + parts.append("endTime=%s" % endStr) + period = "&".join(parts).strip("&") + if loadbalancer is None: + uri = "/loadbalancers/usage" + else: + uri = "/loadbalancers/%s/usage" % utils.get_id(loadbalancer) + if period: + uri = "%s?%s" % (uri, period) + resp, body = self.api.method_get(uri) + return body + + + def get_stats(self, loadbalancer): + """ + Returns statistics for the given load balancer. + """ + uri = "/loadbalancers/%s/stats" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + return body + + + def get_session_persistence(self, loadbalancer): + """ + Returns the session persistence setting for the given load balancer. + """ + uri = "/loadbalancers/%s/sessionpersistence" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + ret = body["sessionPersistence"].get("persistenceType", "") + return ret + + + def set_session_persistence(self, loadbalancer, val): + """ + Sets the session persistence for the given load balancer. + """ + val = val.upper() + uri = "/loadbalancers/%s/sessionpersistence" % utils.get_id(loadbalancer) + req_body = {"sessionPersistence": { + "persistenceType": val, + }} + resp, body = self.api.method_put(uri, body=req_body) + return body + + + def delete_session_persistence(self, loadbalancer): + """ + Removes the session persistence setting for the given load balancer. + """ + uri = "/loadbalancers/%s/sessionpersistence" % utils.get_id(loadbalancer) + resp, body = self.api.method_delete(uri) + return body + + + def get_connection_logging(self, loadbalancer): + """ + Returns the connection logging setting for the given load balancer. + """ + uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + ret = body.get("connectionLogging", {}).get("enabled", False) + return ret + + + def set_connection_logging(self, loadbalancer, val): + """ + Sets the connection logging for the given load balancer. + """ + uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer) + val = str(val).lower() + req_body = {"connectionLogging": { + "enabled": val, + }} + resp, body = self.api.method_put(uri, body=req_body) + return body + + + def get_content_caching(self, loadbalancer): + """ + Returns the content caching setting for the given load balancer. + """ + uri = "/loadbalancers/%s/contentcaching" % utils.get_id(loadbalancer) + resp, body = self.api.method_get(uri) + ret = body.get("contentCaching", {}).get("enabled", False) + return ret + + + def set_content_caching(self, loadbalancer, val): + """ + Sets the content caching for the given load balancer. + """ + uri = "/loadbalancers/%s/contentcaching" % utils.get_id(loadbalancer) + val = str(val).lower() + req_body = {"contentCaching": { + "enabled": val, + }} + resp, body = self.api.method_put(uri, body=req_body) + return body + + + def _get_lb(self, lb_or_id): + """ + Accepts either a loadbalancer or the ID of a loadbalancer, and returns + the CloudLoadBalancer instance. 
+ """ + if isinstance(lb_or_id, CloudLoadBalancer): + ret = lb_or_id + else: + ret = self.get(lb_or_id) + return ret + + + +class Node(object): + """Represents a Node for a Load Balancer.""" + def __init__(self, address=None, port=None, condition=None, weight=None, + status=None, parent=None, type=None, id=None): + if condition is None: + condition = "ENABLED" + if not all((address, port)): + raise exc.InvalidNodeParameters("You must include an address and a " + "port when creating a node.") + self.address = address + self.port = port + self.condition = condition + if weight is None: + weight = 1 + self.weight = weight + self.status = status + self.parent = parent + self.type = type + self.id = id + self._original_state = self.to_dict() + + + def __repr__(self): + tmp = "<Node type=%s, condition=%s, id=%s, address=%s, port=%s weight=%s>" + return tmp % (self.type, self.condition, self.id, self.address, self.port, + self.weight) + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.__dict__ == other.__dict__) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + """Convert this Node to a dict representation for passing to the API.""" + return {"address": self.address, + "port": self.port, + "condition": self.condition, + } + + + def get_metadata(self): + """ + Returns the current metadata for the node. + """ + return self.manager.get_metadata(self, node=self) + + + def set_metadata(self, metadata): + """ + Sets the metadata for the node to the supplied dictionary + of values. Any existing metadata is cleared. + """ + return self.manager.set_metadata(self, metadata, node=self) + + + def update_metadata(self, metadata): + """ + Updates the existing metadata for the node with + the supplied dictionary. + """ + return self.manager.update_metadata(self, metadata, node=self) + + + def delete_metadata(self, keys=None): + """ + Deletes metadata items specified by the 'keys' parameter for + this node. If no value for 'keys' is provided, all + metadata is deleted. + """ + return self.manager.delete_metadata(self, keys=keys, node=self) + + + @assure_parent + def delete(self): + """Removes this Node from its Load Balancer.""" + self.parent.delete_node(self) + + + def _diff(self): + diff_dict = {} + for att, val in self._original_state.items(): + curr = getattr(self, att) + if curr != val: + diff_dict[att] = curr + return diff_dict + + + @assure_parent + def update(self): + """ + Pushes any local changes to the object up to the actual load + balancer node. + """ + diff = self._diff() + if not diff: + #Nothing to do! + return + self.parent.update_node(self, diff) + + + def get_device(self): + """ + Returns a reference to the device that is represented by this node. + Returns None if no such device can be determined. + """ + addr = self.address + servers = [server for server in pyrax.cloudservers.list() + if addr in server.networks.get("private", "")] + try: + return servers[0] + except IndexError: + return None + + + +class VirtualIP(object): + """Represents a Virtual IP for a Load Balancer.""" + def __init__(self, type=None, address=None, ipVersion=None, id=None, + parent=None): + if type is None: + type = "PUBLIC" + if type.upper() not in ("PUBLIC", "SERVICENET"): + raise exc.InvalidVirtualIPType("Virtual IPs must be one of " + "'PUBLIC' or 'SERVICENET' type; '%s' is not valid." 
% type)
+        if not ipVersion:
+            ipVersion = "IPV4"
+        if ipVersion.upper() not in ("IPV4", "IPV6"):
+            raise exc.InvalidVirtualIPVersion("Virtual IP versions must be one "
+                    "of 'IPV4' or 'IPV6'; '%s' is not valid." % ipVersion)
+        self.type = type
+        self.address = address
+        self.ip_version = ipVersion
+        self.id = id
+        self.parent = parent
+
+
+    def __repr__(self):
+        return "<VirtualIP type=%s, id=%s, address=%s, version=%s>" % (
+                self.type, self.id, self.address, self.ip_version)
+
+
+    def to_dict(self):
+        """
+        Convert this VirtualIP to a dict representation for passing
+        to the API.
+        """
+        return {"type": self.type,
+                "ipVersion": self.ip_version}
+
+
+    @assure_parent
+    def delete(self):
+        self.parent.delete_virtualip(self)
+
+
+
+class CloudLoadBalancerClient(BaseClient):
+    """
+    This is the primary class for interacting with Cloud Load Balancers.
+    """
+    name = "Cloud Load Balancers"
+
+    def __init__(self, *args, **kwargs):
+        # Bring these two classes into the Client namespace
+        self.Node = Node
+        self.VirtualIP = VirtualIP
+        self._algorithms = None
+        self._protocols = None
+        self._allowed_domains = None
+        super(CloudLoadBalancerClient, self).__init__(*args, **kwargs)
+
+
+    def _configure_manager(self):
+        """
+        Creates the manager to handle the load balancer instances.
+        """
+        self._manager = CloudLoadBalancerManager(self,
+                resource_class=CloudLoadBalancer,
+                response_key="loadBalancer", uri_base="loadbalancers")
+
+
+    def _create_body(self, name, port=None, protocol=None, nodes=None,
+            virtual_ips=None, algorithm=None, halfClosed=None, accessList=None,
+            connectionLogging=None, connectionThrottle=None, healthMonitor=None,
+            metadata=None, timeout=None, sessionPersistence=None):
+        """
+        Used to create the dict required to create a load balancer instance.
+        """
+        required = (nodes, virtual_ips, port, protocol)
+        if not all(required):
+            raise exc.MissingLoadBalancerParameters("Load Balancer creation "
+                    "requires at least one node, one virtual IP, "
+                    "a protocol, and a port.")
+        nodes = utils.coerce_string_to_list(nodes)
+        virtual_ips = utils.coerce_string_to_list(virtual_ips)
+        bad_conditions = [node.condition for node in nodes
+                if node.condition.upper() not in ("ENABLED", "DISABLED")]
+        if bad_conditions:
+            raise exc.InvalidNodeCondition("Nodes for new load balancer must be "
+                    "created in either 'ENABLED' or 'DISABLED' condition; "
+                    "received the following invalid conditions: %s" %
+                    ", ".join(set(bad_conditions)))
+        node_dicts = [nd.to_dict() for nd in nodes]
+        vip_dicts = [vip.to_dict() for vip in virtual_ips]
+        body = {"loadBalancer": {
+                "name": name,
+                "port": port,
+                "protocol": protocol,
+                "nodes": node_dicts,
+                "virtualIps": vip_dicts,
+                "algorithm": algorithm or "RANDOM",
+                "halfClosed": halfClosed,
+                "accessList": accessList,
+                "connectionLogging": connectionLogging,
+                "connectionThrottle": connectionThrottle,
+                "healthMonitor": healthMonitor,
+                "metadata": metadata,
+                "timeout": timeout,
+                "sessionPersistence": sessionPersistence,
+                }}
+        return body
+
+
+    def get_usage(self, loadbalancer=None, start=None, end=None):
+        """
+        Return the load balancer usage records for this account. If
+        'loadbalancer' is None, records for all load balancers are returned.
+        You may optionally include a start datetime or an end datetime, or
+        both, which will limit the records to those on or after the start
+        time, and those before or on the end time.
These times should be Python datetime.datetime objects, Python + datetime.date objects, or strings in the format: "YYYY-MM-DD HH:MM:SS" or + "YYYY-MM-DD". + """ + return self._manager.get_usage(loadbalancer=loadbalancer, start=start, + end=end) + + + @property + def allowed_domains(self): + """ + This property lists the allowed domains for a load balancer. + + The allowed domains are restrictions set for the allowed domain names + used for adding load balancer nodes. In order to submit a domain name + as an address for the load balancer node to add, the user must verify + that the domain is valid by using the List Allowed Domains call. Once + verified, simply supply the domain name in place of the node's address + in the add_nodes() call. + """ + if self._allowed_domains is None: + uri = "/loadbalancers/alloweddomains" + resp, body = self.method_get(uri) + dom_list = body["allowedDomains"] + self._allowed_domains = [itm["allowedDomain"]["name"] + for itm in dom_list] + return self._allowed_domains + + + @property + def algorithms(self): + """ + Returns a list of available load balancing algorithms. + """ + if self._algorithms is None: + uri = "/loadbalancers/algorithms" + resp, body = self.method_get(uri) + self._algorithms = [alg["name"] for alg in body["algorithms"]] + return self._algorithms + + + @property + def protocols(self): + """ + Returns a list of available load balancing protocols. + """ + if self._protocols is None: + uri = "/loadbalancers/protocols" + resp, body = self.method_get(uri) + self._protocols = [proto["name"] for proto in body["protocols"]] + return self._protocols + + + @assure_loadbalancer + def update(self, loadbalancer, name=None, algorithm=None, protocol=None, + halfClosed=None, port=None, timeout=None): + """ + Provides a way to modify the following attributes of a load balancer: + - name + - algorithm + - protocol + - halfClosed + - port + - timeout + """ + return self._manager.update(loadbalancer, name=name, + algorithm=algorithm, protocol=protocol, halfClosed=halfClosed, + port=port, timeout=timeout) + + + @assure_loadbalancer + def add_nodes(self, loadbalancer, nodes): + """Adds the nodes to this load balancer.""" + return loadbalancer.add_nodes(nodes) + + + @assure_loadbalancer + def add_virtualip(self, loadbalancer, vip): + """Adds the virtual IP to this load balancer.""" + return loadbalancer.add_virtualip(vip) + + + def delete_node(self, node): + """Removes the node from its load balancer.""" + return node.delete() + + + def update_node(self, node): + """Updates the node's attributes.""" + return node.update() + + + def delete_virtualip(self, vip): + """Deletes the VirtualIP from its load balancer.""" + return vip.delete() + + + @assure_loadbalancer + def get_access_list(self, loadbalancer): + """ + Returns the current access list for the load balancer. + """ + return loadbalancer.get_access_list() + + + @assure_loadbalancer + def add_access_list(self, loadbalancer, access_list): + """ + Adds the access list provided to the load balancer. + + The 'access_list' should be a dict in the following format: + + {"accessList": [ + {"address": "192.0.43.10", "type": "DENY"}, + {"address": "192.0.43.11", "type": "ALLOW"}, + ... + {"address": "192.0.43.99", "type": "DENY"}, + ] + } + + If no access list exists, it is created. If an access list + already exists, it is updated with the provided list. 
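+
+        A minimal usage sketch (the names 'clb' and 'lb' are hypothetical,
+        standing in for an authenticated CloudLoadBalancerClient and an
+        existing load balancer; the format follows the dict shown above):
+
+            access_list = {"accessList": [
+                    {"address": "192.0.43.10", "type": "DENY"},
+                    {"address": "192.0.43.11", "type": "ALLOW"},
+                    ]}
+            clb.add_access_list(lb, access_list)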
+ """ + return loadbalancer.add_access_list(access_list) + + + @assure_loadbalancer + def delete_access_list(self, loadbalancer): + """ + Removes the access list from this load balancer. + """ + return loadbalancer.delete_access_list() + + + @assure_loadbalancer + def delete_access_list_items(self, loadbalancer, item_ids): + """ + Removes the item(s) from the load balancer's access list + that match the provided IDs. 'item_ids' should be one or + more access list item IDs. + """ + return loadbalancer.delete_access_list_items(item_ids) + + + @assure_loadbalancer + def get_health_monitor(self, loadbalancer): + """ + Returns a dict representing the health monitor for the load + balancer. If no monitor has been configured, returns an + empty dict. + """ + return loadbalancer.get_health_monitor() + + + @assure_loadbalancer + def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10, + attemptsBeforeDeactivation=3, path="/", statusRegex=None, + bodyRegex=None, hostHeader=None): + """ + Adds a health monitor to the load balancer. If a monitor already + exists, it is updated with the supplied settings. + """ + abd = attemptsBeforeDeactivation + return loadbalancer.add_health_monitor(type=type, delay=delay, + timeout=timeout, attemptsBeforeDeactivation=abd, path=path, + statusRegex=statusRegex, bodyRegex=bodyRegex, + hostHeader=hostHeader) + + + @assure_loadbalancer + def delete_health_monitor(self, loadbalancer): + """ + Deletes the health monitor for the load balancer. + """ + return loadbalancer.delete_health_monitor() + + + @assure_loadbalancer + def get_connection_throttle(self, loadbalancer): + """ + Returns a dict representing the connection throttling information + for the load balancer. If no connection throttle has been configured, + returns an empty dict. + """ + return loadbalancer.get_connection_throttle() + + + @assure_loadbalancer + def add_connection_throttle(self, loadbalancer, maxConnectionRate=None, + maxConnections=None, minConnections=None, rateInterval=None): + """ + Updates the connection throttling information for the load balancer with + the supplied values. At least one of the parameters must be supplied. + """ + return loadbalancer.add_connection_throttle( + maxConnectionRate=maxConnectionRate, maxConnections=maxConnections, + minConnections=minConnections, rateInterval=rateInterval) + + + @assure_loadbalancer + def delete_connection_throttle(self, loadbalancer): + """ + Deletes all connection throttling settings for the load balancer. + """ + return loadbalancer.delete_connection_throttle() + + + @assure_loadbalancer + def get_ssl_termination(self, loadbalancer): + """ + Returns a dict representing the SSL termination configuration + for the load balancer. If SSL termination has not been configured, + returns an empty dict. + """ + return loadbalancer.get_ssl_termination() + + + @assure_loadbalancer + def add_ssl_termination(self, loadbalancer, securePort, privatekey, + certificate, intermediateCertificate, enabled=True, + secureTrafficOnly=False): + """ + Adds SSL termination information to the load balancer. If SSL termination + has already been configured, it is updated with the supplied settings. 
+ """ + return loadbalancer.add_ssl_termination(securePort=securePort, + privatekey=privatekey, certificate=certificate, + intermediateCertificate=intermediateCertificate, + enabled=enabled, secureTrafficOnly=secureTrafficOnly) + + + @assure_loadbalancer + def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None, + secureTrafficOnly=None): + """ + Updates existing SSL termination information for the load balancer + without affecting the existing certificates/keys. + """ + return loadbalancer.update_ssl_termination(securePort=securePort, + enabled=enabled, secureTrafficOnly=secureTrafficOnly) + + + @assure_loadbalancer + def delete_ssl_termination(self, loadbalancer): + """ + Removes SSL termination for the load balancer. + """ + return loadbalancer.delete_ssl_termination() + + + @assure_loadbalancer + def get_metadata(self, loadbalancer): + """ + Returns the current metadata for the load balancer. + """ + return loadbalancer.get_metadata() + + + @assure_loadbalancer + def set_metadata(self, loadbalancer, metadata): + """ + Sets the metadata for the load balancer to the supplied dictionary + of values. Any existing metadata is cleared. + """ + return loadbalancer.set_metadata(metadata) + + + @assure_loadbalancer + def update_metadata(self, loadbalancer, metadata): + """ + Updates the existing metadata for the load balancer with + the supplied dictionary. + """ + return loadbalancer.update_metadata(metadata) + + + @assure_loadbalancer + def delete_metadata(self, loadbalancer, keys=None): + """ + Deletes metadata items specified by the 'keys' parameter for + this load balancer. If no value for 'keys' is provided, all + metadata is deleted. + """ + return loadbalancer.delete_metadata(keys=keys) + + + @assure_loadbalancer + def get_metadata_for_node(self, loadbalancer, node): + """ + Returns the current metadata for the specified node. + """ + return loadbalancer.get_metadata_for_node(node) + + + @assure_loadbalancer + def set_metadata_for_node(self, loadbalancer, node, metadata): + """ + Sets the metadata for the specified node to the supplied dictionary + of values. Any existing metadata is cleared. + """ + return loadbalancer.set_metadata_for_node(node, metadata) + + + @assure_loadbalancer + def update_metadata_for_node(self, loadbalancer, node, metadata): + """ + Updates the existing metadata for the specified node with + the supplied dictionary. + """ + return loadbalancer.update_metadata_for_node(node, metadata) + + + @assure_loadbalancer + def delete_metadata_for_node(self, loadbalancer, node, keys=None): + """ + Deletes metadata items specified by the 'keys' parameter for + the specified node. If no value for 'keys' is provided, all + metadata is deleted. + """ + return loadbalancer.delete_metadata_for_node(node, keys=keys) + + + @assure_loadbalancer + def get_error_page(self, loadbalancer): + """ + Load Balancers all have a default error page that is shown to + an end user who is attempting to access a load balancer node + that is offline/unavailable. + """ + return loadbalancer.get_error_page() + + + @assure_loadbalancer + def set_error_page(self, loadbalancer, html): + """ + A single custom error page may be added per account load balancer + with an HTTP protocol. Page updates will override existing content. + If a custom error page is deleted, or the load balancer is changed + to a non-HTTP protocol, the default error page will be restored. 
+ """ + return loadbalancer.set_error_page(html) + + + @assure_loadbalancer + def clear_error_page(self, loadbalancer): + """ + Resets the error page to the default. + """ + return loadbalancer.clear_error_page() + + + @assure_loadbalancer + def get_connection_logging(self, loadbalancer): + """ + Returns the current setting for connection logging for the load balancer. + """ + return loadbalancer.connection_logging + + + @assure_loadbalancer + def set_connection_logging(self, loadbalancer, val): + """ + Sets connection logging for the load balancer to either True + or False. + """ + loadbalancer.connection_logging = val + + + @assure_loadbalancer + def get_content_caching(self, loadbalancer): + """ + Returns the current setting for content caching for the load balancer. + """ + return loadbalancer.content_caching + + + @assure_loadbalancer + def set_content_caching(self, loadbalancer, val): + """ + Sets content caching for the load balancer to either True + or False. + """ + loadbalancer.content_caching = val + + + @assure_loadbalancer + def get_session_persistence(self, loadbalancer): + """ + Returns the current setting for session persistence for + the load balancer. + """ + return loadbalancer.session_persistence + + + @assure_loadbalancer + def set_session_persistence(self, loadbalancer, val): + """ + Sets the type of session persistence for the load balancer. This + must be one of either "HTTP_COOKIE" or "SOURCE_IP", depending + on the type of load balancing. + """ + loadbalancer.session_persistence = val + + ## END pass-through methods ## diff --git a/awx/lib/site-packages/pyrax/cloudmonitoring.py b/awx/lib/site-packages/pyrax/cloudmonitoring.py new file mode 100644 index 0000000000..b4ee1d4c89 --- /dev/null +++ b/awx/lib/site-packages/pyrax/cloudmonitoring.py @@ -0,0 +1,1219 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2013 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + +from pyrax.client import BaseClient +import pyrax.exceptions as exc +from pyrax.manager import BaseManager +from pyrax.resource import BaseResource +import pyrax.utils as utils + + +_invalid_key_pat = re.compile(r"Validation error for key '([^']+)'") + + +def _params_to_dict(params, dct, local_dict): + for param in params: + val = local_dict.get(param) + if val is None: + continue + dct[param] = val + return dct + + + +class CloudMonitorEntity(BaseResource): + def update(self, agent=None, metadata=None): + """ + Only the agent_id and metadata are able to be updated via the API. + """ + self.manager.update_entity(self, agent=agent, metadata=metadata) + + + def list_checks(self): + """ + Returns a list of all CloudMonitorChecks defined for this entity. + """ + return self.manager.list_checks(self) + + + def delete_check(self, check): + """ + Deletes the specified check from this entity. 
+ """ + return self.manager.delete_check(self, check) + + + def list_metrics(self, check): + """ + Returns a list of all the metrics associated with the specified check. + """ + return self.manager.list_metrics(self, check) + + + def get_metric_data_points(self, check, metric, start, end, points=None, + resolution=None, stats=None): + """ + Returns the data points for a given metric for the given period. The + 'start' and 'end' times must be specified; they can be be either Python + date/datetime values, or a Unix timestamp. + + The 'points' parameter represents the number of points to return. The + 'resolution' parameter represents the granularity of the data. You must + specify either 'points' or 'resolution'. The allowed values for + resolution are: + FULL + MIN5 + MIN20 + MIN60 + MIN240 + MIN1440 + + Finally, the 'stats' parameter specifies the stats you want returned. + By default only the 'average' is returned. You omit this parameter, + pass in a single value, or pass in a list of values. The allowed values + are: + average + variance + min + max + """ + return self.manager.get_metric_data_points(self, check, metric, start, + end, points=points, resolution=resolution, stats=stats) + + + def create_alarm(self, check, notification_plan, criteria=None, + disabled=False, label=None, name=None, metadata=None): + """ + Creates an alarm that binds the check on this entity with a + notification plan. + """ + return self.manager.create_alarm(self, check, notification_plan, + criteria=criteria, disabled=disabled, label=label, name=name, + metadata=metadata) + + + def update_alarm(self, alarm, criteria=None, disabled=False, + label=None, name=None, metadata=None): + """ + Updates an existing alarm on this entity. + """ + return self.manager.update_alarm(self, alarm, criteria=criteria, + disabled=disabled, label=label, name=name, metadata=metadata) + + + def list_alarms(self): + """ + Returns a list of all the alarms created on this entity. + """ + return self.manager.list_alarms(self) + + + def get_alarm(self, alarm): + """ + Returns the alarm with the specified ID for this entity. If a + CloudMonitorAlarm instance is passed, returns a new CloudMonitorAlarm + object with the current state from the API. + """ + return self.manager.get_alarm(self, alarm) + + + def delete_alarm(self, alarm): + """ + Deletes the specified alarm. + """ + return self.manager.delete_alarm(self, alarm) + + + @property + def name(self): + return self.label + + + +class CloudMonitorNotificationManager(BaseManager): + """ + Handles all of the requests dealing with notifications. + """ + def create(self, notification_type, label=None, name=None, details=None): + """ + Defines a notification for handling an alarm. + """ + uri = "/%s" % self.uri_base + body = {"label": label or name, + "type": utils.get_id(notification_type), + "details": details, + } + resp, resp_body = self.api.method_post(uri, body=body) + return self.get(resp["x-object-id"]) + + + def test_notification(self, notification=None, notification_type=None, + details=None): + """ + This allows you to test either an existing notification, or a potential + notification before creating it. The actual notification comes from the + same server where the actual alert messages come from. This allow you + to, among other things, verify that your firewall is configured + properly. + + To test an existing notification, pass it as the 'notification' + parameter and leave the other parameters empty. 
To pre-test a + notification before creating it, leave 'notification' empty, but pass + in the 'notification_type' and 'details'. + """ + if notification: + # Test an existing notification + uri = "/%s/%s/test" % (self.uri_base, utils.get_id(notification)) + body = None + else: + uri = "/test-notification" + body = {"type": utils.get_id(notification_type), + "details": details} + resp, resp_body = self.api.method_post(uri, body=body) + + + def update_notification(self, notification, details): + """ + Updates the specified notification with the supplied details. + """ + if isinstance(notification, CloudMonitorNotification): + nid = notification.id + ntyp = notification.type + else: + # Supplied an ID + nfcn = self.get(notification) + nid = notification + ntyp = nfcn.type + uri = "/%s/%s" % (self.uri_base, nid) + body = {"type": ntyp, + "details": details} + resp, resp_body = self.api.method_put(uri, body=body) + + + def list_types(self): + """ + Returns a list of all available notification types. + """ + uri = "/notification_types" + resp, resp_body = self.api.method_get(uri) + return [CloudMonitorNotificationType(self, info) + for info in resp_body["values"]] + + + def get_type(self, notification_type_id): + """ + Returns a CloudMonitorNotificationType object for the given ID. + """ + uri = "/notification_types/%s" % utils.get_id(notification_type_id) + resp, resp_body = self.api.method_get(uri) + return CloudMonitorNotificationType(self, resp_body) + + + +class CloudMonitorNotificationPlanManager(BaseManager): + """ + Handles all of the requests dealing with Notification Plans. + """ + def create(self, label=None, name=None, critical_state=None, ok_state=None, + warning_state=None): + """ + Creates a notification plan to be executed when a monitoring check + triggers an alarm. You can optionally label (or name) the plan. + + A plan consists of one or more notifications to be executed when an + associated alarm is triggered. You can have different lists of actions + for CRITICAL, WARNING or OK states. + """ + uri = "/%s" % self.uri_base + body = {"label": label or name} + + def make_list_of_ids(parameter): + params = utils.coerce_string_to_list(parameter) + return [utils.get_id(param) for param in params] + + if critical_state: + critical_state = utils.coerce_string_to_list(critical_state) + body["critical_state"] = make_list_of_ids(critical_state) + if warning_state: + warning_state = utils.coerce_string_to_list(warning_state) + body["warning_state"] = make_list_of_ids(warning_state) + if ok_state: + ok_state = utils.coerce_string_to_list(ok_state) + body["ok_state"] = make_list_of_ids(ok_state) + resp, resp_body = self.api.method_post(uri, body=body) + return self.get(resp["x-object-id"]) + + +class CloudMonitorEntityManager(BaseManager): + """ + Handles all of the entity-specific requests. + """ + def update_entity(self, entity, agent=None, metadata=None): + """ + Updates the specified entity's values with the supplied parameters. + """ + body = {} + if agent: + body["agent_id"] = utils.get_id(agent) + if metadata: + body["metadata"] = metadata + if body: + uri = "/%s/%s" % (self.uri_base, utils.get_id(entity)) + resp, body = self.api.method_put(uri, body=body) + + + def list_checks(self, entity): + """ + Returns a list of all CloudMonitorChecks defined for this entity. 
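+
+        A minimal sketch (the names 'mgr' and 'ent' are hypothetical, standing
+        in for this manager and an existing entity):
+
+            for chk in mgr.list_checks(ent):
+                print chk.id, chk.name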
+ """ + uri = "/%s/%s/checks" % (self.uri_base, utils.get_id(entity)) + resp, resp_body = self.api.method_get(uri) + return [CloudMonitorCheck(self, val, entity) + for val in resp_body["values"]] + + + def create_check(self, entity, label=None, name=None, check_type=None, + details=None, disabled=False, metadata=None, + monitoring_zones_poll=None, timeout=None, period=None, + target_alias=None, target_hostname=None, target_receiver=None, + test_only=False, include_debug=False): + """ + Creates a check on the entity with the specified attributes. The + 'details' parameter should be a dict with the keys as the option name, + and the value as the desired setting. + + If the 'test_only' parameter is True, then the check is not created; + instead, the check is run and the results of the test run returned. If + 'include_debug' is True, additional debug information is returned. + According to the current Cloud Monitoring docs: + "Currently debug information is only available for the + remote.http check and includes the response body." + """ + if details is None: + raise exc.MissingMonitoringCheckDetails("The required 'details' " + "parameter was not passed to the create_check() method.") + if not (target_alias or target_hostname): + raise exc.MonitoringCheckTargetNotSpecified("You must specify " + "either the 'target_alias' or 'target_hostname' when " + "creating a check.") + ctype = utils.get_id(check_type) + is_remote = ctype.startswith("remote") + monitoring_zones_poll = utils.coerce_string_to_list( + monitoring_zones_poll) + monitoring_zones_poll = [utils.get_id(mzp) + for mzp in monitoring_zones_poll] + if is_remote and not monitoring_zones_poll: + raise exc.MonitoringZonesPollMissing("You must specify the " + "'monitoring_zones_poll' parameter for remote checks.") + body = {"label": label or name, + "details": details, + "disabled": disabled, + "type": utils.get_id(check_type), + } + params = ("monitoring_zones_poll", "timeout", "period", + "target_alias", "target_hostname", "target_receiver") + body = _params_to_dict(params, body, locals()) + if test_only: + uri = "/%s/%s/test-check" % (self.uri_base, entity.id) + if include_debug: + uri = "%s?debug=true" % uri + else: + uri = "/%s/%s/checks" % (self.uri_base, entity.id) + try: + resp, resp_body = self.api.method_post(uri, body=body) + except exc.BadRequest as e: + msg = e.message + dtls = e.details + match = _invalid_key_pat.match(msg) + if match: + missing = match.groups()[0].replace("details.", "") + if missing in details: + errcls = exc.InvalidMonitoringCheckDetails + errmsg = "".join(["The value passed for '%s' in the ", + "details parameter is not valid."]) % missing + else: + errcls = exc.MissingMonitoringCheckDetails + errmsg = "".join(["The required value for the '%s' ", + "setting is missing from the 'details' ", + "parameter."]) % missing + raise errcls(errmsg) + else: + if msg == "Validation error": + # Info is in the 'details' + raise exc.InvalidMonitoringCheckDetails("Validation " + "failed. Error: '%s'." % dtls) + else: + status = resp["status"] + if status == "201": + check_id = resp["x-object-id"] + return self.get_check(entity, check_id) + + + def find_all_checks(self, entity, **kwargs): + """ + Finds all checks for a given entity with attributes matching + ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. 
+ """ + found = [] + searches = kwargs.items() + for obj in self.list_checks(entity): + try: + if all(getattr(obj, attr) == value + for (attr, value) in searches): + found.append(obj) + except AttributeError: + continue + return found + + + def update_check(self, check, label=None, name=None, disabled=None, + metadata=None, monitoring_zones_poll=None, timeout=None, + period=None, target_alias=None, target_hostname=None, + target_receiver=None): + if monitoring_zones_poll: + monitoring_zones_poll = utils.coerce_string_to_list( + monitoring_zones_poll) + monitoring_zones_poll = [utils.get_id(mzp) + for mzp in monitoring_zones_poll] + body = {} + local_dict = locals() + label = label or name + params = ("label", "disabled", "metadata", "monitoring_zones_poll", + "timeout", "period", "target_alias", "target_hostname", + "target_receiver") + body = _params_to_dict(params, body, locals()) + entity = check.entity + uri = "/%s/%s/checks/%s" % (self.uri_base, utils.get_id(entity), + utils.get_id(check)) + try: + resp, resp_body = self.api.method_put(uri, body=body) + except exc.BadRequest as e: + msg = e.message + dtls = e.details + if msg.startswith("Validation error"): + raise exc.InvalidMonitoringCheckUpdate("The update failed " + "validation: %s: %s" % (msg, dtls)) + else: + # Some other issue. + raise + return resp_body + + + def get_check(self, entity, check): + """ + Returns the current version of the check for the entity. + """ + uri = "/%s/%s/checks/%s" % (self.uri_base, utils.get_id(entity), + utils.get_id(check)) + resp, resp_body = self.api.method_get(uri) + return CloudMonitorCheck(self, resp_body, entity) + + + def delete_check(self, entity, check): + """ + Deletes the specified check from the entity. + """ + uri = "/%s/%s/checks/%s" % (self.uri_base, utils.get_id(entity), + utils.get_id(check)) + resp, resp_body = self.api.method_delete(uri) + + + def list_metrics(self, entity, check): + """ + Returns a list of all the metrics associated with the specified check. + """ + uri = "/%s/%s/checks/%s/metrics" % (self.uri_base, + utils.get_id(entity), utils.get_id(check)) + resp, resp_body = self.api.method_get(uri) + metrics = [val["name"] + for val in resp_body["values"]] + return metrics + + + def get_metric_data_points(self, entity, check, metric, start, end, + points=None, resolution=None, stats=None): + """ + Returns the data points for a given metric for the given period. The + 'start' and 'end' times must be specified; they can be be either Python + date/datetime values, a string representing a date/datetime in either + of 'YYYY-MM-DD HH:MM:SS' or 'YYYY-MM-DD' formats, or a Unix timestamp: + + The 'points' parameter represents the number of points to return. The + 'resolution' parameter represents the granularity of the data. You must + specify either 'points' or 'resolution', but not both. The allowed + values for resolution are: 'FULL', 'MIN5', 'MIN20', 'MIN60', 'MIN240', + and 'MIN1440'. + + Finally, the 'stats' parameter specifies the stats you want returned. + By default only the 'average' is returned. You omit this parameter, + pass in a single value, or pass in a list of values. 
The allowed values + are: 'average', 'variance', 'min', and 'max' + """ + allowed_resolutions = ("FULL", "MIN5", "MIN20", "MIN60", "MIN240", + "MIN1440") + if not (points or resolution): + raise exc.MissingMonitoringCheckGranularity("You must specify " + "either the 'points' or 'resolution' parameter when " + "fetching metrics.") + if resolution: + if resolution.upper() not in allowed_resolutions: + raise exc.InvalidMonitoringMetricsResolution("The specified " + "resolution '%s' is not valid. The valid values are: " + "%s." % (resolution, str(allowed_resolutions))) + start_tm = utils.to_timestamp(start) + end_tm = utils.to_timestamp(end) + qparms = [] + qparms.append("from=%s" % int(start_tm)) + qparms.append("to=%s" % int(end_tm)) + if points: + qparms.append("points=%s" % points) + if resolution: + qparms.append("resolution=%s" % resolution.upper()) + if stats: + stats = utils.coerce_string_to_list(stats) + for stat in stats: + qparms.append("select=%s" % stat) + qparm = "&".join(qparms) + uri = "/%s/%s/checks/%s/metrics/%s/plot?%s" % (self.uri_base, + utils.get_id(entity), utils.get_id(check), metric, qparm) + try: + resp, resp_body = self.api.method_get(uri) + except exc.BadRequest as e: + msg = e.message + dtls = e.details + if msg.startswith("Validation error"): + raise exc.InvalidMonitoringMetricsRequest("Your request was " + "invalid: '%s'" % dtls) + else: + raise + return resp_body["values"] + + + def create_alarm(self, entity, check, notification_plan, criteria=None, + disabled=False, label=None, name=None, metadata=None): + """ + Creates an alarm that binds the check on the given entity with a + notification plan. + + Note that the 'criteria' parameter, if supplied, should be a string + representing the DSL for describing alerting conditions and their + output states. Pyrax does not do any validation of these criteria + statements; it is up to you as the developer to understand the language + and correctly form the statement. This alarm language is documented + online in the Cloud Monitoring section of http://docs.rackspace.com. + """ + uri = "/%s/%s/alarms" % (self.uri_base, utils.get_id(entity)) + body = {"check_id": utils.get_id(check), + "notification_plan_id": utils.get_id(notification_plan), + } + if criteria: + body["criteria"] = criteria + if disabled is not None: + body["disabled"] = disabled + label_name = label or name + if label_name: + body["label"] = label_name + if metadata: + body["metadata"] = metadata + resp, resp_body = self.api.method_post(uri, body=body) + + status = resp["status"] + if status == "201": + alarm_id = resp["x-object-id"] + return self.get_alarm(entity, alarm_id) + + def update_alarm(self, entity, alarm, criteria=None, disabled=False, + label=None, name=None, metadata=None): + """ + Updates an existing alarm on the given entity. See the comments on the + 'create_alarm()' regarding the criteria parameter. + """ + uri = "/%s/%s/alarms/%s" % (self.uri_base, utils.get_id(entity), + utils.get_id(alarm)) + body = {} + if criteria: + body["criteria"] = criteria + if disabled is not None: + body["disabled"] = disabled + label_name = label or name + if label_name: + body["label"] = label_name + if metadata: + body["metadata"] = metadata + resp, resp_body = self.api.method_put(uri, body=body) + + + def list_alarms(self, entity): + """ + Returns a list of all the alarms created on the specified entity. 
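+
+        A minimal sketch (the names 'mgr' and 'ent' are hypothetical):
+
+            for alm in mgr.list_alarms(ent):
+                print alm.id, alm.label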
+ """ + uri = "/%s/%s/alarms" % (self.uri_base, utils.get_id(entity)) + resp, resp_body = self.api.method_get(uri) + return [CloudMonitorAlarm(self, dct, entity) + for dct in resp_body["values"]] + + + def get_alarm(self, entity, alarm): + """ + Returns the alarm with the specified ID for this entity. If a + CloudMonitorAlarm instance is passed, returns a new CloudMonitorAlarm + object with the current state from the API. + """ + uri = "/%s/%s/alarms/%s" % (self.uri_base, utils.get_id(entity), + utils.get_id(alarm)) + resp, resp_body = self.api.method_get(uri) + return CloudMonitorAlarm(self, resp_body, entity) + + + def delete_alarm(self, entity, alarm): + """ + Deletes the specified alarm. + """ + uri = "/%s/%s/alarms/%s" % (self.uri_base, utils.get_id(entity), + utils.get_id(alarm)) + resp, resp_body = self.api.method_delete(uri) + + + +class CloudMonitorCheck(BaseResource): + """ + Represents a check defined for an entity. + """ + def __init__(self, manager, info, entity, key=None, loaded=False): + super(CloudMonitorCheck, self).__init__(manager, info, key=key, + loaded=loaded) + if not isinstance(entity, CloudMonitorEntity): + entity = manager.get(entity) + self.entity = entity + + + @property + def name(self): + return self.label + + + def get(self): + """Reloads the check with its current values.""" + new = self.manager.get_check(self.entity, self) + if new: + self._add_details(new._info) + + reload = get + + + def update(self, label=None, name=None, disabled=None, metadata=None, + monitoring_zones_poll=None, timeout=None, period=None, + target_alias=None, target_hostname=None, target_receiver=None): + """ + Updates an existing check with any of the parameters. + """ + self.manager.update_check(self, label=label, name=name, + disabled=disabled, metadata=metadata, + monitoring_zones_poll=monitoring_zones_poll, timeout=timeout, + period=period, target_alias=target_alias, + target_hostname=target_hostname, + target_receiver=target_receiver) + + + def delete(self): + """Removes this check from its entity.""" + self.manager.delete_check(self.entity, self) + + + def list_metrics(self): + """ + Returns a list of all the metrics associated with this check. + """ + return self.manager.list_metrics(self.entity, self) + + + def get_metric_data_points(self, metric, start, end, points=None, + resolution=None, stats=None): + """ + Returns the data points for a given metric for the given period. The + 'start' and 'end' times must be specified; they can be be either Python + date/datetime values, or a Unix timestamp. + + The 'points' parameter represents the number of points to return. The + 'resolution' parameter represents the granularity of the data. You must + specify either 'points' or 'resolution'. The allowed values for + resolution are: + FULL + MIN5 + MIN20 + MIN60 + MIN240 + MIN1440 + + Finally, the 'stats' parameter specifies the stats you want returned. + By default only the 'average' is returned. You omit this parameter, + pass in a single value, or pass in a list of values. The allowed values + are: + average + variance + min + max + """ + return self.manager.get_metric_data_points(self.entity, self, metric, + start, end, points=points, resolution=resolution, stats=stats) + + + def create_alarm(self, notification_plan, criteria=None, disabled=False, + label=None, name=None, metadata=None): + """ + Creates an alarm that binds this check with a notification plan. 
+ """ + return self.manager.create_alarm(self.entity, self, notification_plan, + criteria=criteria, disabled=disabled, label=label, name=name, + metadata=metadata) + + + +class CloudMonitorCheckType(BaseResource): + """ + Represents the type of monitor check to be run. Each check type + """ + @property + def field_names(self): + """ + Returns a list of all field names for this check type. + """ + return [field["name"] for field in self.fields] + + + @property + def required_field_names(self): + """ + Returns a list of the names of all required fields for this check type. + """ + return [field["name"] for field in self.fields + if not field["optional"]] + + + @property + def optional_field_names(self): + """ + Returns a list of the names of all optional fields for this check type. + """ + return [field["name"] for field in self.fields + if field["optional"]] + + + +class CloudMonitorZone(BaseResource): + """ + Represents a location from which Cloud Monitoring collects data. + """ + @property + def name(self): + return self.label + + + +class CloudMonitorNotification(BaseResource): + """ + Represents an action to take when an alarm is triggered. + """ + @property + def name(self): + return self.label + + + def update(self, details): + """ + Updates this notification with the supplied details. + """ + return self.manager.update_notification(self, details) + + + +class CloudMonitorNotificationType(BaseResource): + """ + Represents a class of action to take when an alarm is triggered. + """ + @property + def name(self): + return self.label + + + +class CloudMonitorNotificationPlan(BaseResource): + """ + A Notification plan is a list of the notification actions to take when an + alarm is triggered. + """ + @property + def name(self): + return self.label + + + +class CloudMonitorAlarm(BaseResource): + """ + Alarms bind alerting rules, entities, and notification plans into a logical + unit. + """ + def __init__(self, manager, info, entity, key=None, loaded=False): + super(CloudMonitorAlarm, self).__init__(manager, info, key=key, + loaded=loaded) + if not isinstance(entity, CloudMonitorEntity): + entity = manager.get(entity) + self.entity = entity + + + def update(self, criteria=None, disabled=False, label=None, name=None, + metadata=None): + """ + Updates this alarm. + """ + return self.entity.update_alarm(self, criteria=criteria, + disabled=disabled, label=label, name=name, metadata=metadata) + + + def get(self): + """ + Fetches the current state of the alarm from the API and updates the + object. + """ + new_alarm = self.entity.get_alarm(self) + if new_alarm: + self._add_details(new_alarm._info) + # Alias reload() to get() + reload = get + + + @property + def name(self): + return self.label + + + +class CloudMonitorClient(BaseClient): + """ + This is the base client for creating and managing Cloud Monitoring. + """ + + def __init__(self, *args, **kwargs): + super(CloudMonitorClient, self).__init__(*args, **kwargs) + self.name = "Cloud Monitoring" + + + def _configure_manager(self): + """ + Creates the Manager instance to handle networks. 
+ """ + self._entity_manager = CloudMonitorEntityManager(self, + uri_base="entities", resource_class=CloudMonitorEntity, + response_key=None, plural_response_key=None) + self._check_type_manager = BaseManager(self, + uri_base="check_types", resource_class=CloudMonitorCheckType, + response_key=None, plural_response_key=None) + self._monitoring_zone_manager = BaseManager(self, + uri_base="monitoring_zones", resource_class=CloudMonitorZone, + response_key=None, plural_response_key=None) + self._notification_manager = CloudMonitorNotificationManager(self, + uri_base="notifications", + resource_class=CloudMonitorNotification, + response_key=None, plural_response_key=None) + self._notification_plan_manager = CloudMonitorNotificationPlanManager( + self, uri_base="notification_plans", + resource_class=CloudMonitorNotificationPlan, + response_key=None, plural_response_key=None) + + + def get_account(self): + """ + Returns a dict with the following keys: id, webhook_token, and metadata. + """ + resp, resp_body = self.method_get("/account") + return resp_body + + + def get_limits(self): + """ + Returns a dict with the resource and rate limits for the account. + """ + resp, resp_body = self.method_get("/limits") + return resp_body + + + def get_audits(self): + """ + Every write operation performed against the API (PUT, POST or DELETE) + generates an audit record that is stored for 30 days. Audits record a + variety of information about the request including the method, URL, + headers, query string, transaction ID, the request body and the + response code. They also store information about the action performed + including a JSON list of the previous state of any modified objects. + For example, if you perform an update on an entity, this will record + the state of the entity before modification. + """ + resp, resp_body = self.method_get("/audits") + return resp_body["values"] + + + def list_entities(self): + return self._entity_manager.list() + + + def get_entity(self, entity): + return self._entity_manager.get(entity) + + + def create_entity(self, label=None, name=None, agent=None, + ip_addresses=None, metadata=None): + # NOTE: passing a non-None value for ip_addresses is required so that + # the _create_body() method can distinguish this as a request for a + # body dict for entities. + ip_addresses = ip_addresses or {} + resp = self._entity_manager.create(label=label, name=name, agent=agent, + ip_addresses=ip_addresses, metadata=metadata, + return_response=True) + status = resp["status"] + if status == "201": + ent_id = resp["x-object-id"] + return self.get_entity(ent_id) + + + def update_entity(self, entity, agent=None, metadata=None): + """ + Only the agent_id and metadata are able to be updated via the API. 
+ """ + self._entity_manager.update_entity(entity, agent=agent, + metadata=metadata) + + + def delete_entity(self, entity): + """Deletes the specified entity.""" + self._entity_manager.delete(entity) + + + def list_check_types(self): + return self._check_type_manager.list() + + + def get_check_type(self, check_type): + return self._check_type_manager.get(check_type) + + + def list_checks(self, entity): + return self._entity_manager.list_checks(entity) + + + def create_check(self, entity, label=None, name=None, check_type=None, + disabled=False, metadata=None, details=None, + monitoring_zones_poll=None, timeout=None, period=None, + target_alias=None, target_hostname=None, target_receiver=None, + test_only=False, include_debug=False): + """ + Creates a check on the entity with the specified attributes. The + 'details' parameter should be a dict with the keys as the option name, + and the value as the desired setting. + """ + return self._entity_manager.create_check(entity, label=label, + name=name, check_type=check_type, disabled=disabled, + metadata=metadata, details=details, + monitoring_zones_poll=monitoring_zones_poll, timeout=timeout, + period=period, target_alias=target_alias, + target_hostname=target_hostname, + target_receiver=target_receiver, test_only=test_only, + include_debug=include_debug) + + + def get_check(self, entity, check): + """Returns the current check for the given entity.""" + return self._entity_manager.get_check(entity, check) + + + def find_all_checks(self, entity, **kwargs): + """ + Finds all checks for a given entity with attributes matching + ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. + """ + return self._entity_manager.find_all_checks(entity, **kwargs) + + + def update_check(self, entity, check, label=None, name=None, disabled=None, + metadata=None, monitoring_zones_poll=None, timeout=None, + period=None, target_alias=None, target_hostname=None, + target_receiver=None): + """ + Updates an existing check with any of the parameters. + """ + self._entity_manager.update_check(entity, check, label=label, + name=name, disabled=disabled, metadata=metadata, + monitoring_zones_poll=monitoring_zones_poll, timeout=timeout, + period=period, target_alias=target_alias, + target_hostname=target_hostname, + target_receiver=target_receiver) + + + def delete_check(self, entity, check): + """ + Deletes the specified check from the entity. + """ + return self._entity_manager.delete_check(entity, check) + + + def list_metrics(self, entity, check): + """ + Returns a list of all the metrics associated with the specified check. + """ + return self._entity_manager.list_metrics(entity, check) + + + def get_metric_data_points(self, entity, check, metric, start, end, + points=None, resolution=None, stats=None): + """ + Returns the data points for a given metric for the given period. The + 'start' and 'end' times must be specified; they can be be either Python + date/datetime values, or a Unix timestamp. + + The 'points' parameter represents the number of points to return. The + 'resolution' parameter represents the granularity of the data. You must + specify either 'points' or 'resolution'. The allowed values for + resolution are: + FULL + MIN5 + MIN20 + MIN60 + MIN240 + MIN1440 + + Finally, the 'stats' parameter specifies the stats you want returned. + By default only the 'average' is returned. You omit this parameter, + pass in a single value, or pass in a list of values. 
The allowed values + are: + average + variance + min + max + """ + return self._entity_manager.get_metric_data_points(entity, check, + metric, start, end, points=points, resolution=resolution, + stats=stats) + + + def list_notifications(self): + """Returns a list of all defined notifications.""" + return self._notification_manager.list() + + + def get_notification(self, notification_id): + """ + Returns the CloudMonitorNotification object for the specified ID. + """ + return self._notification_manager.get(notification_id) + + + def test_notification(self, notification=None, notification_type=None, + details=None): + """ + This allows you to test either an existing notification, or a potential + notification before creating it. The actual notification comes from the + same server where the actual alert messages come from. This allow you + to, among other things, verify that your firewall is configured + properly. + + To test an existing notification, pass it as the 'notification' + parameter and leave the other parameters empty. To pre-test a + notification before creating it, leave 'notification' empty, but pass + in the 'notification_type' and 'details'. + """ + return self._notification_manager.test_notification( + notification=notification, notification_type=notification_type, + details=details) + + + def create_notification(self, notification_type, label=None, name=None, + details=None): + """ + Defines a notification for handling an alarm. + """ + return self._notification_manager.create(notification_type, + label=label, name=name, details=details) + + + def update_notification(self, notification, details): + """ + Updates the specified notification with the supplied details. + """ + return self._notification_manager.update_notification(notification, + details) + + + def delete_notification(self, notification): + """ + Deletes the specified notification. + """ + return self._notification_manager.delete(notification) + + + def create_notification_plan(self, label=None, name=None, + critical_state=None, ok_state=None, warning_state=None): + """ + Creates a notification plan to be executed when a monitoring check + triggers an alarm. + """ + return self._notification_plan_manager.create(label=label, name=name, + critical_state=critical_state, ok_state=ok_state, + warning_state=warning_state) + + + def list_notification_plans(self): + """ + Returns a list of all defined notification plans. + """ + return self._notification_plan_manager.list() + + + def get_notification_plan(self, notification_plan_id): + """ + Returns the CloudMonitorNotificationPlan object for the specified ID. + """ + return self._notification_plan_manager.get(notification_plan_id) + + + def delete_notification_plan(self, notification_plan): + """ + Deletes the specified notification plan. + """ + return self._notification_plan_manager.delete(notification_plan) + + + def create_alarm(self, entity, check, notification_plan, criteria=None, + disabled=False, label=None, name=None, metadata=None): + """ + Creates an alarm that binds the check on the given entity with a + notification plan. + """ + return self._entity_manager.create_alarm(entity, check, + notification_plan, criteria=criteria, disabled=disabled, + label=label, name=name, metadata=metadata) + + + def update_alarm(self, entity, alarm, criteria=None, disabled=False, + label=None, name=None, metadata=None): + """ + Updates an existing alarm on the given entity. 
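+
+        A minimal sketch (the names 'cm', 'ent', and 'alm' are hypothetical):
+
+            cm.update_alarm(ent, alm, disabled=True,
+                    label="muted during maintenance")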
+ """ + return self._entity_manager.update_alarm(entity, alarm, + criteria=criteria, disabled=disabled, label=label, name=name, + metadata=metadata) + + + def list_alarms(self, entity): + """ + Returns a list of all the alarms created on the specified entity. + """ + return self._entity_manager.list_alarms(entity) + + + def get_alarm(self, entity, alarm_id): + """ + Returns the alarm with the specified ID for the entity. + """ + return self._entity_manager.get_alarm(entity, alarm_id) + + + def delete_alarm(self, entity, alarm): + """ + Deletes the specified alarm. + """ + return self._entity_manager.delete_alarm(entity, alarm) + + + def list_notification_types(self): + return self._notification_manager.list_types() + + + def get_notification_type(self, nt_id): + return self._notification_manager.get_type(nt_id) + + + def list_monitoring_zones(self): + """ + Returns a list of all available monitoring zones. + """ + return self._monitoring_zone_manager.list() + + + def get_monitoring_zone(self, mz_id): + """ + Returns the monitoring zone for the given ID. + """ + return self._monitoring_zone_manager.get(mz_id) + + + ################################################################# + # The following methods are defined in the generic client class, + # but don't have meaning in monitoring, as there is not a single + # resource that defines this module. + ################################################################# + def list(self, limit=None, marker=None): + """Not applicable in Cloud Monitoring.""" + raise NotImplementedError + + def get(self, item): + """Not applicable in Cloud Monitoring.""" + raise NotImplementedError + + def create(self, *args, **kwargs): + """Not applicable in Cloud Monitoring.""" + raise NotImplementedError + + def delete(self, item): + """Not applicable in Cloud Monitoring.""" + raise NotImplementedError + + def find(self, **kwargs): + """Not applicable in Cloud Monitoring.""" + raise NotImplementedError + + def findall(self, **kwargs): + """Not applicable in Cloud Monitoring.""" + raise NotImplementedError + ################################################################# + + + def _create_body(self, name, label=None, agent=None, ip_addresses=None, + metadata=None): + """ + Used to create the dict required to create various resources. Accepts + either 'label' or 'name' as the keyword parameter for the label + attribute for entities. + """ + label = label or name + if ip_addresses is not None: + body = {"label": label} + if ip_addresses: + body["ip_addresses"] = ip_addresses + if agent: + body["agent_id"] = utils.get_id(agent) + if metadata: + body["metadata"] = metadata + return body diff --git a/awx/lib/site-packages/pyrax/cloudnetworks.py b/awx/lib/site-packages/pyrax/cloudnetworks.py new file mode 100644 index 0000000000..7c8b678f23 --- /dev/null +++ b/awx/lib/site-packages/pyrax/cloudnetworks.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2013 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from functools import wraps
+from pyrax.client import BaseClient
+import pyrax.exceptions as exc
+from pyrax.manager import BaseManager
+from pyrax.resource import BaseResource
+import pyrax.utils as utils
+
+# Constants to represent the 'special' network IDs.
+PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
+SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
+PSEUDO_NETWORKS = (PUBLIC_NET_ID, SERVICE_NET_ID)
+
+
+def _get_server_networks(network, public=False, private=False):
+    net_id = utils.get_id(network)
+    ret = [{"net-id": net_id}]
+    if public:
+        ret.append({"net-id": PUBLIC_NET_ID})
+    if private:
+        ret.append({"net-id": SERVICE_NET_ID})
+    return ret
+
+
+
+class CloudNetwork(BaseResource):
+    """
+    This represents a network in the cloud. It can be either an isolated
+    network, the public network, or the ServiceNet network.
+
+    While resources generally use 'name' as the text identifier, the Cloud
+    Networks API uses 'label' instead. This module aliases the attributes and
+    methods so that you can use either in your code.
+    """
+    id = None
+    cidr = None
+    label = None
+
+
+    def _get_name(self):
+        return self.label
+
+    def _set_name(self, name):
+        self.label = name
+
+    name = property(_get_name, _set_name)
+
+
+    @property
+    def is_isolated(self):
+        """Returns True if this is a user-defined network."""
+        return self.id not in PSEUDO_NETWORKS
+
+
+    def get(self):
+        if not self.is_isolated:
+            # These are placeholders, not actual networks
+            return
+        return super(CloudNetwork, self).get()
+
+
+    def delete(self):
+        """
+        Wraps the standard delete() method to catch expected exceptions and
+        raise the appropriate pyrax exceptions.
+        """
+        try:
+            return super(CloudNetwork, self).delete()
+        except exc.Forbidden as e:
+            # Network is in use
+            raise exc.NetworkInUse("Cannot delete a network in use by a server.")
+
+
+    def get_server_networks(self, public=False, private=False):
+        """
+        Creates the dict of network UUIDs required by Cloud Servers when
+        creating a new server with isolated networks.
+
+        By default only this network is included. If you wish to create a
+        server that has either the public (internet) or private (ServiceNet)
+        networks, you have to pass those parameters in with values of True.
+        """
+        return _get_server_networks(self, public=public, private=private)
+
+
+
+class CloudNetworkManager(BaseManager):
+    """
+    Does nothing special, but is used in testing.
+    """
+    pass
+
+
+
+class CloudNetworkClient(BaseClient):
+    """
+    This is the base client for creating and managing Cloud Networks.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CloudNetworkClient, self).__init__(*args, **kwargs)
+        self.name = "Cloud Networks"
+        # Constants to represent the 'special' network IDs.
+        self.PUBLIC_NET_ID = PUBLIC_NET_ID
+        self.SERVICE_NET_ID = SERVICE_NET_ID
+        self.PSEUDO_NETWORKS = PSEUDO_NETWORKS
+
+
+    def _configure_manager(self):
+        """
+        Creates the Manager instance to handle networks.
+        """
+        self._manager = CloudNetworkManager(self, resource_class=CloudNetwork,
+                response_key="network", uri_base="os-networksv2")
+
+
+    def _create_body(self, name, label=None, cidr=None):
+        """
+        Used to create the dict required to create a network. Accepts either
+        'label' or 'name' as the keyword parameter for the label attribute.
+        """
+        label = label or name
+        body = {"network": {
+                "label": label,
+                "cidr": cidr,
+                }}
+        return body
+
+
+    def create(self, label=None, name=None, cidr=None):
+        """
+        Wraps the basic create() call to handle specific failures.
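+
+        A minimal sketch (assuming 'cnw' is an authenticated
+        CloudNetworkClient; the label and CIDR are illustrative):
+
+            try:
+                net = cnw.create("my_isolated_net", cidr="192.168.0.0/24")
+            except exc.NetworkCountExceeded:
+                # handle hitting the isolated-network quota
+                pass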
+ """ + try: + return super(CloudNetworkClient, self).create(label=label, + name=name, cidr=cidr) + except exc.BadRequest as e: + msg = e.message + if "too many networks" in msg: + raise exc.NetworkCountExceeded("Cannot create network; the " + "maximum number of isolated networks already exist.") + elif "does not contain enough" in msg: + raise exc.NetworkCIDRInvalid("Networks must contain two or " + "more hosts; the CIDR '%s' is too restrictive." % cidr) + elif "CIDR is malformed" in msg: + raise exc.NetworkCIDRMalformed("The CIDR '%s' is not valid." % cidr) + else: + # Something unexpected + raise + + + def delete(self, network): + """ + Wraps the standard delete() method to catch expected exceptions and + raise the appropriate pyrax exceptions. + """ + try: + return super(CloudNetworkClient, self).delete(network) + except exc.Forbidden as e: + # Network is in use + raise exc.NetworkInUse("Cannot delete a network in use by a server.") + + + def find_network_by_label(self, label): + """ + This is inefficient; it gets all the networks and then filters on + the client side to find the matching name. + """ + networks = self.list() + match = [network for network in networks + if network.label == label] + if not match: + raise exc.NetworkNotFound("No network with the label '%s' exists" % + label) + elif len(match) > 1: + raise exc.NetworkLabelNotUnique("There were %s matches for the label " + "'%s'." % (len(match), label)) + return match[0] + # Create an alias using 'name' + find_network_by_name = find_network_by_label + + + def get_server_networks(self, network, public=False, private=False): + """ + Creates the dict of network UUIDs required by Cloud Servers when + creating a new server with isolated networks. + + By default only the specified network is included. If you wish to + create a server that has either the public (internet) or private + (ServiceNet) networks, you have to pass those parameters in with + values of True. + """ + return _get_server_networks(network, public=public, private=private) diff --git a/awx/lib/site-packages/pyrax/exceptions.py b/awx/lib/site-packages/pyrax/exceptions.py new file mode 100644 index 0000000000..aad37ae4c1 --- /dev/null +++ b/awx/lib/site-packages/pyrax/exceptions.py @@ -0,0 +1,426 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Since we use the novaclient package, we need to expose its exception +# classes here. 
+from novaclient import exceptions as _nova_exceptions +ServerNotFound = _nova_exceptions.NotFound +ServerClientException = _nova_exceptions.ClientException + +class PyraxException(Exception): + pass + +class AccessListIDNotFound(PyraxException): + pass + +class AuthenticationFailed(PyraxException): + pass + +class AuthorizationFailure(PyraxException): + pass + +class AuthSystemNotFound(PyraxException): + pass + +class CDNFailed(PyraxException): + pass + +class DNSCallTimedOut(PyraxException): + pass + +class DomainCreationFailed(PyraxException): + pass + +class DomainDeletionFailed(PyraxException): + pass + +class DomainRecordAdditionFailed(PyraxException): + pass + +class DomainRecordDeletionFailed(PyraxException): + pass + +class DomainRecordNotFound(PyraxException): + pass + +class DomainRecordNotUnique(PyraxException): + pass + +class DomainRecordUpdateFailed(PyraxException): + pass + +class DomainUpdateFailed(PyraxException): + pass + +class DuplicateUser(PyraxException): + pass + +class EndpointNotDefined(PyraxException): + pass + +class EndpointNotFound(PyraxException): + pass + +class EnvironmentNotFound(PyraxException): + pass + +class FlavorNotFound(PyraxException): + pass + +class FileNotFound(PyraxException): + pass + +class FolderNotFound(PyraxException): + pass + +class KeyringModuleNotInstalled(PyraxException): + pass + +class KeyringPasswordNotFound(PyraxException): + pass + +class KeyringUsernameMissing(PyraxException): + pass + +class IdentityClassNotDefined(PyraxException): + pass + +class InvalidCDNMetadata(PyraxException): + pass + +class InvalidConfigurationFile(PyraxException): + pass + +class InvalidCredentialFile(PyraxException): + pass + +class InvalidDateTimeString(PyraxException): + pass + +class InvalidDeviceType(PyraxException): + pass + +class InvalidLoadBalancer(PyraxException): + pass + +class InvalidLoadBalancerParameters(PyraxException): + pass + +class InvalidMonitoringCheckDetails(PyraxException): + pass + +class InvalidMonitoringCheckUpdate(PyraxException): + pass + +class InvalidMonitoringMetricsRequest(PyraxException): + pass + +class InvalidMonitoringMetricsResolution(PyraxException): + pass + +class InvalidNodeCondition(PyraxException): + pass + +class InvalidNodeParameters(PyraxException): + pass + +class InvalidPTRRecord(PyraxException): + pass + +class InvalidSessionPersistenceType(PyraxException): + pass + +class InvalidSetting(PyraxException): + pass + +class InvalidSize(PyraxException): + pass + +class InvalidTemporaryURLMethod(PyraxException): + pass + +class InvalidUploadID(PyraxException): + pass + +class InvalidVirtualIPType(PyraxException): + pass + +class InvalidVirtualIPVersion(PyraxException): + pass + +class InvalidVolumeResize(PyraxException): + pass + +class MissingAuthSettings(PyraxException): + pass + +class MissingDNSSettings(PyraxException): + pass + +class MissingHealthMonitorSettings(PyraxException): + pass + +class MissingLoadBalancerParameters(PyraxException): + pass + +class MissingMonitoringCheckDetails(PyraxException): + pass + +class MissingMonitoringCheckGranularity(PyraxException): + pass + +class MissingName(PyraxException): + pass + +class MissingTemporaryURLKey(PyraxException): + pass + +class MonitoringCheckTargetNotSpecified(PyraxException): + pass + +class MonitoringZonesPollMissing(PyraxException): + pass + +class NetworkCIDRInvalid(PyraxException): + pass + +class NetworkCIDRMalformed(PyraxException): + pass + +class NetworkCountExceeded(PyraxException): + pass + +class NetworkInUse(PyraxException): + pass + 
+class NetworkNotFound(PyraxException): + pass + +class NetworkLabelNotUnique(PyraxException): + pass + +class NoMoreResults(PyraxException): + pass + +class NoReloadError(PyraxException): + pass + +class NoSSLTerminationConfiguration(PyraxException): + pass + +class NoSuchContainer(PyraxException): + pass + +class NoSuchDatabase(PyraxException): + pass + +class NoSuchDatabaseUser(PyraxException): + pass + +class NoSuchObject(PyraxException): + pass + +class NotAuthenticated(PyraxException): + pass + +class NotCDNEnabled(PyraxException): + pass + +class NoTokenLookupException(PyraxException): + pass + +class PasswordChangeFailed(PyraxException): + pass + +class ProtocolMismatch(PyraxException): + pass + +class PTRRecordCreationFailed(PyraxException): + pass + +class PTRRecordDeletionFailed(PyraxException): + pass + +class PTRRecordUpdateFailed(PyraxException): + pass + +class ServiceNotAvailable(PyraxException): + pass + +class SnapshotNotAvailable(PyraxException): + pass + +class TenantNotFound(PyraxException): + pass + +class UnattachedNode(PyraxException): + pass + +class UnattachedVirtualIP(PyraxException): + pass + +class UnicodePathError(PyraxException): + pass + +class UploadFailed(PyraxException): + pass + +class UserNotFound(PyraxException): + pass + +class VolumeAttachmentFailed(PyraxException): + pass + +class VolumeDetachmentFailed(PyraxException): + pass + +class VolumeNotAvailable(PyraxException): + pass + + +class AmbiguousEndpoints(PyraxException): + """Found more than one matching endpoint in Service Catalog.""" + def __init__(self, endpoints=None): + self.endpoints = endpoints + + def __str__(self): + return "AmbiguousEndpoints: %s" % repr(self.endpoints) + + +class ClientException(PyraxException): + """ + The base exception class for all exceptions this library raises. + """ + def __init__(self, code, message=None, details=None, request_id=None): + self.code = code + self.message = message or self.__class__.message + self.details = details + self.request_id = request_id + + def __str__(self): + formatted_string = "%s (HTTP %s)" % (self.message, self.code) + if self.request_id: + formatted_string += " (Request-ID: %s)" % self.request_id + + return formatted_string + +class BadRequest(ClientException): + """ + HTTP 400 - Bad request: you sent some malformed data. + """ + http_status = 400 + message = "Bad request" + + +class Unauthorized(ClientException): + """ + HTTP 401 - Unauthorized: bad credentials. + """ + http_status = 401 + message = "Unauthorized" + + +class Forbidden(ClientException): + """ + HTTP 403 - Forbidden: your credentials don't give you access to this + resource. + """ + http_status = 403 + message = "Forbidden" + + +class NotFound(ClientException): + """ + HTTP 404 - Not found + """ + http_status = 404 + message = "Not found" + + +class NoUniqueMatch(ClientException): + """ + HTTP 400 - Bad Request + """ + http_status = 400 + message = "Not Unique" + + +class OverLimit(ClientException): + """ + HTTP 413 - Over limit: you're over the API limits for this time period. + """ + http_status = 413 + message = "Over limit" + + +# NotImplemented is a python keyword. +class HTTPNotImplemented(ClientException): + """ + HTTP 501 - Not Implemented: the server does not support this operation. 
+ """ + http_status = 501 + message = "Not Implemented" + + + +# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__() +# so we can do this: +# _code_map = dict((c.http_status, c) +# for c in ClientException.__subclasses__()) +# +# Instead, we have to hardcode it: +_code_map = dict((c.http_status, c) for c in [BadRequest, Unauthorized, + Forbidden, NotFound, OverLimit, HTTPNotImplemented]) + + +def from_response(response, body): + """ + Return an instance of a ClientException or subclass + based on an httplib2 response. + + Usage:: + + resp, body = http.request(...) + if resp.status != 200: + raise exception_from_response(resp, body) + """ + if isinstance(response, dict): + status = response.get("status") + else: + status = response.status + cls = _code_map.get(int(status), ClientException) + request_id = response.get("x-compute-request-id") + if body: + message = "n/a" + details = "n/a" + if isinstance(body, dict): + message = body.get("message") + details = body.get("details") + if message is details is None: + error = body[body.keys()[0]] + if isinstance(error, dict): + message = error.get("message", None) + details = error.get("details", None) + else: + message = error + details = None + return cls(code=status, message=message, details=details, + request_id=request_id) + else: + return cls(code=status, request_id=request_id) diff --git a/awx/lib/site-packages/pyrax/identity/__init__.py b/awx/lib/site-packages/pyrax/identity/__init__.py new file mode 100644 index 0000000000..f5ae21629d --- /dev/null +++ b/awx/lib/site-packages/pyrax/identity/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import glob +import os +opth = os.path + +fpath = opth.abspath(__file__) +path = opth.dirname(fpath) +pypath = opth.join(path, "*.py") +pyfiles = glob.glob(pypath) +fnames = [opth.basename(pyfile) for pyfile in pyfiles] +__all__ = [opth.splitext(fname)[0] for fname in fnames + if not fname.startswith("_")] diff --git a/awx/lib/site-packages/pyrax/identity/keystone_identity.py b/awx/lib/site-packages/pyrax/identity/keystone_identity.py new file mode 100644 index 0000000000..f0233820fd --- /dev/null +++ b/awx/lib/site-packages/pyrax/identity/keystone_identity.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import pyrax +from pyrax.base_identity import BaseAuth +import pyrax.exceptions as exc + + +class KeystoneIdentity(BaseAuth): + """ + Implements the Keystone-specific behaviors for Identity. In most + cases you will want to create specific subclasses to implement the + _get_auth_endpoint() method if you want to use something other + than the config file to control your auth endpoint. 
+ """ + + _default_region = "RegionOne" + + def _get_auth_endpoint(self): + ep = pyrax.get_setting("auth_endpoint") + if ep is None: + raise exc.EndpointNotDefined("No auth enpoint has been specified.") + return ep diff --git a/awx/lib/site-packages/pyrax/identity/rax_identity.py b/awx/lib/site-packages/pyrax/identity/rax_identity.py new file mode 100644 index 0000000000..69961c2809 --- /dev/null +++ b/awx/lib/site-packages/pyrax/identity/rax_identity.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import ConfigParser + +import pyrax +from pyrax.base_identity import BaseAuth +from pyrax.base_identity import User +import pyrax.exceptions as exc +import pyrax.utils as utils + +AUTH_ENDPOINT = "https://identity.api.rackspacecloud.com/v2.0/" + + +class RaxIdentity(BaseAuth): + """ + This class handles all of the authentication requirements for working + with the Rackspace Cloud. + """ + default_region = "SYD" + + + def _get_auth_endpoint(self): + return AUTH_ENDPOINT + + + def _read_credential_file(self, cfg): + self.username = cfg.get("rackspace_cloud", "username") + try: + self.password = cfg.get("rackspace_cloud", "api_key", raw=True) + except ConfigParser.NoOptionError as e: + # Allow either the use of either 'api_key' or 'password'. + self.password = cfg.get("rackspace_cloud", "password", raw=True) + + + def _get_credentials(self): + """ + Returns the current credentials in the format expected by + the authentication service. Note that Rackspace credentials + expect 'api_key' instead of 'password'. + """ + return {"auth": {"RAX-KSKEY:apiKeyCredentials": + {"username": "%s" % self.username, + "apiKey": "%s" % self.password}}} + + + def auth_with_token(self, token, tenant_id=None, tenant_name=None): + """ + If a valid token is already known, this call will use it to generate + the service catalog. + """ + # Implementation note: + # Rackspace auth uses one tenant ID for the object_store services and + # another for everything else. The one that the user would know is the + # 'everything else' ID, so we need to extract the object_store tenant + # ID from the initial response, and call the superclass + # auth_with_token() method a second time with that tenant ID to get the + # object_store endpoints. We can then add these to the initial + # endpoints returned by the primary tenant ID, and then continue with + # the auth process. + main_resp = self._call_token_auth(token, tenant_id, tenant_name) + main_body = main_resp.json() + # Get the swift tenant ID + roles = main_body["access"]["user"]["roles"] + ostore = [role for role in roles + if role["name"] == "object-store:default"] + if ostore: + ostore_tenant_id = ostore[0]["tenantId"] + ostore_resp = self._call_token_auth(token, ostore_tenant_id, None) + ostore_body = ostore_resp.json() + ostore_cat = ostore_body["access"]["serviceCatalog"] + main_cat = main_body["access"]["serviceCatalog"] + main_cat.extend(ostore_cat) + self._parse_response(main_body) + self.authenticated = True + + + def _parse_response(self, resp): + """Gets the authentication information from the returned JSON.""" + super(RaxIdentity, self)._parse_response(resp) + user = resp["access"]["user"] + defreg = user.get("RAX-AUTH:defaultRegion") + if defreg: + self._default_region = defreg + + + def find_user_by_name(self, name): + """ + Returns a User object by searching for the supplied user name. Returns + None if there is no match for the given name. 
+ """ + uri = "users?name=%s" % name + return self._find_user(uri) + + + def find_user_by_id(self, uid): + """ + Returns a User object by searching for the supplied user ID. Returns + None if there is no match for the given ID. + """ + uri = "users/%s" % uid + return self._find_user(uri) + + + def _find_user(self, uri): + """Handles the 'find' code for both name and ID searches.""" + resp = self.method_get(uri) + if resp.status_code in (403, 404): + return None + jusers = resp.json() + user_info = jusers["user"] + return User(self, user_info) + + + def update_user(self, user, email=None, username=None, + uid=None, defaultRegion=None, enabled=None): + user_id = utils.get_id(user) + uri = "users/%s" % user_id + upd = {"id": user_id} + if email is not None: + upd["email"] = email + if defaultRegion is not None: + upd["RAX-AUTH:defaultRegion"] = defaultRegion + if username is not None: + upd["username"] = username + if enabled is not None: + upd["enabled"] = enabled + data = {"user": upd} + resp = self.method_put(uri, data=data) + return User(self, resp.json()) + + + def list_credentials(self, user): + """ + Returns a user's non-password credentials. + """ + user_id = utils.get_id(user) + uri = "users/%s/OS-KSADM/credentials" % user_id + return self.method_get(uri) + + + def get_user_credentials(self, user): + """ + Returns a user's non-password credentials. + """ + user_id = utils.get_id(user) + base_uri = "users/%s/OS-KSADM/credentials/RAX-KSKEY:apiKeyCredentials" + uri = base_uri % user_id + return self.method_get(uri) diff --git a/awx/lib/site-packages/pyrax/manager.py b/awx/lib/site-packages/pyrax/manager.py new file mode 100644 index 0000000000..3e55acd34b --- /dev/null +++ b/awx/lib/site-packages/pyrax/manager.py @@ -0,0 +1,251 @@ +# Copyright 2010 Jacob Kaplan-Moss + +# Copyright 2011 OpenStack LLC. +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Base utilities to build API operation managers and objects on top of. +""" + +import contextlib +import hashlib +import os + +import pyrax.exceptions as exc +import pyrax.utils as utils + + +# Python 2.4 compat +try: + all +except NameError: + def all(iterable): + return True not in (not x for x in iterable) + + + +class BaseManager(object): + """ + Managers interact with a particular type of API (servers, databases, dns, + etc.) and provide CRUD operations for them. 
+ """ + resource_class = None + response_key = None + plural_response_key = None + uri_base = None + _hooks_map = {} + + + def __init__(self, api, resource_class=None, response_key=None, + plural_response_key=None, uri_base=None): + self.api = api + self.resource_class = resource_class + self.response_key = response_key + self.plural_response_key = plural_response_key + if plural_response_key is None and response_key is not None: + # Default to adding 's' + self.plural_response_key = "%ss" % response_key + self.uri_base = uri_base + + + def list(self, limit=None, marker=None): + """Gets a list of all items.""" + uri = "/%s" % self.uri_base + pagination_items = [] + if limit is not None: + pagination_items.append("limit=%s" % limit) + if marker is not None: + pagination_items.append("marker=%s" % marker) + pagination = "&".join(pagination_items) + if pagination: + uri = "%s?%s" % (uri, pagination) + return self._list(uri) + + + def get(self, item): + """Gets a specific item.""" + uri = "/%s/%s" % (self.uri_base, utils.get_id(item)) + return self._get(uri) + + + def create(self, name, *args, **kwargs): + """ + Subclasses need to implement the _create_body() method to return a dict + that will be used for the API request body. + + For cases where no response is returned from the API on creation, pass + `return_none=True` so that the _create method doesn't expect one. + + For cases where you do not want the _create method to attempt to parse + the response, but instead have it returned directly, pass + `return_raw=True`. + + For cases where the API returns information in the response and not the + response_body, pass `return_response=True`. + """ + return_none = kwargs.pop("return_none", False) + return_raw = kwargs.pop("return_raw", False) + return_response = kwargs.pop("return_response", False) + body = self.api._create_body(name, *args, **kwargs) + return self._create("/%s" % self.uri_base, body, + return_none=return_none, return_raw=return_raw, + return_response=return_response) + + + def delete(self, item): + """Deletes the specified item.""" + uri = "/%s/%s" % (self.uri_base, utils.get_id(item)) + return self._delete(uri) + + + def _list(self, uri, obj_class=None, body=None): + """ + Handles the communication with the API when getting + a full listing of the resources managed by this class. + """ + if body: + resp, resp_body = self.api.method_post(uri, body=body) + else: + resp, resp_body = self.api.method_get(uri) + + if obj_class is None: + obj_class = self.resource_class + + data = resp_body.get(self.plural_response_key, resp_body) + # NOTE(ja): keystone returns values as list as {"values": [ ... ]} + # unlike other services which just return the list... + if isinstance(data, dict): + try: + data = data["values"] + except KeyError: + pass + return [obj_class(self, res, loaded=False) + for res in data if res] + + + def _get(self, uri): + """ + Handles the communication with the API when getting + a specific resource managed by this class. + """ + resp, resp_body = self.api.method_get(uri) + return self.resource_class(self, resp_body, self.response_key, + loaded=True) + + + def _create(self, uri, body, return_none=False, return_raw=False, + return_response=None, **kwargs): + """ + Handles the communication with the API when creating a new + resource managed by this class. 
+ """ + self.run_hooks("modify_body_for_create", body, **kwargs) + resp, resp_body = self.api.method_post(uri, body=body) + if return_none: + # No response body + return + elif return_response: + return resp + elif return_raw: + if self.response_key: + return resp_body[self.response_key] + else: + return resp_body + return self.resource_class(self, resp_body, self.response_key) + + + def _delete(self, uri): + """ + Handles the communication with the API when deleting + a specific resource managed by this class. + """ + _resp, _body = self.api.method_delete(uri) + + + def _update(self, uri, body, **kwargs): + """ + Handles the communication with the API when updating + a specific resource managed by this class. + """ + self.run_hooks("modify_body_for_update", body, **kwargs) + resp, resp_body = self.api.method_put(uri, body=body) + return resp_body + + + def action(self, item, action_type, body={}): + """ + Several API calls are lumped under the 'action' API. This + is the generic handler for such calls. + """ + uri = "/%s/%s/action" % (self.uri_base, utils.get_id(item)) + action_body = {action_type: body} + return self.api.method_post(uri, body=action_body) + + + def find(self, **kwargs): + """ + Finds a single item with attributes matching ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. + """ + matches = self.findall(**kwargs) + num_matches = len(matches) + if not num_matches: + msg = "No %s matching: %s." % (self.resource_class.__name__, kwargs) + raise exc.NotFound(404, msg) + if num_matches > 1: + msg = "More than one %s matching: %s." % ( + self.resource_class.__name__, kwargs) + raise exc.NoUniqueMatch(400, msg) + else: + return matches[0] + + + def findall(self, **kwargs): + """ + Finds all items with attributes matching ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. + """ + found = [] + searches = kwargs.items() + + for obj in self.list(): + try: + if all(getattr(obj, attr) == value + for (attr, value) in searches): + found.append(obj) + except AttributeError: + continue + return found + + + @classmethod + def add_hook(cls, hook_type, hook_func): + if hook_type not in cls._hooks_map: + cls._hooks_map[hook_type] = [] + + cls._hooks_map[hook_type].append(hook_func) + + + @classmethod + def run_hooks(cls, hook_type, *args, **kwargs): + hook_funcs = cls._hooks_map.get(hook_type) or [] + for hook_func in hook_funcs: + hook_func(*args, **kwargs) diff --git a/awx/lib/site-packages/pyrax/resource.py b/awx/lib/site-packages/pyrax/resource.py new file mode 100644 index 0000000000..52c292fbad --- /dev/null +++ b/awx/lib/site-packages/pyrax/resource.py @@ -0,0 +1,145 @@ +# Copyright 2010 Jacob Kaplan-Moss + +# Copyright 2011 OpenStack LLC. +# Copyright 2012 Rackspace + +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Base utilities to build API operation managers and objects on top of. 
+""" + +import pyrax +import pyrax.utils as utils + + +class BaseResource(object): + """ + A resource represents a particular instance of an object (server, flavor, + etc). This is pretty much just a bag for attributes. + """ + HUMAN_ID = False + NAME_ATTR = "name" + # Some resource do not have any additional details to lazy load, + # so skip the unneeded API call by setting this to False. + get_details = True + # Atts not to display when showing the __repr__() + _non_display = [] + # Properties to add to the __repr__() display + _repr_properties = [] + + + def __init__(self, manager, info, key=None, loaded=False): + self._loaded = loaded + self.manager = manager + if key: + info = info[key] + self._info = info + self._add_details(info) + + + @property + def human_id(self): + """Subclasses may override this to provide a pretty ID which can be used + for bash completion. + """ + if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID: + return utils.slugify(getattr(self, self.NAME_ATTR)) + return None + + + def _add_details(self, info): + """ + Takes the dict returned by the API call and sets the + corresponding attributes on the object. + """ + for (key, val) in info.iteritems(): + if isinstance(key, unicode): + key = key.encode(pyrax.get_encoding()) + setattr(self, key, val) + + + def __getattr__(self, key): + """ + Many objects are lazy-loaded: only their most basic details + are initially returned. The first time any of the other attributes + are referenced, a GET is made to get the full details for the + object. + """ + if not self.loaded: + self.get() + # Attribute should be set; if not, it's not valid + try: + return self.__dict__[key] + except KeyError: + raise AttributeError("'%s' object has no attribute " + "'%s'." % (self.__class__, key)) + + + def __repr__(self): + reprkeys = sorted(key for key in self.__dict__.keys() + if (key[0] != "_") + and (key not in ("manager", "created", "updated")) + and (key not in self._non_display)) + reprkeys += self._repr_properties + info = ", ".join("%s=%s" % (key, getattr(self, key)) + for key in reprkeys) + return "<%s %s>" % (self.__class__.__name__, info) + + + def get(self): + """Gets the details for the object.""" + # set 'loaded' first ... so if we have to bail, we know we tried. + self.loaded = True + if not hasattr(self.manager, "get"): + return + if not self.get_details: + return + new = self.manager.get(self) + if new: + self._add_details(new._info) + # This alias is used to make its purpose clearer. + reload = get + + + def delete(self): + """Deletes the object.""" + # set 'loaded' first ... so if we have to bail, we know we tried. + self.loaded = True + if not hasattr(self.manager, "delete"): + return + self.manager.delete(self) + + + def __eq__(self, other): + """ + Two resource objects that represent the same entity in the cloud + should be considered equal if they have the same ID. If they + don't have IDs, but their attribute info matches, they are equal. 
+ """ + if not isinstance(other, self.__class__): + return False + if hasattr(self, "id") and hasattr(other, "id"): + return self.id == other.id + return self._info == other._info + + + def _get_loaded(self): + return self._loaded + + def _set_loaded(self, val): + self._loaded = val + + loaded = property(_get_loaded, _set_loaded) diff --git a/awx/lib/site-packages/pyrax/service_catalog.py b/awx/lib/site-packages/pyrax/service_catalog.py new file mode 100644 index 0000000000..5133f92e22 --- /dev/null +++ b/awx/lib/site-packages/pyrax/service_catalog.py @@ -0,0 +1,60 @@ +# Copyright 2011 OpenStack LLC. +# Copyright 2011, Piston Cloud Computing, Inc. +# Copyright 2012, Rackspace +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import pyrax.exceptions as exc + + +class ServiceCatalog(object): + """Helper methods for dealing with a Keystone Service Catalog.""" + + def __init__(self, resource_dict): + self.catalog = resource_dict + + def get_token(self): + """Extracts and returns the authentication token.""" + return self.catalog["access"]["token"]["id"] + + def url_for(self, attr=None, filter_value=None, + service_type=None, endpoint_type="publicURL", + service_name=None, volume_service_name=None): + """Fetches the public URL from the given service for + a particular endpoint attribute. If none given, returns + the first. See tests for sample service catalog.""" + matching_endpoints = [] + # We don't always get a service catalog back ... + if not "serviceCatalog" in self.catalog["access"]: + return None + + # Full catalog ... + catalog = self.catalog["access"]["serviceCatalog"] + for service in catalog: + if service.get("type") != service_type: + continue + endpoints = service["endpoints"] + for endpoint in endpoints: + if not filter_value or endpoint.get(attr) == filter_value: + endpoint["serviceName"] = service.get("name") + matching_endpoints.append(endpoint) + + if not matching_endpoints: + raise exc.EndpointNotFound() + elif len(matching_endpoints) > 1: + raise exc.AmbiguousEndpoints(endpoints=matching_endpoints) + else: + return matching_endpoints[0][endpoint_type] diff --git a/awx/lib/site-packages/pyrax/utils.py b/awx/lib/site-packages/pyrax/utils.py new file mode 100644 index 0000000000..cc137e3d49 --- /dev/null +++ b/awx/lib/site-packages/pyrax/utils.py @@ -0,0 +1,572 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import datetime +import fnmatch +import hashlib +import os +import random +import re +import shutil +import string +from subprocess import Popen, PIPE +import sys +import tempfile +import threading +import time +import types + +try: + import pudb +except ImportError: + import pdb as pudb +trace = pudb.set_trace + +import pyrax +import pyrax.exceptions as exc + + +def runproc(cmd): + """ + Convenience method for executing operating system commands. + + Accepts a single string that would be the command as executed on the + command line. + + Returns a 2-tuple consisting of the output of (STDOUT, STDERR). 
In your + code you should check for an empty STDERR output to determine if your + command completed successfully. + """ + proc = Popen([cmd], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, + close_fds=True) + stdoutdata, stderrdata = proc.communicate() + return (stdoutdata, stderrdata) + + +class SelfDeletingTempfile(object): + """ + Convenience class for dealing with temporary files. + + The temp file is created in a secure fashion, and is + automatically deleted when the context manager exits. + + Usage: + + \code + with SelfDeletingTempfile() as tmp: + tmp.write( ... ) + some_func(tmp) + # More code + # At this point, the tempfile has been erased. + \endcode + """ + name = None + + def __enter__(self): + fd, self.name = tempfile.mkstemp() + os.close(fd) + return self.name + + def __exit__(self, type, value, traceback): + os.unlink(self.name) + + +class SelfDeletingTempDirectory(object): + """ + Convenience class for dealing with temporary folders and the + files within them. + + The temp folder is created in a secure fashion, and is + automatically deleted when the context manager exits, along + with any files that may be contained within. When you + instantiate this class, you receive the full path to the + temporary directory. + + Usage: + + \code + with SelfDeletingTempDirectory() as tmpdir: + f1 = open(os.path.join(tmpdir, "my_file.txt"), "w") + f1.write("blah...") + f1.close() + some_func(tmpdir) + # More code + # At this point, the directory 'tmpdir' has been deleted, + # as well as the file 'f1' within it. + \endcode + """ + name = None + + def __enter__(self): + self.name = tempfile.mkdtemp() + return self.name + + def __exit__(self, type, value, traceback): + shutil.rmtree(self.name) + + +def get_checksum(content, encoding="utf8", block_size=8192): + """ + Returns the MD5 checksum in hex for the given content. If 'content' + is a file-like object, the content will be obtained from its read() + method. If 'content' is a file path, that file is read and its + contents used. Otherwise, 'content' is assumed to be the string whose + checksum is desired. If the content is unicode, it will be encoded + using the specified encoding. + + To conserve memory, files and file-like objects will be read in blocks, + with the default block size of 8192 bytes, which is 128 times the + 64-byte block size of md5. This is optimal for most cases, but you can + change this by passing in a different value for `block_size`. + """ + md = hashlib.md5() + + def safe_update(txt): + try: + md.update(txt) + except UnicodeEncodeError: + md.update(txt.encode(encoding)) + + try: + isfile = os.path.isfile(content) + except TypeError: + # Will happen with binary content. + isfile = False + if isfile: + with open(content, "rb") as ff: + txt = ff.read(block_size) + while txt: + safe_update(txt) + txt = ff.read(block_size) + elif hasattr(content, "read"): + pos = content.tell() + content.seek(0) + txt = content.read(block_size) + while txt: + safe_update(txt) + txt = content.read(block_size) + content.seek(pos) + else: + safe_update(content) + return md.hexdigest() + + +def random_name(length=20, ascii_only=False): + """ + Generates a random name; useful for testing. + + By default it will return an encoded string containing + unicode values up to code point 1000. If you only + need or want ASCII values, pass True to the + ascii_only parameter.
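+
+    For example (illustrative; actual output is random):
+
+        random_name(8, ascii_only=True)  # e.g. 'kQzWpBfA'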
+ """ + if ascii_only: + base_chars = string.ascii_letters + else: + def get_char(): + return unichr(random.randint(32, 1000)) + base_chars = u"".join([get_char() for ii in xrange(length)]) + mult = (length / len(base_chars)) + 1 + chars = base_chars * mult + return "".join(random.sample(chars, length)) + + +def coerce_string_to_list(val): + """ + For parameters that can take either a single string or a list of strings, + this function will ensure that the result is a list containing the passed + values. + """ + if val: + if not isinstance(val, (list, tuple)): + val = [val] + else: + val = [] + return val + + +def folder_size(pth, ignore=None): + """ + Returns the total bytes for the specified path, optionally ignoring + any files which match the 'ignore' parameter. 'ignore' can either be + a single string pattern, or a list of such patterns. + """ + if not os.path.isdir(pth): + raise exc.FolderNotFound + + ignore = coerce_string_to_list(ignore) + + def get_size(total, root, names): + paths = [os.path.realpath(os.path.join(root, nm)) for nm in names] + for pth in paths[::-1]: + if not os.path.exists(pth): + paths.remove(pth) + if os.path.isdir(pth): + # Don't count folder stat sizes + paths.remove(pth) + if match_pattern(pth, ignore): + paths.remove(pth) + total[0] += sum(os.stat(pth).st_size for pth in paths) + + # Need a mutable to pass + total = [0] + os.path.walk(pth, get_size, total) + return total[0] + + +def add_method(obj, func, name=None): + """Adds an instance method to an object.""" + if name is None: + name = func.func_name + method = types.MethodType(func, obj, obj.__class__) + setattr(obj, name, method) + + +class _WaitThread(threading.Thread): + """ + Threading class to wait for object status in the background. Note that + verbose will always be False for a background thread. + """ + def __init__(self, obj, att, desired, callback, interval, attempts, + verbose, verbose_atts): + self.obj = obj + self.att = att + self.desired = desired + self.callback = callback + self.interval = interval + self.attempts = attempts + self.verbose = verbose + threading.Thread.__init__(self) + + def run(self): + """Starts the thread.""" + resp = _wait_until(obj=self.obj, att=self.att, + desired=self.desired, callback=None, + interval=self.interval, attempts=self.attempts, + verbose=False, verbose_atts=None) + self.callback(resp) + + +def wait_until(obj, att, desired, callback=None, interval=5, attempts=0, + verbose=False, verbose_atts=None): + """ + When changing the state of an object, it will commonly be in a transitional + state until the change is complete. This will reload the object every + `interval` seconds, and check its `att` attribute until the `desired` value + is reached, or until the maximum number of attempts is reached. The updated + object is returned. It is up to the calling program to check the returned + object to make sure that it successfully reached the desired state. + + Once the desired value of the attribute is reached, the method returns. If + not, it will re-try until the attribute's value matches one of the + `desired` values. By default (attempts=0) it will loop infinitely until the + attribute reaches the desired value. You can optionally limit the number of + times that the object is reloaded by passing a positive value to + `attempts`. If the attribute has not reached the desired value by then, the + method will exit. 
+ + If `verbose` is True, each attempt will print out the current value of the + watched attribute and the time that has elapsed since the original request. + Also, if `verbose_atts` is specified, the values of those attributes will + also be output. If `verbose` is False, then `verbose_atts` has no effect. + + Note that `desired` can be a list of values; if the attribute becomes equal + to any of those values, this will succeed. For example, when creating a new + cloud server, it will initially have a status of 'BUILD', and you can't + work with it until its status is 'ACTIVE'. However, there might be a + problem with the build process, and the server will change to a status of + 'ERROR'. So for this case you need to set the `desired` parameter to + `['ACTIVE', 'ERROR']`. If you simply pass 'ACTIVE' as the desired state, + this will loop indefinitely if a build fails, as the server will never + reach a status of 'ACTIVE'. + + Since this process of waiting can take a potentially long time, and will + block your program's execution until the desired state of the object is + reached, you may specify a callback function. The callback can be any + callable that accepts a single parameter; the parameter it receives will be + either the updated object (success), or None (failure). If a callback is + specified, the program will return immediately after spawning the wait + process in a separate thread. + """ + if callback: + waiter = _WaitThread(obj=obj, att=att, desired=desired, callback=callback, + interval=interval, attempts=attempts, verbose=verbose, + verbose_atts=verbose_atts) + waiter.start() + return waiter + else: + return _wait_until(obj=obj, att=att, desired=desired, callback=None, + interval=interval, attempts=attempts, verbose=verbose, + verbose_atts=verbose_atts) + + +def _wait_until(obj, att, desired, callback, interval, attempts, verbose, + verbose_atts): + """ + Loops until either the desired value of the attribute is reached, or the + number of attempts is exceeded. + """ + if not isinstance(desired, (list, tuple)): + desired = [desired] + if verbose_atts is None: + verbose_atts = [] + if not isinstance(verbose_atts, (list, tuple)): + verbose_atts = [verbose_atts] + infinite = (attempts == 0) + attempt = 0 + start = time.time() + while infinite or (attempt < attempts): + try: + obj.reload() + except AttributeError: + # This will happen with cloudservers and cloudfiles, which + # use different client/resource classes. + try: + # For servers: + obj.get() + except AttributeError: + try: + # For other objects that don't support .get() or .reload() + obj = obj.manager.get(obj.id) + except AttributeError: + # punt + raise exc.NoReloadError("The 'wait_until' method is not " + "supported for '%s' objects." % obj.__class__) + attval = getattr(obj, att) + if verbose: + elapsed = time.time() - start + msgs = ["Current value of %s: %s (elapsed: %4.1f seconds)" % ( + att, attval, elapsed)] + for vatt in verbose_atts: + vattval = getattr(obj, vatt, None) + msgs.append("%s=%s" % (vatt, vattval)) + print " ".join(msgs) + if attval in desired: + return obj + time.sleep(interval) + attempt += 1 + return obj + + + +def wait_for_build(obj, att=None, desired=None, callback=None, interval=None, + attempts=None, verbose=None, verbose_atts=None): + """ + Designed to handle the most common use case for wait_until: an object whose + 'status' attribute will end up in either 'ACTIVE' or 'ERROR' state. 
Since + builds don't happen very quickly, the interval will default to 20 seconds + to avoid excess polling. + """ + att = att or "status" + desired = desired or ["ACTIVE", "ERROR"] + interval = interval or 20 + attempts = attempts or 0 + verbose_atts = verbose_atts or "progress" + return wait_until(obj, att, desired, callback=callback, interval=interval, + attempts=attempts, verbose=verbose, verbose_atts=verbose_atts) + + +def _parse_datetime_string(val): + """ + Attempts to parse a string representation of a date or datetime value, and + returns a datetime if successful. If not, an InvalidDateTimeString exception + will be raised. + """ + lenval = len(val) + fmt = {19: "%Y-%m-%d %H:%M:%S", 10: "%Y-%m-%d"}.get(lenval) + if fmt is None: + # Invalid date + raise exc.InvalidDateTimeString("The supplied value '%s' does not " + "match either of the formats 'YYYY-MM-DD HH:MM:SS' or " + "'YYYY-MM-DD'." % val) + return datetime.datetime.strptime(val, fmt) + + +def iso_time_string(val, show_tzinfo=False): + """ + Takes either a date, datetime or a string, and returns the standard ISO + formatted string for that date/time, with any fractional second portion + removed. + """ + if not val: + return "" + if isinstance(val, basestring): + dt = _parse_datetime_string(val) + else: + dt = val + if not isinstance(dt, datetime.datetime): + dt = datetime.datetime.fromordinal(dt.toordinal()) + has_tz = (dt.tzinfo is not None) + if show_tzinfo and has_tz: + # Need to remove the colon in the TZ portion + ret = "".join(dt.isoformat().rsplit(":", 1)) + elif show_tzinfo and not has_tz: + ret = "%s+0000" % dt.isoformat().split(".")[0] + elif not show_tzinfo and has_tz: + ret = dt.isoformat()[:-6] + elif not show_tzinfo and not has_tz: + ret = dt.isoformat().split(".")[0] + return ret + + +def to_timestamp(val): + """ + Takes a value that is either a Python date, datetime, or a string + representation of a date/datetime value. Returns a standard Unix timestamp + corresponding to that value. + """ + if isinstance(val, basestring): + dt = _parse_datetime_string(val) + else: + dt = val + return time.mktime(dt.timetuple()) + + +def get_id(id_or_obj): + """ + Returns the 'id' attribute of 'id_or_obj' if present; if not, + returns 'id_or_obj'. + """ + if isinstance(id_or_obj, (basestring, int)): + # It's an ID + return id_or_obj + try: + return id_or_obj.id + except AttributeError: + return id_or_obj + + +def get_name(name_or_obj): + """ + Returns 'name_or_obj' directly if it is a string; otherwise returns its + 'name' attribute. If there is no 'name' attribute, a MissingName + exception is raised. + """ + if isinstance(name_or_obj, basestring): + # It's a name + return name_or_obj + try: + return name_or_obj.name + except AttributeError: + raise exc.MissingName(name_or_obj) + + +def params_to_dict(params, dct, local_dict): + """ + Given a set of optional parameter names, constructs a dictionary with the + parameter name as the key, and the value for that key in the local_dict as + the value, for all non-None values. + """ + for param in params: + val = local_dict.get(param) + if val is None: + continue + dct[param] = val + return dct + + +def match_pattern(nm, patterns): + """ + Compares `nm` with the supplied patterns, and returns True if it matches + at least one. + + Patterns are standard file-name wildcard strings, as defined in the + `fnmatch` module. For example, the pattern "*.py" will match the names + of all Python scripts.
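+
+    For example (illustrative):
+
+        match_pattern("backup.tar.gz", ["*.py", "*.gz"])  # True
+        match_pattern("notes.txt", "*.py")  # False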
+ """ + patterns = coerce_string_to_list(patterns) + for pat in patterns: + if fnmatch.fnmatch(nm, pat): + return True + return False + + +def update_exc(exc, msg, before=True, separator="\n"): + """ + Adds additional text to an exception's error message. + + The new text will be added before the existing text by default; to append + it after the original text, pass False to the `before` parameter. + + By default the old and new text will be separated by a newline. If you wish + to use a different separator, pass that as the `separator` parameter. + """ + emsg = exc.message + if before: + parts = (msg, separator, emsg) + else: + parts = (emsg, separator, msg) + new_msg = "%s%s%s" % parts + new_args = (new_msg, ) + exc.args[1:] + exc.message = new_msg + exc.args = new_args + return exc + + +def env(*args, **kwargs): + """ + Returns the first environment variable set + if none are non-empty, defaults to "" or keyword arg default + """ + for arg in args: + value = os.environ.get(arg, None) + if value: + return value + return kwargs.get("default", "") + + +def unauthenticated(fnc): + """ + Adds 'unauthenticated' attribute to decorated function. + Usage: + @unauthenticated + def mymethod(fnc): + ... + """ + fnc.unauthenticated = True + return fnc + + +def isunauthenticated(fnc): + """ + Checks to see if the function is marked as not requiring authentication + with the @unauthenticated decorator. Returns True if decorator is + set to True, False otherwise. + """ + return getattr(fnc, "unauthenticated", False) + + +def safe_issubclass(*args): + """Like issubclass, but will just return False if not a class.""" + try: + if issubclass(*args): + return True + except TypeError: + pass + return False + + +def import_class(import_str): + """Returns a class from a string including module and class.""" + mod_str, _sep, class_str = import_str.rpartition(".") + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + + +# http://code.activestate.com/recipes/ +# 577257-slugify-make-a-string-usable-in-a-url-or-filename/ +def slugify(value): + """ + Normalizes string, converts to lowercase, removes non-alpha characters, + and converts spaces to hyphens. + + From Django's "django/template/defaultfilters.py". + """ + import unicodedata + _slugify_strip_re = re.compile(r"[^\w\s-]") + _slugify_hyphenate_re = re.compile(r"[-\s]+") + if not isinstance(value, unicode): + value = unicode(value) + value = unicodedata.normalize("NFKD", value).encode("ascii", "ignore") + value = unicode(_slugify_strip_re.sub("", value).strip().lower()) + return _slugify_hyphenate_re.sub("-", value) diff --git a/awx/lib/site-packages/pyrax/version.py b/awx/lib/site-packages/pyrax/version.py new file mode 100644 index 0000000000..72d97b5ea1 --- /dev/null +++ b/awx/lib/site-packages/pyrax/version.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +version = "1.5.0" diff --git a/awx/lib/site-packages/pytz/__init__.py b/awx/lib/site-packages/pytz/__init__.py index 1975ea62e6..08c2ff7234 100644 --- a/awx/lib/site-packages/pytz/__init__.py +++ b/awx/lib/site-packages/pytz/__init__.py @@ -9,7 +9,7 @@ on how to use these modules. ''' # The Olson database is updated several times a year. -OLSON_VERSION = '2013b' +OLSON_VERSION = '2013d' VERSION = OLSON_VERSION # Version format for a patch release - only one so far. 
#VERSION = OLSON_VERSION + '.2' @@ -26,10 +26,6 @@ __all__ = [ ] import sys, datetime, os.path, gettext -try: - from UserDict import DictMixin -except ImportError: - from collections import Mapping as DictMixin try: from pkg_resources import resource_stream @@ -40,6 +36,7 @@ from pytz.exceptions import AmbiguousTimeError from pytz.exceptions import InvalidTimeError from pytz.exceptions import NonExistentTimeError from pytz.exceptions import UnknownTimeZoneError +from pytz.lazy import LazyDict, LazyList, LazySet from pytz.tzinfo import unpickler from pytz.tzfile import build_tzinfo, _byte_string @@ -292,36 +289,8 @@ def _p(*args): _p.__safe_for_unpickling__ = True -class _LazyDict(DictMixin): - """Dictionary populated on first use.""" - data = None - def __getitem__(self, key): - if self.data is None: - self._fill() - return self.data[key.upper()] - def __contains__(self, key): - if self.data is None: - self._fill() - return key in self.data - - def __iter__(self): - if self.data is None: - self._fill() - return iter(self.data) - - def __len__(self): - if self.data is None: - self._fill() - return len(self.data) - - def keys(self): - if self.data is None: - self._fill() - return self.data.keys() - - -class _CountryTimezoneDict(_LazyDict): +class _CountryTimezoneDict(LazyDict): """Map ISO 3166 country code to a list of timezone names commonly used in that country. @@ -379,7 +348,7 @@ class _CountryTimezoneDict(_LazyDict): country_timezones = _CountryTimezoneDict() -class _CountryNameDict(_LazyDict): +class _CountryNameDict(LazyDict): '''Dictionary proving ISO3166 code -> English name. >>> print(country_names['au']) @@ -1099,10 +1068,10 @@ all_timezones = \ 'W-SU', 'WET', 'Zulu'] -all_timezones = [ - tz for tz in all_timezones if resource_exists(tz)] +all_timezones = LazyList( + tz for tz in all_timezones if resource_exists(tz)) -all_timezones_set = set(all_timezones) +all_timezones_set = LazySet(all_timezones) common_timezones = \ ['Africa/Abidjan', 'Africa/Accra', @@ -1537,7 +1506,7 @@ common_timezones = \ 'US/Mountain', 'US/Pacific', 'UTC'] -common_timezones = [ - tz for tz in common_timezones if tz in all_timezones] +common_timezones = LazyList( + tz for tz in common_timezones if tz in all_timezones) -common_timezones_set = set(common_timezones) +common_timezones_set = LazySet(common_timezones) diff --git a/awx/lib/site-packages/pytz/lazy.py b/awx/lib/site-packages/pytz/lazy.py new file mode 100644 index 0000000000..0ce052d108 --- /dev/null +++ b/awx/lib/site-packages/pytz/lazy.py @@ -0,0 +1,148 @@ +from threading import RLock +try: + from UserDict import DictMixin +except ImportError: + from collections import Mapping as DictMixin + + +_fill_lock = RLock() + + +class LazyDict(DictMixin): + """Dictionary populated on first use.""" + data = None + def __getitem__(self, key): + if self.data is None: + _fill_lock.acquire() + try: + if self.data is None: + self._fill() + finally: + _fill_lock.release() + return self.data[key.upper()] + + def __contains__(self, key): + if self.data is None: + _fill_lock.acquire() + try: + if self.data is None: + self._fill() + finally: + _fill_lock.release() + return key in self.data + + def __iter__(self): + if self.data is None: + _fill_lock.acquire() + try: + if self.data is None: + self._fill() + finally: + _fill_lock.release() + return iter(self.data) + + def __len__(self): + if self.data is None: + _fill_lock.acquire() + try: + if self.data is None: + self._fill() + finally: + _fill_lock.release() + return len(self.data) + + def keys(self): + if
self.data is None: + _fill_lock.acquire() + try: + if self.data is None: + self._fill() + finally: + _fill_lock.release() + return self.data.keys() + + +class LazyList(list): + """List populated on first use.""" + def __new__(cls, fill_iter): + + class LazyList(list): + _fill_iter = None + + _props = ( + '__str__', '__repr__', '__unicode__', + '__hash__', '__sizeof__', '__cmp__', '__nonzero__', + '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', + 'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove', + 'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__', + '__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__', + '__getitem__', '__setitem__', '__delitem__', '__iter__', + '__reversed__', '__getslice__', '__setslice__', '__delslice__') + + def lazy(name): + def _lazy(self, *args, **kw): + if self._fill_iter is not None: + _fill_lock.acquire() + try: + if self._fill_iter is not None: + list.extend(self, self._fill_iter) + self._fill_iter = None + finally: + _fill_lock.release() + real = getattr(list, name) + setattr(self.__class__, name, real) + return real(self, *args, **kw) + return _lazy + + for name in _props: + setattr(LazyList, name, lazy(name)) + + new_list = LazyList() + new_list._fill_iter = fill_iter + return new_list + + +class LazySet(set): + """Set populated on first use.""" + def __new__(cls, fill_iter): + + class LazySet(set): + _fill_iter = None + + _props = ( + '__str__', '__repr__', '__unicode__', + '__hash__', '__sizeof__', '__cmp__', '__nonzero__', + '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', + '__contains__', '__len__', '__nonzero__', + '__getitem__', '__setitem__', '__delitem__', '__iter__', + '__sub__', '__and__', '__xor__', '__or__', + '__rsub__', '__rand__', '__rxor__', '__ror__', + '__isub__', '__iand__', '__ixor__', '__ior__', + 'add', 'clear', 'copy', 'difference', 'difference_update', + 'discard', 'intersection', 'intersection_update', 'isdisjoint', + 'issubset', 'issuperset', 'pop', 'remove', + 'symmetric_difference', 'symmetric_difference_update', + 'union', 'update') + + def lazy(name): + def _lazy(self, *args, **kw): + if self._fill_iter is not None: + _fill_lock.acquire() + try: + if self._fill_iter is not None: + for i in self._fill_iter: + set.add(self, i) + self._fill_iter = None + finally: + _fill_lock.release() + real = getattr(set, name) + setattr(self.__class__, name, real) + return real(self, *args, **kw) + return _lazy + + for name in _props: + setattr(LazySet, name, lazy(name)) + + new_set = LazySet() + new_set._fill_iter = fill_iter + return new_set diff --git a/awx/lib/site-packages/pytz/zoneinfo/Africa/Casablanca b/awx/lib/site-packages/pytz/zoneinfo/Africa/Casablanca index 13b90fa594..576c069b00 100644 Binary files a/awx/lib/site-packages/pytz/zoneinfo/Africa/Casablanca and b/awx/lib/site-packages/pytz/zoneinfo/Africa/Casablanca differ diff --git a/awx/lib/site-packages/pytz/zoneinfo/America/Asuncion b/awx/lib/site-packages/pytz/zoneinfo/America/Asuncion index a0c56370aa..59f78918fa 100644 Binary files a/awx/lib/site-packages/pytz/zoneinfo/America/Asuncion and b/awx/lib/site-packages/pytz/zoneinfo/America/Asuncion differ diff --git a/awx/lib/site-packages/pytz/zoneinfo/Antarctica/Macquarie b/awx/lib/site-packages/pytz/zoneinfo/Antarctica/Macquarie index 43e01c0989..cbad31bb94 100644 Binary files a/awx/lib/site-packages/pytz/zoneinfo/Antarctica/Macquarie and b/awx/lib/site-packages/pytz/zoneinfo/Antarctica/Macquarie differ diff --git a/awx/lib/site-packages/pytz/zoneinfo/Asia/Gaza 
b/awx/lib/site-packages/pytz/zoneinfo/Asia/Gaza index 4582b3b2c7..a010d76907 100644 Binary files a/awx/lib/site-packages/pytz/zoneinfo/Asia/Gaza and b/awx/lib/site-packages/pytz/zoneinfo/Asia/Gaza differ diff --git a/awx/lib/site-packages/pytz/zoneinfo/Asia/Hebron b/awx/lib/site-packages/pytz/zoneinfo/Asia/Hebron index b5bcaa0d2f..2d359ba001 100644 Binary files a/awx/lib/site-packages/pytz/zoneinfo/Asia/Hebron and b/awx/lib/site-packages/pytz/zoneinfo/Asia/Hebron differ diff --git a/awx/lib/site-packages/pytz/zoneinfo/Asia/Jerusalem b/awx/lib/site-packages/pytz/zoneinfo/Asia/Jerusalem index e7864171d1..8a0c8db164 100644 Binary files a/awx/lib/site-packages/pytz/zoneinfo/Asia/Jerusalem and b/awx/lib/site-packages/pytz/zoneinfo/Asia/Jerusalem differ diff --git a/awx/lib/site-packages/pytz/zoneinfo/Asia/Tel_Aviv b/awx/lib/site-packages/pytz/zoneinfo/Asia/Tel_Aviv index e7864171d1..8a0c8db164 100644 Binary files a/awx/lib/site-packages/pytz/zoneinfo/Asia/Tel_Aviv and b/awx/lib/site-packages/pytz/zoneinfo/Asia/Tel_Aviv differ diff --git a/awx/lib/site-packages/pytz/zoneinfo/Israel b/awx/lib/site-packages/pytz/zoneinfo/Israel index e7864171d1..8a0c8db164 100644 Binary files a/awx/lib/site-packages/pytz/zoneinfo/Israel and b/awx/lib/site-packages/pytz/zoneinfo/Israel differ diff --git a/awx/lib/site-packages/pytz/zoneinfo/iso3166.tab b/awx/lib/site-packages/pytz/zoneinfo/iso3166.tab index b952ca1c59..c184a812e3 100644 --- a/awx/lib/site-packages/pytz/zoneinfo/iso3166.tab +++ b/awx/lib/site-packages/pytz/zoneinfo/iso3166.tab @@ -1,16 +1,14 @@ -# <pre> -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. # ISO 3166 alpha-2 country codes # -# From Paul Eggert (2006-09-27): +# This file is in the public domain, so clarified as of +# 2009-05-17 by Arthur David Olson. +# +# From Paul Eggert (2013-05-27): # # This file contains a table with the following columns: # 1. ISO 3166-1 alpha-2 country code, current as of -# ISO 3166-1 Newsletter VI-1 (2007-09-21). See: -# <a href="http://www.iso.org/iso/en/prods-services/iso3166ma/index.html"> -# ISO 3166 Maintenance agency (ISO 3166/MA) -# </a>. +# ISO 3166-1 Newsletter VI-15 (2013-05-10). See: Updates on ISO 3166 +# http://www.iso.org/iso/home/standards/country_codes/updates_on_iso_3166.htm # 2. The usual English name for the country, # chosen so that alphabetic sorting of subsets produces helpful lists. # This is not the same as the English name in the ISO 3166 tables. @@ -20,8 +18,9 @@ # # Lines beginning with `#' are comments. # -# From Arthur David Olson (2011-08-17): -# Resynchronized today with the ISO 3166 site (adding SS for South Sudan). +# This table is intended as an aid for users, to help them select time +# zone data appropriate for their practical needs. It is not intended +# to take or endorse any position on legal or territorial claims. 
# #country- #code country name @@ -54,7 +53,7 @@ BL St Barthelemy BM Bermuda BN Brunei BO Bolivia -BQ Bonaire Sint Eustatius & Saba +BQ Bonaire, St Eustatius & Saba BR Brazil BS Bahamas BT Bhutan @@ -235,7 +234,7 @@ SR Suriname SS South Sudan ST Sao Tome & Principe SV El Salvador -SX Sint Maarten +SX St Maarten (Dutch part) SY Syria SZ Swaziland TC Turks & Caicos Is diff --git a/awx/lib/site-packages/pytz/zoneinfo/zone.tab b/awx/lib/site-packages/pytz/zoneinfo/zone.tab index c1cd95e89e..3ec24a7642 100644 --- a/awx/lib/site-packages/pytz/zoneinfo/zone.tab +++ b/awx/lib/site-packages/pytz/zoneinfo/zone.tab @@ -1,18 +1,21 @@ -# <pre> +# TZ zone descriptions +# # This file is in the public domain, so clarified as of # 2009-05-17 by Arthur David Olson. # -# TZ zone descriptions -# -# From Paul Eggert (1996-08-05): +# From Paul Eggert (2013-05-27): # # This file contains a table with the following columns: # 1. ISO 3166 2-character country code. See the file `iso3166.tab'. +# This identifies a country that overlaps the zone. The country may +# overlap other zones and the zone may overlap other countries. # 2. Latitude and longitude of the zone's principal location # in ISO 6709 sign-degrees-minutes-seconds format, # either +-DDMM+-DDDMM or +-DDMMSS+-DDDMMSS, # first latitude (+ is north), then longitude (+ is east). +# This location need not lie within the column-1 country. # 3. Zone name used in value of TZ environment variable. +# Please see the 'Theory' file for how zone names are chosen. # 4. Comments; present if and only if the country has multiple rows. # # Columns are separated by a single tab. @@ -22,6 +25,10 @@ # # Lines beginning with `#' are comments. # +# This table is intended as an aid for users, to help them select time +# zone data appropriate for their practical needs. It is not intended +# to take or endorse any position on legal or territorial claims. 
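The column layout documented above (country code, ISO 6709 coordinates, TZ name, optional comment) makes zone.tab straightforward to consume directly; a minimal parsing sketch, assuming a local copy of the file (the path is illustrative, not part of pytz)::

    def read_zone_tab(path='zone.tab'):
        # Rows are tab-separated; lines beginning with '#' are comments.
        rows = []
        with open(path) as fh:
            for line in fh:
                if not line.strip() or line.startswith('#'):
                    continue
                parts = line.rstrip('\n').split('\t')
                code, coords, tz = parts[:3]
                comment = parts[3] if len(parts) > 3 else None
                rows.append((code, coords, tz, comment))
        return rows

The same approach works for iso3166.tab, whose two columns (code, country name) are documented in the previous hunk.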
+# #country- #code coordinates TZ comments AD +4230+00131 Europe/Andorra @@ -42,7 +49,6 @@ AQ -6617+11031 Antarctica/Casey Casey Station, Bailey Peninsula AQ -7824+10654 Antarctica/Vostok Vostok Station, Lake Vostok AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville Station, Terre Adelie AQ -690022+0393524 Antarctica/Syowa Syowa Station, E Ongul I -AQ -5430+15857 Antarctica/Macquarie Macquarie Island Station, Macquarie Island AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF) AR -3124-06411 America/Argentina/Cordoba most locations (CB, CC, CN, ER, FM, MN, SE, SF) AR -2447-06525 America/Argentina/Salta (SA, LP, NQ, RN) @@ -58,6 +64,7 @@ AR -5448-06818 America/Argentina/Ushuaia Tierra del Fuego (TF) AS -1416-17042 Pacific/Pago_Pago AT +4813+01620 Europe/Vienna AU -3133+15905 Australia/Lord_Howe Lord Howe Island +AU -5430+15857 Antarctica/Macquarie Macquarie Island AU -4253+14719 Australia/Hobart Tasmania - most locations AU -3956+14352 Australia/Currie Tasmania - King Island AU -3749+14458 Australia/Melbourne Victoria @@ -216,7 +223,7 @@ ID -0002+10920 Asia/Pontianak west & central Borneo ID -0507+11924 Asia/Makassar east & south Borneo, Sulawesi (Celebes), Bali, Nusa Tengarra, west Timor ID -0232+14042 Asia/Jayapura west New Guinea (Irian Jaya) & Malukus (Moluccas) IE +5320-00615 Europe/Dublin -IL +3146+03514 Asia/Jerusalem +IL +314650+0351326 Asia/Jerusalem IM +5409-00428 Europe/Isle_of_Man IN +2232+08822 Asia/Kolkata IO -0720+07225 Indian/Chagos diff --git a/awx/lib/site-packages/rackspace_auth_openstack/__init__.py b/awx/lib/site-packages/rackspace_auth_openstack/__init__.py new file mode 100644 index 0000000000..092eb22320 --- /dev/null +++ b/awx/lib/site-packages/rackspace_auth_openstack/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 Rackspace +# Copyright 2012 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/awx/lib/site-packages/rackspace_auth_openstack/plugin.py b/awx/lib/site-packages/rackspace_auth_openstack/plugin.py new file mode 100644 index 0000000000..e5f630b5de --- /dev/null +++ b/awx/lib/site-packages/rackspace_auth_openstack/plugin.py @@ -0,0 +1,45 @@ +# Copyright 2012 Rackspace +# Copyright 2012 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
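The helpers defined just below expose the region-specific identity endpoints and build the RAX-KSKEY:apiKeyCredentials request body from a novaclient client instance. A hedged sketch of the module-level helpers (return values taken from the source that follows)::

    from rackspace_auth_openstack import plugin

    plugin.auth_url_us()  # "https://identity.api.rackspacecloud.com/v2.0/"
    plugin.auth_url_uk()  # "https://lon.identity.api.rackspacecloud.com/v2.0/"

authenticate_us() and authenticate_uk() then expect the client ("cls") to carry user, password (the API key) and projectid attributes, which novaclient is expected to supply through its auth-plugin mechanism.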
+ + +def auth_url_us(): + """Return the Rackspace Cloud US Auth URL""" + return "https://identity.api.rackspacecloud.com/v2.0/" + + +def auth_url_uk(): + """Return the Rackspace Cloud UK Auth URL""" + return "https://lon.identity.api.rackspacecloud.com/v2.0/" + + +def _authenticate(cls, auth_url): + """Authenticate against the Rackspace auth service.""" + body = {"auth": { + "RAX-KSKEY:apiKeyCredentials": { + "username": cls.user, + "apiKey": cls.password, + "tenantName": cls.projectid}}} + return cls._authenticate(auth_url, body) + + +def authenticate_us(cls, + auth_url=auth_url_us()): + """Authenticate against the Rackspace US auth service.""" + return _authenticate(cls, auth_url) + +def authenticate_uk(cls, + auth_url=auth_url_uk()): + """Authenticate against the Rackspace UK auth service.""" + return _authenticate(cls, auth_url) diff --git a/awx/lib/site-packages/rax_default_network_flags_python_novaclient_ext/__init__.py b/awx/lib/site-packages/rax_default_network_flags_python_novaclient_ext/__init__.py new file mode 100644 index 0000000000..10773eb1b7 --- /dev/null +++ b/awx/lib/site-packages/rax_default_network_flags_python_novaclient_ext/__init__.py @@ -0,0 +1,75 @@ +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Instance create default networks extension +""" +from novaclient import utils +from novaclient.v1_1 import servers +from novaclient.v1_1 import shell + + +def add_args(): + utils.add_arg(shell.do_boot, + '--no-public', + dest='public', + action='store_false', + default=True, + help='Boot instance without public network connectivity.') + utils.add_arg(shell.do_boot, + '--no-service-net', + dest='service_net', + action='store_false', + default=True, + help='Boot instance without service network connectivity.') + + +def bind_args_to_resource_manager(args): + def add_default_networks_config(args): + return dict(public=args.public, service_net=args.service_net) + + utils.add_resource_manager_extra_kwargs_hook( + shell.do_boot, add_default_networks_config) + + +def add_modify_body_hook(): + def modify_body_for_create(body, **kwargs): + if not body.get('server'): + # NOTE(tr3buchet) need to figure why this is being triggered on + # network creates, quick fix for now.. 
+ return + public = kwargs.get('public') + service_net = kwargs.get('service_net') + networks = body['server'].get('networks') or [] + pub_dict = {'uuid': '00000000-0000-0000-0000-000000000000'} + snet_dict = {'uuid': '11111111-1111-1111-1111-111111111111'} + if public and pub_dict not in networks: + networks.append(pub_dict) + if service_net and snet_dict not in networks: + networks.append(snet_dict) + + body['server']['networks'] = networks + + servers.ServerManager.add_hook( + 'modify_body_for_create', modify_body_for_create) + + +def __pre_parse_args__(): + add_args() + + +def __post_parse_args__(args): + bind_args_to_resource_manager(args) + add_modify_body_hook() diff --git a/awx/lib/site-packages/rax_scheduled_images_python_novaclient_ext/__init__.py b/awx/lib/site-packages/rax_scheduled_images_python_novaclient_ext/__init__.py new file mode 100644 index 0000000000..7dbeced7be --- /dev/null +++ b/awx/lib/site-packages/rax_scheduled_images_python_novaclient_ext/__init__.py @@ -0,0 +1,102 @@ +# Copyright 2013 Rackspace, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scheduled Images interface (1.1 extension). +""" + +import urllib + +from novaclient import base +from novaclient import exceptions +from novaclient import utils + + +class ScheduledImage(base.Resource): + """Represents the settings for a scheduled image""" + def __repr__(self): + return "<ScheduledImage>" + + +class ScheduledImageManager(base.Manager): + resource_class = ScheduledImage + + def get(self, server_id): + """ + Get the scheduled image information for the server. + + :param server_id: The ID of the server to query for. + :rtype: :class:`ScheduledImage` + """ + try: + return self._get("/servers/%s/rax-si-image-schedule" % server_id, "image_schedule") + except exceptions.NotFound: + msg = "Scheduled images not enabled for server %s" % server_id + raise exceptions.NotFound(404, msg) + + def disable(self, server_id): + """ + Disable the creation of scheduled images for the server. + + :param server_id: The ID of the server to disable scheduled images for. + """ + self._delete("/servers/%s/rax-si-image-schedule" % server_id) + + def enable(self, server_id, retention): + """ + Enable the creation of scheduled images for the server. + + :param server_id: The ID of the server to enable scheduled images for. + :param retention: The number of scheduled images to retain. 
+ :rtype: :class:`ScheduledImage` + """ + try: + retention_val = int(retention) + except ValueError: + msg = "Retention value must be an integer" + raise exceptions.BadRequest(400, msg) + + body = {'image_schedule': {'retention': int(retention)}} + return self._create("/servers/%s/rax-si-image-schedule" % server_id, + body, "image_schedule") + +def _find_server(cs, server): + """Get a server by name or ID.""" + return utils.find_resource(cs.servers, server) + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +def do_scheduled_images_show(cs, args): + """Show the scheduled image settings for a server""" + server_id = _find_server(cs, args.server).id + result = cs.rax_scheduled_images_python_novaclient_ext.get(server_id) + print "Retention: %s" % result.retention + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +@utils.arg('retention', metavar='<retention>', + help='Number of scheduled images to retain') +def do_scheduled_images_enable(cs, args): + """Enable scheduled images for a server""" + server_id = _find_server(cs, args.server).id + result = cs.rax_scheduled_images_python_novaclient_ext.enable( + server_id, args.retention) + + +@utils.arg('server', metavar='<server>', help='Name or ID of server.') +def do_scheduled_images_disable(cs, args): + """Disable scheduled images for a server""" + server_id = _find_server(cs, args.server).id + result = cs.rax_scheduled_images_python_novaclient_ext.disable(server_id) diff --git a/awx/lib/site-packages/requests/__init__.py b/awx/lib/site-packages/requests/__init__.py index 1af8d8ed2e..837f0df9a4 100644 --- a/awx/lib/site-packages/requests/__init__.py +++ b/awx/lib/site-packages/requests/__init__.py @@ -42,15 +42,15 @@ is at <http://python-requests.org>. """ __title__ = 'requests' -__version__ = '1.2.3' -__build__ = 0x010203 +__version__ = '2.0.0' +__build__ = 0x020000 __author__ = 'Kenneth Reitz' __license__ = 'Apache 2.0' __copyright__ = 'Copyright 2013 Kenneth Reitz' # Attempt to enable urllib3's SNI support, if possible try: - from requests.packages.urllib3.contrib import pyopenssl + from .packages.urllib3.contrib import pyopenssl pyopenssl.inject_into_urllib3() except ImportError: pass diff --git a/awx/lib/site-packages/requests/adapters.py b/awx/lib/site-packages/requests/adapters.py index 98b7317edb..d557b74629 100644 --- a/awx/lib/site-packages/requests/adapters.py +++ b/awx/lib/site-packages/requests/adapters.py @@ -11,11 +11,12 @@ and maintain connections. import socket from .models import Response -from .packages.urllib3.poolmanager import PoolManager, ProxyManager +from .packages.urllib3.poolmanager import PoolManager, proxy_from_url from .packages.urllib3.response import HTTPResponse +from .packages.urllib3.util import Timeout as TimeoutSauce from .compat import urlparse, basestring, urldefrag, unquote from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers, - prepend_scheme_if_needed, get_auth_from_url) + except_on_missing_scheme, get_auth_from_url) from .structures import CaseInsensitiveDict from .packages.urllib3.exceptions import MaxRetryError from .packages.urllib3.exceptions import TimeoutError @@ -71,6 +72,7 @@ class HTTPAdapter(BaseAdapter): pool_block=DEFAULT_POOLBLOCK): self.max_retries = max_retries self.config = {} + self.proxy_manager = {} super(HTTPAdapter, self).__init__() @@ -118,7 +120,7 @@ class HTTPAdapter(BaseAdapter): :param verify: Whether we should actually verify the certificate. :param cert: The SSL certificate to verify. 
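        At the requests level these values arrive from the ``verify`` and
        ``cert`` arguments of the public API; a sketch (paths illustrative)::

            import requests

            requests.get('https://example.com/',
                         verify='/path/to/ca-bundle.crt',  # CA bundle path, or True/False
                         cert=('/path/to/client.pem', '/path/to/client.key'))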
""" - if url.startswith('https') and verify: + if url.lower().startswith('https') and verify: cert_loc = None @@ -184,19 +186,26 @@ class HTTPAdapter(BaseAdapter): def get_connection(self, url, proxies=None): """Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the - :class:`HTTPAdapter <reqeusts.adapters.HTTPAdapter>`. + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. """ proxies = proxies or {} - proxy = proxies.get(urlparse(url).scheme) + proxy = proxies.get(urlparse(url.lower()).scheme) if proxy: - proxy = prepend_scheme_if_needed(proxy, urlparse(url).scheme) - conn = ProxyManager(self.poolmanager.connection_from_url(proxy)) + except_on_missing_scheme(proxy) + proxy_headers = self.proxy_headers(proxy) + + if not proxy in self.proxy_manager: + self.proxy_manager[proxy] = proxy_from_url( + proxy, + proxy_headers=proxy_headers) + + conn = self.proxy_manager[proxy].connection_from_url(url) else: - conn = self.poolmanager.connection_from_url(url) + conn = self.poolmanager.connection_from_url(url.lower()) return conn @@ -214,7 +223,7 @@ class HTTPAdapter(BaseAdapter): If the message is being sent through a proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. - This shoudl not be called from user code, and is only exposed for use + This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. @@ -232,8 +241,9 @@ class HTTPAdapter(BaseAdapter): return url def add_headers(self, request, **kwargs): - """Add any headers needed by the connection. Currently this adds a - Proxy-Authorization header. + """Add any headers needed by the connection. As of v2.0 this does + nothing by default, but is left for overriding by users that subclass + the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. This should not be called from user code, and is only exposed for use when subclassing the @@ -242,12 +252,22 @@ class HTTPAdapter(BaseAdapter): :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. :param kwargs: The keyword arguments from the call to send(). """ - proxies = kwargs.get('proxies', {}) + pass - if proxies is None: - proxies = {} + def proxy_headers(self, proxy): + """Returns a dictionary of the headers to add to any request sent + through a proxy. This works with urllib3 magic to ensure that they are + correctly sent to the proxy, rather than in a tunnelled request if + CONNECT is being used. - proxy = proxies.get(urlparse(request.url).scheme) + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + :param proxies: The url of the proxy being used for this request. + :param kwargs: Optional additional keyword arguments. + """ + headers = {} username, password = get_auth_from_url(proxy) if username and password: @@ -255,8 +275,10 @@ class HTTPAdapter(BaseAdapter): # to decode them. username = unquote(username) password = unquote(password) - request.headers['Proxy-Authorization'] = _basic_auth_str(username, - password) + headers['Proxy-Authorization'] = _basic_auth_str(username, + password) + + return headers def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. 
Returns Response object. @@ -273,10 +295,15 @@ class HTTPAdapter(BaseAdapter): self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) - self.add_headers(request, proxies=proxies) + self.add_headers(request) chunked = not (request.body is None or 'Content-Length' in request.headers) + if stream: + timeout = TimeoutSauce(connect=timeout) + else: + timeout = TimeoutSauce(connect=timeout, read=timeout) + try: if not chunked: resp = conn.urlopen( diff --git a/awx/lib/site-packages/requests/auth.py b/awx/lib/site-packages/requests/auth.py index fab05cf3bc..30529e296e 100644 --- a/awx/lib/site-packages/requests/auth.py +++ b/awx/lib/site-packages/requests/auth.py @@ -18,7 +18,6 @@ from base64 import b64encode from .compat import urlparse, str from .utils import parse_dict_header - log = logging.getLogger(__name__) CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' @@ -106,7 +105,9 @@ class HTTPDigestAuth(AuthBase): A1 = '%s:%s:%s' % (self.username, realm, self.password) A2 = '%s:%s' % (method, path) - if qop == 'auth': + if qop is None: + respdig = KD(hash_utf8(A1), "%s:%s" % (nonce, hash_utf8(A2))) + elif qop == 'auth' or 'auth' in qop.split(','): if nonce == self.last_nonce: self.nonce_count += 1 else: @@ -121,8 +122,6 @@ class HTTPDigestAuth(AuthBase): cnonce = (hashlib.sha1(s).hexdigest()[:16]) noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, hash_utf8(A2)) respdig = KD(hash_utf8(A1), noncebit) - elif qop is None: - respdig = KD(hash_utf8(A1), "%s:%s" % (nonce, hash_utf8(A2))) else: # XXX handle auth-int. return None @@ -159,10 +158,14 @@ class HTTPDigestAuth(AuthBase): # to allow our new request to reuse the same one. r.content r.raw.release_conn() + prep = r.request.copy() + prep.prepare_cookies(r.cookies) - r.request.headers['Authorization'] = self.build_digest_header(r.request.method, r.request.url) - _r = r.connection.send(r.request, **kwargs) + prep.headers['Authorization'] = self.build_digest_header( + prep.method, prep.url) + _r = r.connection.send(prep, **kwargs) _r.history.append(r) + _r.request = prep return _r diff --git a/awx/lib/site-packages/requests/compat.py b/awx/lib/site-packages/requests/compat.py index bcf94b0067..0d61a572df 100644 --- a/awx/lib/site-packages/requests/compat.py +++ b/awx/lib/site-packages/requests/compat.py @@ -83,13 +83,14 @@ except ImportError: # --------- if is_py2: - from urllib import quote, unquote, quote_plus, unquote_plus, urlencode + from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag from urllib2 import parse_http_list import cookielib from Cookie import Morsel from StringIO import StringIO from .packages.urllib3.packages.ordered_dict import OrderedDict + from httplib import IncompleteRead builtin_str = str bytes = str @@ -100,11 +101,12 @@ if is_py2: elif is_py3: from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag - from urllib.request import parse_http_list + from urllib.request import parse_http_list, getproxies, proxy_bypass from http import cookiejar as cookielib from http.cookies import Morsel from io import StringIO from collections import OrderedDict + from http.client import IncompleteRead builtin_str = str str = str diff --git a/awx/lib/site-packages/requests/cookies.py b/awx/lib/site-packages/requests/cookies.py index d759d0a977..f3ac64f0a3 100644 --- 
a/awx/lib/site-packages/requests/cookies.py +++ b/awx/lib/site-packages/requests/cookies.py @@ -6,6 +6,7 @@ Compatibility code to be able to use `cookielib.CookieJar` with requests. requests.utils imports from here, so be careful with imports. """ +import time import collections from .compat import cookielib, urlparse, Morsel @@ -73,6 +74,10 @@ class MockRequest(object): def origin_req_host(self): return self.get_origin_req_host() + @property + def host(self): + return self.get_host() + class MockResponse(object): """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. @@ -102,6 +107,9 @@ def extract_cookies_to_jar(jar, request, response): :param request: our own requests.Request object :param response: urllib3.HTTPResponse object """ + if not (hasattr(response, '_original_response') and + response._original_response): + return # the _original_response field is the wrapped httplib.HTTPResponse object, req = MockRequest(request) # pull out the HTTPMessage with the headers and put it in the mock: @@ -258,6 +266,11 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping): """Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name().""" remove_cookie_by_name(self, name) + def set_cookie(self, cookie, *args, **kwargs): + if cookie.value.startswith('"') and cookie.value.endswith('"'): + cookie.value = cookie.value.replace('\\"', '') + return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) + def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" if isinstance(other, cookielib.CookieJar): @@ -354,19 +367,23 @@ def create_cookie(name, value, **kwargs): def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" + expires = None + if morsel["max-age"]: + expires = time.time() + morsel["max-age"] + elif morsel['expires']: + expires = morsel['expires'] + if type(expires) == type(""): + time_template = "%a, %d-%b-%Y %H:%M:%S GMT" + expires = time.mktime(time.strptime(expires, time_template)) c = create_cookie( name=morsel.key, value=morsel.value, version=morsel['version'] or 0, port=None, - port_specified=False, domain=morsel['domain'], - domain_specified=bool(morsel['domain']), - domain_initial_dot=morsel['domain'].startswith('.'), path=morsel['path'], - path_specified=bool(morsel['path']), secure=bool(morsel['secure']), - expires=morsel['max-age'] or morsel['expires'], + expires=expires, discard=False, comment=morsel['comment'], comment_url=bool(morsel['comment']), diff --git a/awx/lib/site-packages/requests/exceptions.py b/awx/lib/site-packages/requests/exceptions.py index c0588f6aed..22207e3539 100644 --- a/awx/lib/site-packages/requests/exceptions.py +++ b/awx/lib/site-packages/requests/exceptions.py @@ -9,7 +9,7 @@ This module contains the set of Requests' exceptions. """ -class RequestException(RuntimeError): +class RequestException(IOError): """There was an ambiguous exception that occurred while handling your request.""" @@ -53,3 +53,7 @@ class InvalidSchema(RequestException, ValueError): class InvalidURL(RequestException, ValueError): """ The URL provided was somehow invalid. 
""" + + +class ChunkedEncodingError(RequestException): + """The server declared chunked encoding but sent an invalid chunk.""" diff --git a/awx/lib/site-packages/requests/models.py b/awx/lib/site-packages/requests/models.py index 6cf2aaa1a6..8fd973535c 100644 --- a/awx/lib/site-packages/requests/models.py +++ b/awx/lib/site-packages/requests/models.py @@ -11,7 +11,7 @@ import collections import logging import datetime -from io import BytesIO +from io import BytesIO, UnsupportedOperation from .hooks import default_hooks from .structures import CaseInsensitiveDict @@ -19,14 +19,16 @@ from .auth import HTTPBasicAuth from .cookies import cookiejar_from_dict, get_cookie_header from .packages.urllib3.filepost import encode_multipart_formdata from .packages.urllib3.util import parse_url -from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL +from .exceptions import ( + HTTPError, RequestException, MissingSchema, InvalidURL, + ChunkedEncodingError) from .utils import ( guess_filename, get_auth_from_url, requote_uri, stream_decode_response_unicode, to_key_val_list, parse_header_links, - iter_slices, guess_json_utf, super_len) + iter_slices, guess_json_utf, super_len, to_native_string) from .compat import ( - cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO, - is_py2, chardet, json, builtin_str, basestring) + cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO, + is_py2, chardet, json, builtin_str, basestring, IncompleteRead) CONTENT_CHUNK_SIZE = 10 * 1024 ITER_CHUNK_SIZE = 512 @@ -92,8 +94,10 @@ class RequestEncodingMixin(object): if parameters are supplied as a dict. """ - if (not files) or isinstance(data, str): - return None + if (not files): + raise ValueError("Files must be provided.") + elif isinstance(data, basestring): + raise ValueError("Data must not be a string.") new_fields = [] fields = to_key_val_list(data or {}) @@ -104,6 +108,10 @@ class RequestEncodingMixin(object): val = [val] for v in val: if v is not None: + # Don't call str() on bytestrings: in Py3 it all goes wrong. + if not isinstance(v, bytes): + v = str(v) + new_fields.append( (field.decode('utf-8') if isinstance(field, bytes) else field, v.encode('utf-8') if isinstance(v, str) else v)) @@ -139,6 +147,9 @@ class RequestHooksMixin(object): def register_hook(self, event, hook): """Properly register a hook.""" + if event not in self.hooks: + raise ValueError('Unsupported event specified, with event name "%s"' % (event)) + if isinstance(hook, collections.Callable): self.hooks[event].append(hook) elif hasattr(hook, '__iter__'): @@ -184,8 +195,8 @@ class Request(RequestHooksMixin): url=None, headers=None, files=None, - data=dict(), - params=dict(), + data=None, + params=None, auth=None, cookies=None, hooks=None): @@ -209,7 +220,6 @@ class Request(RequestHooksMixin): self.params = params self.auth = auth self.cookies = cookies - self.hooks = hooks def __repr__(self): return '<Request [%s]>' % (self.method) @@ -217,19 +227,17 @@ class Request(RequestHooksMixin): def prepare(self): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" p = PreparedRequest() - - p.prepare_method(self.method) - p.prepare_url(self.url, self.params) - p.prepare_headers(self.headers) - p.prepare_cookies(self.cookies) - p.prepare_body(self.data, self.files) - p.prepare_auth(self.auth, self.url) - # Note that prepare_auth must be last to enable authentication schemes - # such as OAuth to work on a fully prepared request. 
- - # This MUST go after prepare_auth. Authenticators could add a hook - p.prepare_hooks(self.hooks) - + p.prepare( + method=self.method, + url=self.url, + headers=self.headers, + files=self.files, + data=self.data, + params=self.params, + auth=self.auth, + cookies=self.cookies, + hooks=self.hooks, + ) return p @@ -264,9 +272,34 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): #: dictionary of callback hooks, for internal usage. self.hooks = default_hooks() + def prepare(self, method=None, url=None, headers=None, files=None, + data=None, params=None, auth=None, cookies=None, hooks=None): + """Prepares the entire request with the given parameters.""" + + self.prepare_method(method) + self.prepare_url(url, params) + self.prepare_headers(headers) + self.prepare_cookies(cookies) + self.prepare_body(data, files) + self.prepare_auth(auth, url) + # Note that prepare_auth must be last to enable authentication schemes + # such as OAuth to work on a fully prepared request. + + # This MUST go after prepare_auth. Authenticators could add a hook + self.prepare_hooks(hooks) + def __repr__(self): return '<PreparedRequest [%s]>' % (self.method) + def copy(self): + p = PreparedRequest() + p.method = self.method + p.url = self.url + p.headers = self.headers + p.body = self.body + p.hooks = self.hooks + return p + def prepare_method(self, method): """Prepares the given HTTP method.""" self.method = method @@ -337,8 +370,7 @@ """Prepares the given HTTP headers.""" if headers: - headers = dict((name.encode('ascii'), value) for name, value in headers.items()) - self.headers = CaseInsensitiveDict(headers) + self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items()) else: self.headers = CaseInsensitiveDict() @@ -352,7 +384,6 @@ body = None content_type = None length = None - is_stream = False is_stream = all([ hasattr(data, '__iter__'), @@ -363,8 +394,8 @@ try: length = super_len(data) - except (TypeError, AttributeError): - length = False + except (TypeError, AttributeError, UnsupportedOperation): + length = None if is_stream: body = data @@ -372,13 +403,10 @@ if files: raise NotImplementedError('Streamed bodies and files are mutually exclusive.') - if length: + if length is not None: self.headers['Content-Length'] = str(length) else: self.headers['Transfer-Encoding'] = 'chunked' - # Check if file, fo, generator, iterator. - # If not, run through normal process. - else: # Multi-part file uploads. if files: @@ -537,11 +565,22 @@ return iter_slices(self._content, chunk_size) def generate(): - while 1: - chunk = self.raw.read(chunk_size, decode_content=True) - if not chunk: - break - yield chunk + try: + # Special case for urllib3. + try: + for chunk in self.raw.stream(chunk_size, + decode_content=True): + yield chunk + except IncompleteRead as e: + raise ChunkedEncodingError(e) + except AttributeError: + # Standard file-like object. + while 1: + chunk = self.raw.read(chunk_size) + if not chunk: + break + yield chunk + self._content_consumed = True gen = generate() @@ -683,4 +722,9 @@ raise HTTPError(http_error_msg, response=self) def close(self): + """Closes the underlying file descriptor and releases the connection + back to the pool.
+ + *Note: Should not normally need to be called explicitly.* + """ return self.raw.release_conn() diff --git a/awx/lib/site-packages/requests/packages/urllib3/__init__.py b/awx/lib/site-packages/requests/packages/urllib3/__init__.py index bff80b8eb4..73071f7001 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/__init__.py +++ b/awx/lib/site-packages/requests/packages/urllib3/__init__.py @@ -23,7 +23,7 @@ from . import exceptions from .filepost import encode_multipart_formdata from .poolmanager import PoolManager, ProxyManager, proxy_from_url from .response import HTTPResponse -from .util import make_headers, get_host +from .util import make_headers, get_host, Timeout # Set default logging handler to avoid "No handler found" warnings. diff --git a/awx/lib/site-packages/requests/packages/urllib3/_collections.py b/awx/lib/site-packages/requests/packages/urllib3/_collections.py index b35a73672e..282b8d5e05 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/_collections.py +++ b/awx/lib/site-packages/requests/packages/urllib3/_collections.py @@ -5,7 +5,7 @@ # the MIT License: http://www.opensource.org/licenses/mit-license.php from collections import MutableMapping -from threading import Lock +from threading import RLock try: # Python 2.7+ from collections import OrderedDict @@ -40,18 +40,18 @@ class RecentlyUsedContainer(MutableMapping): self.dispose_func = dispose_func self._container = self.ContainerCls() - self._lock = Lock() + self.lock = RLock() def __getitem__(self, key): # Re-insert the item, moving it to the end of the eviction line. - with self._lock: + with self.lock: item = self._container.pop(key) self._container[key] = item return item def __setitem__(self, key, value): evicted_value = _Null - with self._lock: + with self.lock: # Possibly evict the existing value of 'key' evicted_value = self._container.get(key, _Null) self._container[key] = value @@ -65,21 +65,21 @@ class RecentlyUsedContainer(MutableMapping): self.dispose_func(evicted_value) def __delitem__(self, key): - with self._lock: + with self.lock: value = self._container.pop(key) if self.dispose_func: self.dispose_func(value) def __len__(self): - with self._lock: + with self.lock: return len(self._container) def __iter__(self): raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') def clear(self): - with self._lock: + with self.lock: # Copy pointers to all values, then wipe the mapping # under Python 2, this copies the list of values twice :-| values = list(self._container.values()) @@ -90,5 +90,5 @@ class RecentlyUsedContainer(MutableMapping): self.dispose_func(value) def keys(self): - with self._lock: + with self.lock: return self._container.keys() diff --git a/awx/lib/site-packages/requests/packages/urllib3/connectionpool.py b/awx/lib/site-packages/requests/packages/urllib3/connectionpool.py index f3e926089f..691d4e238b 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/connectionpool.py +++ b/awx/lib/site-packages/requests/packages/urllib3/connectionpool.py @@ -4,12 +4,11 @@ # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -import logging -import socket import errno +import logging from socket import error as SocketError, timeout as SocketTimeout -from .util import resolve_cert_reqs, resolve_ssl_version, assert_fingerprint +import socket try: # Python 3 from http.client import HTTPConnection, HTTPException @@ -22,11 +21,15 @@ try: # Python 3 from queue import LifoQueue, Empty, Full 
except ImportError: from Queue import LifoQueue, Empty, Full + import Queue as _ # Platform-specific: Windows try: # Compiled with SSL? HTTPSConnection = object - BaseSSLError = None + + class BaseSSLError(BaseException): + pass + ssl = None try: # Python 3 @@ -41,21 +44,29 @@ except (ImportError, AttributeError): # Platform-specific: No SSL. pass -from .request import RequestMethods -from .response import HTTPResponse -from .util import get_host, is_connection_dropped, ssl_wrap_socket from .exceptions import ( ClosedPoolError, + ConnectTimeoutError, EmptyPoolError, HostChangedError, MaxRetryError, SSLError, - TimeoutError, + ReadTimeoutError, + ProxyError, ) - -from .packages.ssl_match_hostname import match_hostname, CertificateError +from .packages.ssl_match_hostname import CertificateError, match_hostname from .packages import six - +from .request import RequestMethods +from .response import HTTPResponse +from .util import ( + assert_fingerprint, + get_host, + is_connection_dropped, + resolve_cert_reqs, + resolve_ssl_version, + ssl_wrap_socket, + Timeout, +) xrange = six.moves.xrange @@ -93,11 +104,24 @@ class VerifiedHTTPSConnection(HTTPSConnection): def connect(self): # Add certificate verification - sock = socket.create_connection((self.host, self.port), self.timeout) + try: + sock = socket.create_connection( + address=(self.host, self.port), + timeout=self.timeout) + except SocketTimeout: + raise ConnectTimeoutError( + self, "Connection to %s timed out. (connect timeout=%s)" % + (self.host, self.timeout)) resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs) resolved_ssl_version = resolve_ssl_version(self.ssl_version) + if self._tunnel_host: + self.sock = sock + # Calls self._set_hostport(), so self.host is + # self._tunnel_host below. + self._tunnel() + # Wrap socket using verification with the root certs in # trusted_root_certs self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file, @@ -110,10 +134,11 @@ class VerifiedHTTPSConnection(HTTPSConnection): if self.assert_fingerprint: assert_fingerprint(self.sock.getpeercert(binary_form=True), self.assert_fingerprint) - else: + elif self.assert_hostname is not False: match_hostname(self.sock.getpeercert(), self.assert_hostname or self.host) + ## Pool objects class ConnectionPool(object): @@ -126,6 +151,9 @@ class ConnectionPool(object): QueueCls = LifoQueue def __init__(self, host, port=None): + # httplib doesn't like it when we include brackets in ipv6 addresses + host = host.strip('[]') + self.host = host self.port = port @@ -133,6 +161,8 @@ class ConnectionPool(object): return '%s(host=%r, port=%r)' % (type(self).__name__, self.host, self.port) +# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 +_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) class HTTPConnectionPool(ConnectionPool, RequestMethods): """ @@ -151,9 +181,15 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): as a valid HTTP/1.0 or 1.1 status line, passed into :class:`httplib.HTTPConnection`. + .. note:: + Only works in Python 2. This parameter is ignored in Python 3. + :param timeout: - Socket timeout for each individual connection, can be a float. None - disables timeout. + Socket timeout in seconds for each individual connection. This can + be a float or integer, which sets the timeout for the HTTP request, + or an instance of :class:`urllib3.util.Timeout` which gives you more + fine-grained control over request timeouts. 
After the constructor has + been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful @@ -171,20 +207,39 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): :param headers: Headers to include with all requests, unless other headers are given explicitly. + + :param _proxy: + Parsed proxy URL, should not be used directly, instead, see + :class:`urllib3.connectionpool.ProxyManager`" + + :param _proxy_headers: + A dictionary with proxy headers, should not be used directly, + instead, see :class:`urllib3.connectionpool.ProxyManager`" """ scheme = 'http' - def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1, - block=False, headers=None): + def __init__(self, host, port=None, strict=False, + timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, + headers=None, _proxy=None, _proxy_headers=None): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) self.strict = strict + + # This is for backwards compatibility and can be removed once a timeout + # can only be set to a Timeout object + if not isinstance(timeout, Timeout): + timeout = Timeout.from_float(timeout) + self.timeout = timeout + self.pool = self.QueueCls(maxsize) self.block = block + self.proxy = _proxy + self.proxy_headers = _proxy_headers or {} + # Fill the queue up so that doing get() on it will block properly for _ in xrange(maxsize): self.pool.put(None) @@ -200,9 +255,14 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): self.num_connections += 1 log.info("Starting new HTTP connection (%d): %s" % (self.num_connections, self.host)) - return HTTPConnection(host=self.host, - port=self.port, - strict=self.strict) + extra_params = {} + if not six.PY3: # Python 2 + extra_params['strict'] = self.strict + + return HTTPConnection(host=self.host, port=self.port, + timeout=self.timeout.connect_timeout, + **extra_params) + def _get_conn(self, timeout=None): """ @@ -263,31 +323,89 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): % self.host) # Connection never got put back into the pool, close it. - conn.close() + if conn: + conn.close() + + def _get_timeout(self, timeout): + """ Helper that always returns a :class:`urllib3.util.Timeout` """ + if timeout is _Default: + return self.timeout.clone() + + if isinstance(timeout, Timeout): + return timeout.clone() + else: + # User passed us an int/float. This is for backwards compatibility, + # can be removed later + return Timeout.from_float(timeout) def _make_request(self, conn, method, url, timeout=_Default, **httplib_request_kw): """ Perform a request on a given httplib connection object taken from our pool. + + :param conn: + a connection from one of our connection pools + + :param timeout: + Socket timeout in seconds for the request. This can be a + float or integer, which will set the same timeout value for + the socket connect and the socket read, or an instance of + :class:`urllib3.util.Timeout`, which gives you more fine-grained + control over your timeouts. """ self.num_requests += 1 - if timeout is _Default: - timeout = self.timeout + timeout_obj = self._get_timeout(timeout) - conn.timeout = timeout # This only does anything in Py26+ - conn.request(method, url, **httplib_request_kw) + try: + timeout_obj.start_connect() + conn.timeout = timeout_obj.connect_timeout + # conn.request() calls httplib.*.request, not the method in + # request.py. 
It also calls makefile (recv) on the socket + conn.request(method, url, **httplib_request_kw) + except SocketTimeout: + raise ConnectTimeoutError( + self, "Connection to %s timed out. (connect timeout=%s)" % + (self.host, timeout_obj.connect_timeout)) - # Set timeout - sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr. - if sock: - sock.settimeout(timeout) + # Reset the timeout for the recv() on the socket + read_timeout = timeout_obj.read_timeout + log.debug("Setting read timeout to %s" % read_timeout) + # App Engine doesn't have a sock attr + if hasattr(conn, 'sock') and \ + read_timeout is not None and \ + read_timeout is not Timeout.DEFAULT_TIMEOUT: + # In Python 3 socket.py will catch EAGAIN and return None when you + # try and read into the file pointer created by http.client, which + # instead raises a BadStatusLine exception. Instead of catching + # the exception and assuming all BadStatusLine exceptions are read + # timeouts, check for a zero timeout before making the request. + if read_timeout == 0: + raise ReadTimeoutError( + self, url, + "Read timed out. (read timeout=%s)" % read_timeout) + conn.sock.settimeout(read_timeout) + + # Receive the response from the server + try: + try: # Python 2.7+, use buffering of HTTP responses + httplib_response = conn.getresponse(buffering=True) + except TypeError: # Python 2.6 and older + httplib_response = conn.getresponse() + except SocketTimeout: + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % read_timeout) + + except SocketError as e: # Platform-specific: Python 2 + # See the above comment about EAGAIN in Python 3. In Python 2 we + # have to specifically catch it and throw the timeout error + if e.errno in _blocking_errnos: + raise ReadTimeoutError( + self, url, + "Read timed out. (read timeout=%s)" % read_timeout) + raise - try: # Python 2.7+, use buffering of HTTP responses - httplib_response = conn.getresponse(buffering=True) - except TypeError: # Python 2.6 and older - httplib_response = conn.getresponse() # AppEngine doesn't have a version attr. http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') @@ -367,7 +485,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): :param redirect: If True, automatically handle redirects (status codes 301, 302, - 303, 307). Each redirect counts as a retry. + 303, 307, 308). Each redirect counts as a retry. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is @@ -375,7 +493,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): use the pool on an HTTP proxy and request foreign hosts. :param timeout: - If specified, overrides the default timeout for this one request. + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. 
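        For example, both accepted forms of the timeout (host and values
        illustrative)::

            from requests.packages.urllib3 import PoolManager
            from requests.packages.urllib3.util import Timeout

            http = PoolManager()
            # A bare float applies to both connect and read.
            http.request('GET', 'http://example.com/', timeout=3.0)
            # A Timeout object gives separate connect/read budgets.
            http.request('GET', 'http://example.com/',
                         timeout=Timeout(connect=2.0, read=7.0))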
:param pool_timeout: If set and the pool is set to block=True, then this method will @@ -402,18 +522,11 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): if retries < 0: raise MaxRetryError(self, url) - if timeout is _Default: - timeout = self.timeout - if release_conn is None: release_conn = response_kw.get('preload_content', True) # Check host if assert_same_host and not self.is_same_host(url): - host = "%s://%s" % (self.scheme, self.host) - if self.port: - host = "%s:%d" % (host, self.port) - raise HostChangedError(self, url, retries - 1) conn = None @@ -444,20 +557,20 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): # ``response.release_conn()`` is called (implicitly by # ``response.read()``) - except Empty as e: + except Empty: # Timed out by queue - raise TimeoutError(self, url, - "Request timed out. (pool_timeout=%s)" % - pool_timeout) + raise ReadTimeoutError( + self, url, "Read timed out, no pool connections are available.") - except SocketTimeout as e: + except SocketTimeout: # Timed out by socket - raise TimeoutError(self, url, - "Request timed out. (timeout=%s)" % - timeout) + raise ReadTimeoutError(self, url, "Read timed out.") except BaseSSLError as e: # SSL certificate error + if 'timed out' in str(e) or \ + 'did not complete (read)' in str(e): # Platform-specific: Python 2.6 + raise ReadTimeoutError(self, url, "Read timed out.") raise SSLError(e) except CertificateError as e: @@ -465,6 +578,10 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): raise SSLError(e) except (HTTPException, SocketError) as e: + if isinstance(e, SocketError) and self.proxy is not None: + raise ProxyError('Cannot connect to proxy. ' + 'Socket error: %s.' % e) + # Connection broken, discard. It will be replaced next _get_conn(). conn = None # This is necessary so we can access e below @@ -513,6 +630,7 @@ class HTTPSConnectionPool(HTTPConnectionPool): :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, ``assert_hostname`` and ``host`` in this order to verify connections. + If ``assert_hostname`` is False, no verification is done. The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and ``ssl_version`` are only used if :mod:`ssl` is available and are fed into @@ -525,13 +643,13 @@ class HTTPSConnectionPool(HTTPConnectionPool): def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1, block=False, headers=None, + _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, ssl_version=None, assert_hostname=None, assert_fingerprint=None): - HTTPConnectionPool.__init__(self, host, port, - strict, timeout, maxsize, - block, headers) + HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, + block, headers, _proxy, _proxy_headers) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs @@ -540,6 +658,34 @@ class HTTPSConnectionPool(HTTPConnectionPool): self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint + def _prepare_conn(self, connection): + """ + Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` + and establish the tunnel if proxy is used. 
+ """ + + if isinstance(connection, VerifiedHTTPSConnection): + connection.set_cert(key_file=self.key_file, + cert_file=self.cert_file, + cert_reqs=self.cert_reqs, + ca_certs=self.ca_certs, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint) + connection.ssl_version = self.ssl_version + + if self.proxy is not None: + # Python 2.7+ + try: + set_tunnel = connection.set_tunnel + except AttributeError: # Platform-specific: Python 2.6 + set_tunnel = connection._set_tunnel + set_tunnel(self.host, self.port, self.proxy_headers) + # Establish tunnel connection early, because otherwise httplib + # would improperly set Host: header to proxy's IP:port. + connection.connect() + + return connection + def _new_conn(self): """ Return a fresh :class:`httplib.HTTPSConnection`. @@ -548,26 +694,28 @@ class HTTPSConnectionPool(HTTPConnectionPool): log.info("Starting new HTTPS connection (%d): %s" % (self.num_connections, self.host)) + actual_host = self.host + actual_port = self.port + if self.proxy is not None: + actual_host = self.proxy.host + actual_port = self.proxy.port + if not ssl: # Platform-specific: Python compiled without +ssl if not HTTPSConnection or HTTPSConnection is object: raise SSLError("Can't connect to HTTPS URL because the SSL " "module is not available.") + connection_class = HTTPSConnection + else: + connection_class = VerifiedHTTPSConnection - return HTTPSConnection(host=self.host, - port=self.port, - strict=self.strict) + extra_params = {} + if not six.PY3: # Python 2 + extra_params['strict'] = self.strict + connection = connection_class(host=actual_host, port=actual_port, + timeout=self.timeout.connect_timeout, + **extra_params) - connection = VerifiedHTTPSConnection(host=self.host, - port=self.port, - strict=self.strict) - connection.set_cert(key_file=self.key_file, cert_file=self.cert_file, - cert_reqs=self.cert_reqs, ca_certs=self.ca_certs, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint) - - connection.ssl_version = self.ssl_version - - return connection + return self._prepare_conn(connection) def connection_from_url(url, **kw): diff --git a/awx/lib/site-packages/requests/packages/urllib3/contrib/ntlmpool.py b/awx/lib/site-packages/requests/packages/urllib3/contrib/ntlmpool.py index 277ee0b2ab..b8cd933034 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/contrib/ntlmpool.py +++ b/awx/lib/site-packages/requests/packages/urllib3/contrib/ntlmpool.py @@ -33,7 +33,7 @@ class NTLMConnectionPool(HTTPSConnectionPool): def __init__(self, user, pw, authurl, *args, **kwargs): """ authurl is a random URL on the server that is protected by NTLM. - user is the Windows user, probably in the DOMAIN\username format. + user is the Windows user, probably in the DOMAIN\\username format. pw is the password for the user. """ super(NTLMConnectionPool, self).__init__(*args, **kwargs) diff --git a/awx/lib/site-packages/requests/packages/urllib3/contrib/pyopenssl.py b/awx/lib/site-packages/requests/packages/urllib3/contrib/pyopenssl.py index 5c4c6d8d31..d43bcd6097 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/contrib/pyopenssl.py +++ b/awx/lib/site-packages/requests/packages/urllib3/contrib/pyopenssl.py @@ -20,13 +20,13 @@ Now you can use :mod:`urllib3` as you normally would, and it will support SNI when the required modules are installed. 
''' -from ndg.httpsclient.ssl_peer_verification import (ServerSSLCertVerification, - SUBJ_ALT_NAME_SUPPORT) +from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT from ndg.httpsclient.subj_alt_name import SubjectAltName import OpenSSL.SSL from pyasn1.codec.der import decoder as der_decoder from socket import _fileobject import ssl +from cStringIO import StringIO from .. import connectionpool from .. import util @@ -99,6 +99,172 @@ def get_subj_alt_name(peer_cert): return dns_name +class fileobject(_fileobject): + + def read(self, size=-1): + # Use max, disallow tiny reads in a loop as they are very inefficient. + # We never leave read() with any leftover data from a new recv() call + # in our internal buffer. + rbufsize = max(self._rbufsize, self.default_bufsize) + # Our use of StringIO rather than lists of string objects returned by + # recv() minimizes memory usage and fragmentation that occurs when + # rbufsize is large compared to the typical return value of recv(). + buf = self._rbuf + buf.seek(0, 2) # seek end + if size < 0: + # Read until EOF + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + while True: + try: + data = self._sock.recv(rbufsize) + except OpenSSL.SSL.WantReadError: + continue + if not data: + break + buf.write(data) + return buf.getvalue() + else: + # Read until size bytes or EOF seen, whichever comes first + buf_len = buf.tell() + if buf_len >= size: + # Already have size bytes in our buffer? Extract and return. + buf.seek(0) + rv = buf.read(size) + self._rbuf = StringIO() + self._rbuf.write(buf.read()) + return rv + + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + while True: + left = size - buf_len + # recv() will malloc the amount of memory given as its + # parameter even though it often returns much less data + # than that. The returned data string is short lived + # as we copy it into a StringIO and free it. This avoids + # fragmentation issues on many platforms. + try: + data = self._sock.recv(left) + except OpenSSL.SSL.WantReadError: + continue + if not data: + break + n = len(data) + if n == size and not buf_len: + # Shortcut. Avoid buffer data copies when: + # - We have no data in our buffer. + # AND + # - Our call to recv returned exactly the + # number of bytes we were asked to read. + return data + if n == left: + buf.write(data) + del data # explicit free + break + assert n <= left, "recv(%d) returned %d bytes" % (left, n) + buf.write(data) + buf_len += n + del data # explicit free + #assert buf_len == buf.tell() + return buf.getvalue() + + def readline(self, size=-1): + buf = self._rbuf + buf.seek(0, 2) # seek end + if buf.tell() > 0: + # check if we already have it in our buffer + buf.seek(0) + bline = buf.readline(size) + if bline.endswith('\n') or len(bline) == size: + self._rbuf = StringIO() + self._rbuf.write(buf.read()) + return bline + del bline + if size < 0: + # Read until \n or EOF, whichever comes first + if self._rbufsize <= 1: + # Speed up unbuffered case + buf.seek(0) + buffers = [buf.read()] + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + data = None + recv = self._sock.recv + while True: + try: + while data != "\n": + data = recv(1) + if not data: + break + buffers.append(data) + except OpenSSL.SSL.WantReadError: + continue + break + return "".join(buffers) + + buf.seek(0, 2) # seek end + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. 
+ while True: + try: + data = self._sock.recv(self._rbufsize) + except OpenSSL.SSL.WantReadError: + continue + if not data: + break + nl = data.find('\n') + if nl >= 0: + nl += 1 + buf.write(data[:nl]) + self._rbuf.write(data[nl:]) + del data + break + buf.write(data) + return buf.getvalue() + else: + # Read until size bytes or \n or EOF seen, whichever comes first + buf.seek(0, 2) # seek end + buf_len = buf.tell() + if buf_len >= size: + buf.seek(0) + rv = buf.read(size) + self._rbuf = StringIO() + self._rbuf.write(buf.read()) + return rv + self._rbuf = StringIO() # reset _rbuf. we consume it via buf. + while True: + try: + data = self._sock.recv(self._rbufsize) + except OpenSSL.SSL.WantReadError: + continue + if not data: + break + left = size - buf_len + # did we just receive a newline? + nl = data.find('\n', 0, left) + if nl >= 0: + nl += 1 + # save the excess data to _rbuf + self._rbuf.write(data[nl:]) + if buf_len: + buf.write(data[:nl]) + break + else: + # Shortcut. Avoid data copy through buf when returning + # a substring of our first recv(). + return data[:nl] + n = len(data) + if n == size and not buf_len: + # Shortcut. Avoid data copy through buf when + # returning exactly all of our first recv(). + return data + if n >= left: + buf.write(data[:left]) + self._rbuf.write(data[left:]) + break + buf.write(data) + buf_len += n + #assert buf_len == buf.tell() + return buf.getvalue() + + class WrappedSocket(object): '''API-compatibility wrapper for Python OpenSSL's Connection-class.''' @@ -106,8 +272,11 @@ class WrappedSocket(object): self.connection = connection self.socket = socket + def fileno(self): + return self.socket.fileno() + def makefile(self, mode, bufsize=-1): - return _fileobject(self.connection, mode, bufsize) + return fileobject(self.connection, mode, bufsize) def settimeout(self, timeout): return self.socket.settimeout(timeout) @@ -115,10 +284,14 @@ class WrappedSocket(object): def sendall(self, data): return self.connection.sendall(data) + def close(self): + return self.connection.shutdown() + def getpeercert(self, binary_form=False): x509 = self.connection.get_peer_certificate() + if not x509: - raise ssl.SSLError('') + return x509 if binary_form: return OpenSSL.crypto.dump_certificate( @@ -159,9 +332,13 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, cnx = OpenSSL.SSL.Connection(ctx, sock) cnx.set_tlsext_host_name(server_hostname) cnx.set_connect_state() - try: - cnx.do_handshake() - except OpenSSL.SSL.Error as e: - raise ssl.SSLError('bad handshake', e) + while True: + try: + cnx.do_handshake() + except OpenSSL.SSL.WantReadError: + continue + except OpenSSL.SSL.Error as e: + raise ssl.SSLError('bad handshake', e) + break return WrappedSocket(cnx, sock) diff --git a/awx/lib/site-packages/requests/packages/urllib3/exceptions.py b/awx/lib/site-packages/requests/packages/urllib3/exceptions.py index 2e2a259cd6..98ef9abc7f 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/exceptions.py +++ b/awx/lib/site-packages/requests/packages/urllib3/exceptions.py @@ -39,6 +39,11 @@ class SSLError(HTTPError): pass +class ProxyError(HTTPError): + "Raised when the connection to a proxy fails." + pass + + class DecodeError(HTTPError): "Raised when automatic decoding based on Content-Type fails." pass @@ -70,8 +75,29 @@ class HostChangedError(RequestError): self.retries = retries -class TimeoutError(RequestError): - "Raised when a socket timeout occurs." 
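The replacement hierarchy added below splits the old catch-all into connect and read variants while keeping a common TimeoutError base, so callers can be as precise as they need. A sketch (URL and timeout illustrative)::

    from requests.packages.urllib3 import PoolManager
    from requests.packages.urllib3.exceptions import (
        ConnectTimeoutError, ReadTimeoutError, TimeoutError)

    http = PoolManager()
    try:
        http.request('GET', 'http://example.com/', timeout=0.5)
    except ConnectTimeoutError:
        pass  # the TCP/SSL connection did not complete in time
    except ReadTimeoutError:
        pass  # connected, but the response data was too slow
    # `except TimeoutError:` would catch both variants.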
+class TimeoutStateError(HTTPError):
+    """ Raised when passing an invalid state to a timeout """
+    pass
+
+
+class TimeoutError(HTTPError):
+    """ Raised when a socket timeout error occurs.
+
+    Catching this error will catch both :exc:`ReadTimeoutErrors
+    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+    """
+    pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+    "Raised when a socket timeout occurs while receiving data from a server"
+    pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+    "Raised when a socket timeout occurs while connecting to a server"
     pass
diff --git a/awx/lib/site-packages/requests/packages/urllib3/fields.py b/awx/lib/site-packages/requests/packages/urllib3/fields.py
new file mode 100644
index 0000000000..ed017657a2
--- /dev/null
+++ b/awx/lib/site-packages/requests/packages/urllib3/fields.py
@@ -0,0 +1,177 @@
+# urllib3/fields.py
+# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
+#
+# This module is part of urllib3 and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import email.utils
+import mimetypes
+
+from .packages import six
+
+
+def guess_content_type(filename, default='application/octet-stream'):
+    """
+    Guess the "Content-Type" of a file.
+
+    :param filename:
+        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+    :param default:
+        If no "Content-Type" can be guessed, default to `default`.
+    """
+    if filename:
+        return mimetypes.guess_type(filename)[0] or default
+    return default
+
+
+def format_header_param(name, value):
+    """
+    Helper function to format and quote a single header parameter.
+
+    Particularly useful for header parameters which might contain
+    non-ASCII values, like file names. This follows RFC 2231, as
+    suggested by RFC 2388 Section 4.4.
+
+    :param name:
+        The name of the parameter, a string expected to be ASCII only.
+    :param value:
+        The value of the parameter, provided as a unicode string.
+    """
+    if not any(ch in value for ch in '"\\\r\n'):
+        result = '%s="%s"' % (name, value)
+        try:
+            result.encode('ascii')
+        except UnicodeEncodeError:
+            pass
+        else:
+            return result
+    if not six.PY3:  # Python 2:
+        value = value.encode('utf-8')
+    value = email.utils.encode_rfc2231(value, 'utf-8')
+    value = '%s*=%s' % (name, value)
+    return value
+
+
+class RequestField(object):
+    """
+    A data container for request body parameters.
+
+    :param name:
+        The name of this request field.
+    :param data:
+        The data/value body.
+    :param filename:
+        An optional filename of the request field.
+    :param headers:
+        An optional dict-like object of headers to initially use for the field.
+    """
+    def __init__(self, name, data, filename=None, headers=None):
+        self._name = name
+        self._filename = filename
+        self.data = data
+        self.headers = {}
+        if headers:
+            self.headers = dict(headers)
+
+    @classmethod
+    def from_tuples(cls, fieldname, value):
+        """
+        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+        Supports constructing :class:`~urllib3.fields.RequestField` from parameters
+        of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type)
+        tuple where the MIME type is optional.
For example: ::
+
+            'foo': 'bar',
+            'fakefile': ('foofile.txt', 'contents of foofile'),
+            'realfile': ('barfile.txt', open('realfile').read()),
+            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+            'nonamefile': 'contents of nonamefile field',
+
+        Field names and filenames must be unicode.
+        """
+        if isinstance(value, tuple):
+            if len(value) == 3:
+                filename, data, content_type = value
+            else:
+                filename, data = value
+                content_type = guess_content_type(filename)
+        else:
+            filename = None
+            content_type = None
+            data = value
+
+        request_param = cls(fieldname, data, filename=filename)
+        request_param.make_multipart(content_type=content_type)
+
+        return request_param
+
+    def _render_part(self, name, value):
+        """
+        Overridable helper function to format a single header parameter.
+
+        :param name:
+            The name of the parameter, a string expected to be ASCII only.
+        :param value:
+            The value of the parameter, provided as a unicode string.
+        """
+        return format_header_param(name, value)
+
+    def _render_parts(self, header_parts):
+        """
+        Helper function to format and quote a single header.
+
+        Useful for single headers that are composed of multiple items. E.g.,
+        'Content-Disposition' fields.
+
+        :param header_parts:
+            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format as
+            `k1="v1"; k2="v2"; ...`.
+        """
+        parts = []
+        iterable = header_parts
+        if isinstance(header_parts, dict):
+            iterable = header_parts.items()
+
+        for name, value in iterable:
+            if value:
+                parts.append(self._render_part(name, value))
+
+        return '; '.join(parts)
+
+    def render_headers(self):
+        """
+        Renders the headers for this request field.
+        """
+        lines = []
+
+        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
+        for sort_key in sort_keys:
+            if self.headers.get(sort_key, False):
+                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
+
+        for header_name, header_value in self.headers.items():
+            if header_name not in sort_keys:
+                if header_value:
+                    lines.append('%s: %s' % (header_name, header_value))
+
+        lines.append('\r\n')
+        return '\r\n'.join(lines)
+
+    def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
+        """
+        Makes this request field into a multipart request field.
+
+        This method sets the "Content-Disposition", "Content-Type" and
+        "Content-Location" headers on the request parameter.
+
+        :param content_type:
+            The 'Content-Type' of the request body.
+        :param content_location:
+            The 'Content-Location' of the request body.
+ + """ + self.headers['Content-Disposition'] = content_disposition or 'form-data' + self.headers['Content-Disposition'] += '; '.join(['', self._render_parts((('name', self._name), ('filename', self._filename)))]) + self.headers['Content-Type'] = content_type + self.headers['Content-Location'] = content_location diff --git a/awx/lib/site-packages/requests/packages/urllib3/filepost.py b/awx/lib/site-packages/requests/packages/urllib3/filepost.py index 470309a006..4575582e91 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/filepost.py +++ b/awx/lib/site-packages/requests/packages/urllib3/filepost.py @@ -1,5 +1,5 @@ # urllib3/filepost.py -# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt) +# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -12,6 +12,7 @@ from io import BytesIO from .packages import six from .packages.six import b +from .fields import RequestField writer = codecs.lookup('utf-8')[3] @@ -23,15 +24,38 @@ def choose_boundary(): return uuid4().hex -def get_content_type(filename): - return mimetypes.guess_type(filename)[0] or 'application/octet-stream' +def iter_field_objects(fields): + """ + Iterate over fields. + + Supports list of (k, v) tuples and dicts, and lists of + :class:`~urllib3.fields.RequestField`. + + """ + if isinstance(fields, dict): + i = six.iteritems(fields) + else: + i = iter(fields) + + for field in i: + if isinstance(field, RequestField): + yield field + else: + yield RequestField.from_tuples(*field) def iter_fields(fields): """ Iterate over fields. + .. deprecated :: + + The addition of `~urllib3.fields.RequestField` makes this function + obsolete. Instead, use :func:`iter_field_objects`, which returns + `~urllib3.fields.RequestField` objects, instead. + Supports list of (k, v) tuples and dicts. + """ if isinstance(fields, dict): return ((k, v) for k, v in six.iteritems(fields)) @@ -44,15 +68,7 @@ def encode_multipart_formdata(fields, boundary=None): Encode a dictionary of ``fields`` using the multipart/form-data MIME format. :param fields: - Dictionary of fields or list of (key, value) or (key, value, MIME type) - field tuples. The key is treated as the field name, and the value as - the body of the form-data bytes. If the value is a tuple of two - elements, then the first element is treated as the filename of the - form-data section and a suitable MIME type is guessed based on the - filename. If the value is a tuple of three elements, then the third - element is treated as an explicit MIME type of the form-data section. - - Field names and filenames must be unicode. + Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). 
:param boundary:
         If not specified, then a random boundary will be generated using
@@ -62,24 +78,11 @@ def encode_multipart_formdata(fields, boundary=None):
     if boundary is None:
         boundary = choose_boundary()
 
-    for fieldname, value in iter_fields(fields):
+    for field in iter_field_objects(fields):
         body.write(b('--%s\r\n' % (boundary)))
 
-        if isinstance(value, tuple):
-            if len(value) == 3:
-                filename, data, content_type = value
-            else:
-                filename, data = value
-                content_type = get_content_type(filename)
-            writer(body).write('Content-Disposition: form-data; name="%s"; '
-                               'filename="%s"\r\n' % (fieldname, filename))
-            body.write(b('Content-Type: %s\r\n\r\n' %
-                       (content_type,)))
-        else:
-            data = value
-            writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
-                               % (fieldname))
-            body.write(b'\r\n')
+        writer(body).write(field.render_headers())
+        data = field.data
 
         if isinstance(data, int):
             data = str(data)  # Backwards compatibility
diff --git a/awx/lib/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py b/awx/lib/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
index 9560b04529..2d61ac2139 100644
--- a/awx/lib/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
+++ b/awx/lib/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
@@ -7,23 +7,60 @@ __version__ = '3.2.2'
 class CertificateError(ValueError):
     pass
 
-def _dnsname_to_pat(dn):
+def _dnsname_match(dn, hostname, max_wildcards=1):
+    """Matching according to RFC 6125, section 6.4.3
+
+    http://tools.ietf.org/html/rfc6125#section-6.4.3
+    """
     pats = []
-    for frag in dn.split(r'.'):
-        if frag == '*':
-            # When '*' is a fragment by itself, it matches a non-empty dotless
-            # fragment.
-            pats.append('[^.]+')
-        else:
-            # Otherwise, '*' matches any dotless fragment.
-            frag = re.escape(frag)
-            pats.append(frag.replace(r'\*', '[^.]*'))
-    return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+    if not dn:
+        return False
+
+    parts = dn.split(r'.')
+    leftmost = parts[0]
+
+    wildcards = leftmost.count('*')
+    if wildcards > max_wildcards:
+        # Issue #17980: avoid denials of service by refusing more
+        # than one wildcard per fragment. A survey of established
+        # policy among SSL implementations showed it to be a
+        # reasonable choice.
+        raise CertificateError(
+            "too many wildcards in certificate DNS name: " + repr(dn))
+
+    # speed up common case w/o wildcards
+    if not wildcards:
+        return dn.lower() == hostname.lower()
+
+    # RFC 6125, section 6.4.3, subitem 1.
+    # The client SHOULD NOT attempt to match a presented identifier in which
+    # the wildcard character comprises a label other than the left-most label.
+    if leftmost == '*':
+        # When '*' is a fragment by itself, it matches a non-empty dotless
+        # fragment.
+        pats.append('[^.]+')
+    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+        # RFC 6125, section 6.4.3, subitem 3.
+        # The client SHOULD NOT attempt to match a presented identifier
+        # where the wildcard character is embedded within an A-label or
+        # U-label of an internationalized domain name.
+        pats.append(re.escape(leftmost))
+    else:
+        # Otherwise, '*' matches any dotless string, e.g.
www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in parts[1:]: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules - are mostly followed, but IP addresses are not accepted for *hostname*. + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. @@ -34,7 +71,7 @@ def match_hostname(cert, hostname): san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': - if _dnsname_to_pat(value).match(hostname): + if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: @@ -45,7 +82,7 @@ def match_hostname(cert, hostname): # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': - if _dnsname_to_pat(value).match(hostname): + if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: diff --git a/awx/lib/site-packages/requests/packages/urllib3/poolmanager.py b/awx/lib/site-packages/requests/packages/urllib3/poolmanager.py index ce0c248ea8..e7f8667ee6 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/poolmanager.py +++ b/awx/lib/site-packages/requests/packages/urllib3/poolmanager.py @@ -6,9 +6,14 @@ import logging +try: # Python 3 + from urllib.parse import urljoin +except ImportError: + from urlparse import urljoin + from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool -from .connectionpool import connection_from_url, port_by_scheme +from .connectionpool import port_by_scheme from .request import RequestMethods from .util import parse_url @@ -55,6 +60,8 @@ class PoolManager(RequestMethods): """ + proxy = None + def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw @@ -94,20 +101,23 @@ class PoolManager(RequestMethods): If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. """ + scheme = scheme or 'http' + port = port or port_by_scheme.get(scheme, 80) pool_key = (scheme, host, port) - # If the scheme, host, or port doesn't match existing open connections, - # open a new ConnectionPool. - pool = self.pools.get(pool_key) - if pool: - return pool + with self.pools.lock: + # If the scheme, host, or port doesn't match existing open + # connections, open a new ConnectionPool. 
+            pool = self.pools.get(pool_key)
+            if pool:
+                return pool
 
-        # Make a fresh ConnectionPool of the desired type
-        pool = self._new_pool(scheme, host, port)
-        self.pools[pool_key] = pool
+            # Make a fresh ConnectionPool of the desired type
+            pool = self._new_pool(scheme, host, port)
+            self.pools[pool_key] = pool
 
         return pool
 
     def connection_from_url(self, url):
@@ -139,12 +149,19 @@ class PoolManager(RequestMethods):
         if 'headers' not in kw:
             kw['headers'] = self.headers
 
-        response = conn.urlopen(method, u.request_uri, **kw)
+        if self.proxy is not None and u.scheme == "http":
+            response = conn.urlopen(method, url, **kw)
+        else:
+            response = conn.urlopen(method, u.request_uri, **kw)
 
         redirect_location = redirect and response.get_redirect_location()
         if not redirect_location:
             return response
 
+        # Support relative URLs for redirecting.
+        redirect_location = urljoin(url, redirect_location)
+
+        # RFC 2616, Section 10.3.4
         if response.status == 303:
             method = 'GET'
 
@@ -154,15 +171,59 @@ class PoolManager(RequestMethods):
         return self.urlopen(method, redirect_location, **kw)
 
 
-class ProxyManager(RequestMethods):
+class ProxyManager(PoolManager):
     """
-    Given a ConnectionPool to a proxy, the ProxyManager's ``urlopen`` method
-    will make requests to any url through the defined proxy. The ProxyManager
-    class will automatically set the 'Host' header if it is not provided.
+    Behaves just like :class:`PoolManager`, but sends all requests through
+    the defined proxy, using the CONNECT method for HTTPS URLs.
+
+    :param proxy_url:
+        The URL of the proxy to be used.
+
+    :param proxy_headers:
+        A dictionary containing headers that will be sent to the proxy. In the
+        HTTP case they are sent with each request, while in the
+        HTTPS/CONNECT case they are sent only once. Could be used for proxy
+        authentication.
+ + Example: + >>> proxy = urllib3.ProxyManager('http://localhost:3128/') + >>> r1 = proxy.request('GET', 'http://google.com/') + >>> r2 = proxy.request('GET', 'http://httpbin.org/') + >>> len(proxy.pools) + 1 + >>> r3 = proxy.request('GET', 'https://httpbin.org/') + >>> r4 = proxy.request('GET', 'https://twitter.com/') + >>> len(proxy.pools) + 3 + """ - def __init__(self, proxy_pool): - self.proxy_pool = proxy_pool + def __init__(self, proxy_url, num_pools=10, headers=None, + proxy_headers=None, **connection_pool_kw): + + if isinstance(proxy_url, HTTPConnectionPool): + proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, + proxy_url.port) + proxy = parse_url(proxy_url) + if not proxy.port: + port = port_by_scheme.get(proxy.scheme, 80) + proxy = proxy._replace(port=port) + self.proxy = proxy + self.proxy_headers = proxy_headers or {} + assert self.proxy.scheme in ("http", "https"), \ + 'Not supported proxy scheme %s' % self.proxy.scheme + connection_pool_kw['_proxy'] = self.proxy + connection_pool_kw['_proxy_headers'] = self.proxy_headers + super(ProxyManager, self).__init__( + num_pools, headers, **connection_pool_kw) + + def connection_from_host(self, host, port=None, scheme='http'): + if scheme == "https": + return super(ProxyManager, self).connection_from_host( + host, port, scheme) + + return super(ProxyManager, self).connection_from_host( + self.proxy.host, self.proxy.port, self.proxy.scheme) def _set_proxy_headers(self, url, headers=None): """ @@ -171,22 +232,28 @@ class ProxyManager(RequestMethods): """ headers_ = {'Accept': '*/*'} - host = parse_url(url).host - if host: - headers_['Host'] = host + netloc = parse_url(url).netloc + if netloc: + headers_['Host'] = netloc if headers: headers_.update(headers) - return headers_ - def urlopen(self, method, url, **kw): + def urlopen(self, method, url, redirect=True, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." - kw['assert_same_host'] = False - kw['headers'] = self._set_proxy_headers(url, headers=kw.get('headers')) - return self.proxy_pool.urlopen(method, url, **kw) + u = parse_url(url) + + if u.scheme == "http": + # It's too late to set proxy headers on per-request basis for + # tunnelled HTTPS connections, should use + # constructor's proxy_headers instead. + kw['headers'] = self._set_proxy_headers(url, kw.get('headers', + self.headers)) + kw['headers'].update(self.proxy_headers) + + return super(ProxyManager, self).urlopen(method, url, redirect, **kw) -def proxy_from_url(url, **pool_kw): - proxy_pool = connection_from_url(url, **pool_kw) - return ProxyManager(proxy_pool) +def proxy_from_url(url, **kw): + return ProxyManager(proxy_url=url, **kw) diff --git a/awx/lib/site-packages/requests/packages/urllib3/request.py b/awx/lib/site-packages/requests/packages/urllib3/request.py index bf0256e964..66a9a0e690 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/request.py +++ b/awx/lib/site-packages/requests/packages/urllib3/request.py @@ -30,7 +30,7 @@ class RequestMethods(object): in the URL (such as GET, HEAD, DELETE). :meth:`.request_encode_body` is for sending requests whose fields are - encoded in the *body* of the request using multipart or www-orm-urlencoded + encoded in the *body* of the request using multipart or www-form-urlencoded (such as for POST, PUT, PATCH). 
:meth:`.request` is for making any kind of request, it will look up the
diff --git a/awx/lib/site-packages/requests/packages/urllib3/response.py b/awx/lib/site-packages/requests/packages/urllib3/response.py
index 2fa407887d..4efff5a13b 100644
--- a/awx/lib/site-packages/requests/packages/urllib3/response.py
+++ b/awx/lib/site-packages/requests/packages/urllib3/response.py
@@ -1,5 +1,5 @@
 # urllib3/response.py
-# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
+# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
 #
 # This module is part of urllib3 and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -7,9 +7,11 @@
 
 import logging
 import zlib
+import io
 
 from .exceptions import DecodeError
 from .packages.six import string_types as basestring, binary_type
+from .util import is_fp_closed
 
 
 log = logging.getLogger(__name__)
@@ -48,7 +50,7 @@ def _get_decoder(mode):
     return DeflateDecoder()
 
 
-class HTTPResponse(object):
+class HTTPResponse(io.IOBase):
     """
     HTTP Response container.
 
@@ -72,6 +74,7 @@ class HTTPResponse(object):
     """
 
     CONTENT_DECODERS = ['gzip', 'deflate']
+    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
 
     def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                  strict=0, preload_content=True, decode_content=True,
@@ -105,7 +108,7 @@ class HTTPResponse(object):
         code and valid location. ``None`` if redirect status and no
         location. ``False`` if not a redirect status code.
         """
-        if self.status in [301, 302, 303, 307]:
+        if self.status in self.REDIRECT_STATUSES:
             return self.headers.get('location')
 
         return False
@@ -183,11 +186,13 @@ class HTTPResponse(object):
             try:
                 if decode_content and self._decoder:
                     data = self._decoder.decompress(data)
-            except (IOError, zlib.error):
-                raise DecodeError("Received response with content-encoding: %s, but "
-                                  "failed to decode it." % content_encoding)
+            except (IOError, zlib.error) as e:
+                raise DecodeError(
+                    "Received response with content-encoding: %s, but "
+                    "failed to decode it." % content_encoding,
+                    e)
 
-            if flush_decoder and self._decoder:
+            if flush_decoder and decode_content and self._decoder:
                 buf = self._decoder.decompress(binary_type())
                 data += buf + self._decoder.flush()
 
@@ -200,6 +205,29 @@ class HTTPResponse(object):
             if self._original_response and self._original_response.isclosed():
                 self.release_conn()
 
+    def stream(self, amt=2**16, decode_content=None):
+        """
+        A generator wrapper for the read() method. A call will block until
+        ``amt`` bytes have been read from the connection or until the
+        connection is closed.
+
+        :param amt:
+            How much of the content to read. The generator will return up to
+            that much data per iteration, but may return less. This is
+            particularly likely when using compressed data. However, the
+            empty string will never be returned.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+ """ + while not is_fp_closed(self._fp): + data = self.read(amt=amt, decode_content=decode_content) + + if data: + yield data + + @classmethod def from_httplib(ResponseCls, r, **response_kw): """ @@ -239,3 +267,35 @@ class HTTPResponse(object): def getheader(self, name, default=None): return self.headers.get(name, default) + + # Overrides from io.IOBase + def close(self): + if not self.closed: + self._fp.close() + + @property + def closed(self): + if self._fp is None: + return True + elif hasattr(self._fp, 'closed'): + return self._fp.closed + elif hasattr(self._fp, 'isclosed'): # Python 2 + return self._fp.isclosed() + else: + return True + + def fileno(self): + if self._fp is None: + raise IOError("HTTPResponse has no file to get a fileno from") + elif hasattr(self._fp, "fileno"): + return self._fp.fileno() + else: + raise IOError("The file-like object this HTTPResponse is wrapped " + "around has no file descriptor") + + def flush(self): + if self._fp is not None and hasattr(self._fp, 'flush'): + return self._fp.flush() + + def readable(self): + return True diff --git a/awx/lib/site-packages/requests/packages/urllib3/util.py b/awx/lib/site-packages/requests/packages/urllib3/util.py index 544f9ed9d6..266c9ed32b 100644 --- a/awx/lib/site-packages/requests/packages/urllib3/util.py +++ b/awx/lib/site-packages/requests/packages/urllib3/util.py @@ -6,10 +6,11 @@ from base64 import b64encode -from collections import namedtuple -from socket import error as SocketError -from hashlib import md5, sha1 from binascii import hexlify, unhexlify +from collections import namedtuple +from hashlib import md5, sha1 +from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT +import time try: from select import poll, POLLIN @@ -31,9 +32,234 @@ try: # Test for SSL features except ImportError: pass - from .packages import six -from .exceptions import LocationParseError, SSLError +from .exceptions import LocationParseError, SSLError, TimeoutStateError + + +_Default = object() +# The default timeout to use for socket connections. This is the attribute used +# by httplib to define the default timeout + + +def current_time(): + """ + Retrieve the current time, this function is mocked out in unit testing. + """ + return time.time() + + +class Timeout(object): + """ + Utility object for storing timeout values. + + Example usage: + + .. code-block:: python + + timeout = urllib3.util.Timeout(connect=2.0, read=7.0) + pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout) + pool.request(...) # Etc, etc + + :param connect: + The maximum amount of time to wait for a connection attempt to a server + to succeed. Omitting the parameter will default the connect timeout to + the system default, probably `the global default timeout in socket.py + <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. + None will set an infinite timeout for connection attempts. + + :type connect: integer, float, or None + + :param read: + The maximum amount of time to wait between consecutive + read operations for a response from the server. Omitting + the parameter will default the read timeout to the system + default, probably `the global default timeout in socket.py + <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. + None will set an infinite timeout. + + :type read: integer, float, or None + + :param total: + The maximum amount of time to wait for an HTTP request to connect and + return. This combines the connect and read timeouts into one. 
In the + event that both a connect timeout and a total are specified, or a read + timeout and a total are specified, the shorter timeout will be applied. + + Defaults to None. + + + :type total: integer, float, or None + + .. note:: + + Many factors can affect the total amount of time for urllib3 to return + an HTTP response. Specifically, Python's DNS resolver does not obey the + timeout specified on the socket. Other factors that can affect total + request time include high CPU load, high swap, the program running at a + low priority level, or other behaviors. The observed running time for + urllib3 to return a response may be greater than the value passed to + `total`. + + In addition, the read and total timeouts only measure the time between + read operations on the socket connecting the client and the server, not + the total amount of time for the request to return a complete response. + As an example, you may want a request to return within 7 seconds or + fail, so you set the ``total`` timeout to 7 seconds. If the server + sends one byte to you every 5 seconds, the request will **not** trigger + time out. This case is admittedly rare. + """ + + #: A sentinel object representing the default timeout value + DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT + + def __init__(self, connect=_Default, read=_Default, total=None): + self._connect = self._validate_timeout(connect, 'connect') + self._read = self._validate_timeout(read, 'read') + self.total = self._validate_timeout(total, 'total') + self._start_connect = None + + def __str__(self): + return '%s(connect=%r, read=%r, total=%r)' % ( + type(self).__name__, self._connect, self._read, self.total) + + + @classmethod + def _validate_timeout(cls, value, name): + """ Check that a timeout attribute is valid + + :param value: The timeout value to validate + :param name: The name of the timeout attribute to validate. This is used + for clear error messages + :return: the value + :raises ValueError: if the type is not an integer or a float, or if it + is a numeric value less than zero + """ + if value is _Default: + return cls.DEFAULT_TIMEOUT + + if value is None or value is cls.DEFAULT_TIMEOUT: + return value + + try: + float(value) + except (TypeError, ValueError): + raise ValueError("Timeout value %s was %s, but it must be an " + "int or float." % (name, value)) + + try: + if value < 0: + raise ValueError("Attempted to set %s timeout to %s, but the " + "timeout cannot be set to a value less " + "than 0." % (name, value)) + except TypeError: # Python 3 + raise ValueError("Timeout value %s was %s, but it must be an " + "int or float." % (name, value)) + + return value + + @classmethod + def from_float(cls, timeout): + """ Create a new Timeout from a legacy timeout value. + + The timeout value used by httplib.py sets the same timeout on the + connect(), and recv() socket requests. This creates a :class:`Timeout` + object that sets the individual timeouts to the ``timeout`` value passed + to this function. + + :param timeout: The legacy timeout value + :type timeout: integer, float, sentinel default object, or None + :return: a Timeout object + :rtype: :class:`Timeout` + """ + return Timeout(read=timeout, connect=timeout) + + def clone(self): + """ Create a copy of the timeout object + + Timeout properties are stored per-pool but each request needs a fresh + Timeout object to ensure each one has its own start/stop configured. 
+ + :return: a copy of the timeout object + :rtype: :class:`Timeout` + """ + # We can't use copy.deepcopy because that will also create a new object + # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to + # detect the user default. + return Timeout(connect=self._connect, read=self._read, + total=self.total) + + def start_connect(self): + """ Start the timeout clock, used during a connect() attempt + + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to start a timer that has been started already. + """ + if self._start_connect is not None: + raise TimeoutStateError("Timeout timer has already been started.") + self._start_connect = current_time() + return self._start_connect + + def get_connect_duration(self): + """ Gets the time elapsed since the call to :meth:`start_connect`. + + :return: the elapsed time + :rtype: float + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to get duration for a timer that hasn't been started. + """ + if self._start_connect is None: + raise TimeoutStateError("Can't get connect duration for timer " + "that has not started.") + return current_time() - self._start_connect + + @property + def connect_timeout(self): + """ Get the value to use when setting a connection timeout. + + This will be a positive float or integer, the value None + (never timeout), or the default system timeout. + + :return: the connect timeout + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + """ + if self.total is None: + return self._connect + + if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: + return self.total + + return min(self._connect, self.total) + + @property + def read_timeout(self): + """ Get the value for the read timeout. + + This assumes some time has elapsed in the connection timeout and + computes the read timeout appropriately. + + If self.total is set, the read timeout is dependent on the amount of + time taken by the connect timeout. If the connection time has not been + established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be + raised. + + :return: the value to use for the read timeout + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` + has not yet been called on this object. + """ + if (self.total is not None and + self.total is not self.DEFAULT_TIMEOUT and + self._read is not None and + self._read is not self.DEFAULT_TIMEOUT): + # in case the connect timeout has not yet been established. + if self._start_connect is None: + return self._read + return max(0, min(self.total - self.get_connect_duration(), + self._read)) + elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: + return max(0, self.total - self.get_connect_duration()) + else: + return self._read class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])): @@ -61,6 +287,13 @@ class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', return uri + @property + def netloc(self): + """Network location including host and port""" + if self.port: + return '%s:%d' % (self.host, self.port) + return self.host + def split_first(s, delims): """ @@ -114,7 +347,7 @@ def parse_url(url): # While this code has overlap with stdlib's urlparse, it is much # simplified for our needs and less annoying. - # Additionally, this imeplementations does silly things to be optimal + # Additionally, this implementations does silly things to be optimal # on CPython. 
scheme = None @@ -143,7 +376,8 @@ def parse_url(url): # IPv6 if url and url[0] == '[': - host, url = url[1:].split(']', 1) + host, url = url.split(']', 1) + host += ']' # Port if ':' in url: @@ -341,6 +575,20 @@ def assert_fingerprint(cert, fingerprint): .format(hexlify(fingerprint_bytes), hexlify(cert_digest))) +def is_fp_closed(obj): + """ + Checks whether a given file-like object is closed. + + :param obj: + The file-like object to check. + """ + if hasattr(obj, 'fp'): + # Object is a container for another file-like object that gets released + # on exhaustion (e.g. HTTPResponse) + return obj.fp is None + + return obj.closed + if SSLContext is not None: # Python 3.2+ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, diff --git a/awx/lib/site-packages/requests/sessions.py b/awx/lib/site-packages/requests/sessions.py index f4aeeee6df..aa956d3f3f 100644 --- a/awx/lib/site-packages/requests/sessions.py +++ b/awx/lib/site-packages/requests/sessions.py @@ -71,15 +71,10 @@ class SessionRedirectMixin(object): """Receives a Response. Returns a generator of Responses.""" i = 0 - prepared_request = PreparedRequest() - prepared_request.body = req.body - prepared_request.headers = req.headers.copy() - prepared_request.hooks = req.hooks - prepared_request.method = req.method - prepared_request.url = req.url # ((resp.status_code is codes.see_other)) while (('location' in resp.headers and resp.status_code in REDIRECT_STATI)): + prepared_request = req.copy() resp.content # Consume socket so it can be released @@ -90,13 +85,18 @@ class SessionRedirectMixin(object): resp.close() url = resp.headers['location'] - method = prepared_request.method + method = req.method # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (parsed_rurl.scheme, url) + # The scheme should be lower case... + if '://' in url: + scheme, uri = url.split('://', 1) + url = '%s://%s' % (scheme.lower(), uri) + # Facilitate non-RFC2616-compliant 'location' headers # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. @@ -109,12 +109,12 @@ class SessionRedirectMixin(object): # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4 if (resp.status_code == codes.see_other and - prepared_request.method != 'HEAD'): + method != 'HEAD'): method = 'GET' # Do what the browsers do, despite standards... if (resp.status_code in (codes.moved, codes.found) and - prepared_request.method not in ('GET', 'HEAD')): + method not in ('GET', 'HEAD')): method = 'GET' prepared_request.method = method @@ -153,7 +153,7 @@ class SessionRedirectMixin(object): class Session(SessionRedirectMixin): """A Requests session. - Provides cookie persistience, connection-pooling, and configuration. + Provides cookie persistence, connection-pooling, and configuration. Basic Usage:: @@ -208,7 +208,10 @@ class Session(SessionRedirectMixin): #: Should we trust the environment? self.trust_env = True - # Set up a CookieJar to be used by default + #: A CookieJar containing all currently outstanding cookies set on this + #: session. By default it is a + #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but + #: may be any other ``cookielib.CookieJar`` compatible object. self.cookies = cookiejar_from_dict({}) # Default connection adapters. 
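
A note for reviewers on the sessions.py hunks above and below: resolve_redirects now rebuilds the PreparedRequest from req.copy() on every hop instead of mutating one shared object, and the next hunk factors session-level merging out into prepare_request(). A minimal sketch of the resulting behavior (illustrative URL; only the requests Session API shown in this diff is assumed):

    import requests

    # Each redirect hop is prepared from a fresh copy of the original request,
    # so a per-hop rewrite (e.g. a 303 response downgrading the method to GET)
    # starts from clean state rather than from the previous hop's mutations.
    session = requests.Session()
    response = session.get('http://httpbin.org/redirect/3', allow_redirects=True)

    # response.history holds one Response per intermediate hop.
    for hop in response.history:
        print(hop.status_code, hop.url)
    print(response.status_code, response.url)
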
@@ -222,6 +225,46 @@ class Session(SessionRedirectMixin): def __exit__(self, *args): self.close() + def prepare_request(self, request): + """Constructs a :class:`PreparedRequest <PreparedRequest>` for + transmission and returns it. The :class:`PreparedRequest` has settings + merged from the :class:`Request <Request>` instance and those of the + :class:`Session`. + + :param request: :class:`Request` instance to prepare with this + session's settings. + """ + cookies = request.cookies or {} + + # Bootstrap CookieJar. + if not isinstance(cookies, cookielib.CookieJar): + cookies = cookiejar_from_dict(cookies) + + # Merge with session cookies + merged_cookies = RequestsCookieJar() + merged_cookies.update(self.cookies) + merged_cookies.update(cookies) + + + # Set environment's basic authentication if not explicitly set. + auth = request.auth + if self.trust_env and not auth and not self.auth: + auth = get_netrc_auth(request.url) + + p = PreparedRequest() + p.prepare( + method=request.method.upper(), + url=request.url, + files=request.files, + data=request.data, + headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), + params=merge_setting(request.params, self.params), + auth=merge_setting(auth, self.auth), + cookies=merged_cookies, + hooks=merge_setting(request.hooks, self.hooks), + ) + return p + def request(self, method, url, params=None, data=None, @@ -265,20 +308,22 @@ class Session(SessionRedirectMixin): :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. """ + # Create the Request. + req = Request( + method = method.upper(), + url = url, + headers = headers, + files = files, + data = data or {}, + params = params or {}, + auth = auth, + cookies = cookies, + hooks = hooks, + ) + prep = self.prepare_request(req) - cookies = cookies or {} proxies = proxies or {} - # Bootstrap CookieJar. - if not isinstance(cookies, cookielib.CookieJar): - cookies = cookiejar_from_dict(cookies) - - # Merge with session cookies - merged_cookies = RequestsCookieJar() - merged_cookies.update(self.cookies) - merged_cookies.update(cookies) - cookies = merged_cookies - # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. @@ -286,10 +331,6 @@ class Session(SessionRedirectMixin): for (k, v) in env_proxies.items(): proxies.setdefault(k, v) - # Set environment's basic authentication. - if not auth: - auth = get_netrc_auth(url) - # Look for configuration. if not verify and verify is not False: verify = os.environ.get('REQUESTS_CA_BUNDLE') @@ -299,30 +340,11 @@ class Session(SessionRedirectMixin): verify = os.environ.get('CURL_CA_BUNDLE') # Merge all the kwargs. - params = merge_setting(params, self.params) - headers = merge_setting(headers, self.headers, dict_class=CaseInsensitiveDict) - auth = merge_setting(auth, self.auth) proxies = merge_setting(proxies, self.proxies) - hooks = merge_setting(hooks, self.hooks) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) - # Create the Request. - req = Request() - req.method = method.upper() - req.url = url - req.headers = headers - req.files = files - req.data = data - req.params = params - req.auth = auth - req.cookies = cookies - req.hooks = hooks - - # Prepare the Request. - prep = req.prepare() - # Send the request. send_kwargs = { 'stream': stream, @@ -416,7 +438,7 @@ class Session(SessionRedirectMixin): # It's possible that users might accidentally send a Request object. 
# Guard against that specific failure case.
-        if getattr(request, 'prepare', None):
+        if not isinstance(request, PreparedRequest):
             raise ValueError('You can only send PreparedRequests.')
 
         # Set up variables needed for resolve_redirects and dispatching of
@@ -443,6 +465,10 @@ class Session(SessionRedirectMixin):
         r = dispatch_hook('response', hooks, r, **kwargs)
 
         # Persist cookies
+        if r.history:
+            # If the hooks create history then we want those cookies too
+            for resp in r.history:
+                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
         extract_cookies_to_jar(self.cookies, request, r.raw)
 
         # Redirect resolving generator.
@@ -467,7 +493,7 @@ class Session(SessionRedirectMixin):
         """Returns the appropriate connection adapter for the given URL."""
         for (prefix, adapter) in self.adapters.items():
 
-            if url.startswith(prefix):
+            if url.lower().startswith(prefix):
                 return adapter
 
         # Nothing matches :-/
@@ -475,7 +501,7 @@ class Session(SessionRedirectMixin):
 
     def close(self):
         """Closes all adapters and as such the session"""
-        for _, v in self.adapters.items():
+        for v in self.adapters.values():
             v.close()
 
     def mount(self, prefix, adapter):
diff --git a/awx/lib/site-packages/requests/status_codes.py b/awx/lib/site-packages/requests/status_codes.py
index de384865fc..ed7a8660a6 100644
--- a/awx/lib/site-packages/requests/status_codes.py
+++ b/awx/lib/site-packages/requests/status_codes.py
@@ -18,7 +18,8 @@ _codes = {
     205: ('reset_content', 'reset'),
     206: ('partial_content', 'partial'),
     207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
-    208: ('im_used',),
+    208: ('already_reported',),
+    226: ('im_used',),
 
     # Redirection.
     300: ('multiple_choices',),
diff --git a/awx/lib/site-packages/requests/structures.py b/awx/lib/site-packages/requests/structures.py
index 8d02ea67b6..a1759137aa 100644
--- a/awx/lib/site-packages/requests/structures.py
+++ b/awx/lib/site-packages/requests/structures.py
@@ -103,7 +103,7 @@ class CaseInsensitiveDict(collections.MutableMapping):
 
     # Copy is required
     def copy(self):
-        return CaseInsensitiveDict(self._store.values())
+        return CaseInsensitiveDict(self._store.values())
 
     def __repr__(self):
         return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
diff --git a/awx/lib/site-packages/requests/utils.py b/awx/lib/site-packages/requests/utils.py
index b21bf8fb76..3ec61312aa 100644
--- a/awx/lib/site-packages/requests/utils.py
+++ b/awx/lib/site-packages/requests/utils.py
@@ -21,9 +21,11 @@ from netrc import netrc, NetrcParseError
 
 from . import __version__
 from . 
import certs from .compat import parse_http_list as _parse_list_header -from .compat import quote, urlparse, bytes, str, OrderedDict, urlunparse +from .compat import (quote, urlparse, bytes, str, OrderedDict, urlunparse, + is_py2, is_py3, builtin_str, getproxies, proxy_bypass) from .cookies import RequestsCookieJar, cookiejar_from_dict from .structures import CaseInsensitiveDict +from .exceptions import MissingSchema, InvalidURL _hush_pyflakes = (RequestsCookieJar,) @@ -264,8 +266,12 @@ def get_encodings_from_content(content): """ charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) + pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I) + xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') - return charset_re.findall(content) + return (charset_re.findall(content) + + pragma_re.findall(content) + + xml_re.findall(content)) def get_encoding_from_headers(headers): @@ -301,7 +307,7 @@ def stream_decode_response_unicode(iterator, r): rv = decoder.decode(chunk) if rv: yield rv - rv = decoder.decode('', final=True) + rv = decoder.decode(b'', final=True) if rv: yield rv @@ -361,7 +367,11 @@ def unquote_unreserved(uri): for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): - c = chr(int(h, 16)) + try: + c = chr(int(h, 16)) + except ValueError: + raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) + if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: @@ -386,25 +396,17 @@ def requote_uri(uri): def get_environ_proxies(url): """Return a dict of environment proxies.""" - proxy_keys = [ - 'all', - 'http', - 'https', - 'ftp', - 'socks' - ] - get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy = get_proxy('no_proxy') + netloc = urlparse(url).netloc if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the netloc, both with and without the port. - no_proxy = no_proxy.split(',') - netloc = urlparse(url).netloc + no_proxy = no_proxy.replace(' ', '').split(',') for host in no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): @@ -412,10 +414,15 @@ def get_environ_proxies(url): # to apply the proxies on this URL. return {} + # If the system proxy settings indicate that this URL should be bypassed, + # don't proxy. + if proxy_bypass(netloc): + return {} + # If we get here, we either didn't have no_proxy set or we're not going - # anywhere that no_proxy applies to. - proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys] - return dict([(key, val) for (key, val) in proxies if val]) + # anywhere that no_proxy applies to, and the system settings don't require + # bypassing the proxy for the current URL. + return getproxies() def default_user_agent(): @@ -526,18 +533,13 @@ def guess_json_utf(data): return None -def prepend_scheme_if_needed(url, new_scheme): - '''Given a URL that may or may not have a scheme, prepend the given scheme. - Does not replace a present scheme with the one provided as an argument.''' - scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) +def except_on_missing_scheme(url): + """Given a URL, raise a MissingSchema exception if the scheme is missing. + """ + scheme, netloc, path, params, query, fragment = urlparse(url) - # urlparse is a finicky beast, and sometimes decides that there isn't a - # netloc present. 
Assume that it's being over-cautious, and switch netloc - # and path if urlparse decided there was no netloc. - if not netloc: - netloc, path = path, netloc - - return urlunparse((scheme, netloc, path, params, query, fragment)) + if not scheme: + raise MissingSchema('Proxy URLs must have explicit schemes.') def get_auth_from_url(url): @@ -548,3 +550,22 @@ def get_auth_from_url(url): return (parsed.username, parsed.password) else: return ('', '') + + +def to_native_string(string, encoding='ascii'): + """ + Given a string object, regardless of type, returns a representation of that + string in the native string type, encoding and decoding where necessary. + This assumes ASCII unless told otherwise. + """ + out = None + + if isinstance(string, builtin_str): + out = string + else: + if is_py2: + out = string.encode(encoding) + else: + out = string.decode(encoding) + + return out diff --git a/awx/lib/site-packages/rest_framework/__init__.py b/awx/lib/site-packages/rest_framework/__init__.py index 087808e0b2..2bd2991baa 100644 --- a/awx/lib/site-packages/rest_framework/__init__.py +++ b/awx/lib/site-packages/rest_framework/__init__.py @@ -1,4 +1,4 @@ -__version__ = '2.3.7' +__version__ = '2.3.8' VERSION = __version__ # synonym diff --git a/awx/lib/site-packages/rest_framework/compat.py b/awx/lib/site-packages/rest_framework/compat.py index 6f7447add0..b9d1dae6b2 100644 --- a/awx/lib/site-packages/rest_framework/compat.py +++ b/awx/lib/site-packages/rest_framework/compat.py @@ -47,6 +47,12 @@ try: except ImportError: django_filters = None +# guardian is optional +try: + import guardian +except ImportError: + guardian = None + # cStringIO only if it's available, otherwise StringIO try: diff --git a/awx/lib/site-packages/rest_framework/fields.py b/awx/lib/site-packages/rest_framework/fields.py index add9d224d3..210c2537da 100644 --- a/awx/lib/site-packages/rest_framework/fields.py +++ b/awx/lib/site-packages/rest_framework/fields.py @@ -16,6 +16,7 @@ from django.core import validators from django.core.exceptions import ValidationError from django.conf import settings from django.db.models.fields import BLANK_CHOICE_DASH +from django.http import QueryDict from django.forms import widgets from django.utils.encoding import is_protected_type from django.utils.translation import ugettext_lazy as _ @@ -307,7 +308,10 @@ class WritableField(Field): try: if self.use_files: files = files or {} - native = files[field_name] + try: + native = files[field_name] + except KeyError: + native = data[field_name] else: native = data[field_name] except KeyError: @@ -399,10 +403,15 @@ class BooleanField(WritableField): } empty = False - # Note: we set default to `False` in order to fill in missing value not - # supplied by html form. TODO: Fix so that only html form input gets - # this behavior. - default = False + def field_from_native(self, data, files, field_name, into): + # HTML checkboxes do not explicitly represent unchecked as `False` + # we deal with that here... 
+ if isinstance(data, QueryDict): + self.default = False + + return super(BooleanField, self).field_from_native( + data, files, field_name, into + ) def from_native(self, value): if value in ('true', 't', 'True', '1'): @@ -505,6 +514,11 @@ class ChoiceField(WritableField): return True return False + def from_native(self, value): + if value in validators.EMPTY_VALUES: + return None + return super(ChoiceField, self).from_native(value) + class EmailField(CharField): type_name = 'EmailField' diff --git a/awx/lib/site-packages/rest_framework/filters.py b/awx/lib/site-packages/rest_framework/filters.py index 4079e1bd55..1693bcd24c 100644 --- a/awx/lib/site-packages/rest_framework/filters.py +++ b/awx/lib/site-packages/rest_framework/filters.py @@ -4,7 +4,7 @@ returned by list views. """ from __future__ import unicode_literals from django.db import models -from rest_framework.compat import django_filters, six +from rest_framework.compat import django_filters, six, guardian from functools import reduce import operator @@ -140,3 +140,24 @@ class OrderingFilter(BaseFilterBackend): return queryset.order_by(*ordering) return queryset + + +class DjangoObjectPermissionsFilter(BaseFilterBackend): + """ + A filter backend that limits results to those where the requesting user + has read object level permissions. + """ + def __init__(self): + assert guardian, 'Using DjangoObjectPermissionsFilter, but django-guardian is not installed' + + perm_format = '%(app_label)s.view_%(model_name)s' + + def filter_queryset(self, request, queryset, view): + user = request.user + model_cls = queryset.model + kwargs = { + 'app_label': model_cls._meta.app_label, + 'model_name': model_cls._meta.module_name + } + permission = self.perm_format % kwargs + return guardian.shortcuts.get_objects_for_user(user, permission, queryset) diff --git a/awx/lib/site-packages/rest_framework/generics.py b/awx/lib/site-packages/rest_framework/generics.py index 99e9782e21..7d1bf79451 100644 --- a/awx/lib/site-packages/rest_framework/generics.py +++ b/awx/lib/site-packages/rest_framework/generics.py @@ -14,6 +14,17 @@ from rest_framework.settings import api_settings import warnings +def strict_positive_int(integer_string, cutoff=None): + """ + Cast a string to a strictly positive integer. 
+ """ + ret = int(integer_string) + if ret <= 0: + raise ValueError() + if cutoff: + ret = min(ret, cutoff) + return ret + def get_object_or_404(queryset, **filter_kwargs): """ Same as Django's standard shortcut, but make sure to raise 404 @@ -47,6 +58,7 @@ class GenericAPIView(views.APIView): # Pagination settings paginate_by = api_settings.PAGINATE_BY paginate_by_param = api_settings.PAGINATE_BY_PARAM + max_paginate_by = api_settings.MAX_PAGINATE_BY pagination_serializer_class = api_settings.DEFAULT_PAGINATION_SERIALIZER_CLASS page_kwarg = 'page' @@ -135,7 +147,7 @@ class GenericAPIView(views.APIView): page_query_param = self.request.QUERY_PARAMS.get(self.page_kwarg) page = page_kwarg or page_query_param or 1 try: - page_number = int(page) + page_number = strict_positive_int(page) except ValueError: if page == 'last': page_number = paginator.num_pages @@ -196,9 +208,11 @@ class GenericAPIView(views.APIView): PendingDeprecationWarning, stacklevel=2) if self.paginate_by_param: - query_params = self.request.QUERY_PARAMS try: - return int(query_params[self.paginate_by_param]) + return strict_positive_int( + self.request.QUERY_PARAMS[self.paginate_by_param], + cutoff=self.max_paginate_by + ) except (KeyError, ValueError): pass @@ -342,8 +356,15 @@ class GenericAPIView(views.APIView): self.check_permissions(cloned_request) # Test object permissions if method == 'PUT': - self.get_object() - except (exceptions.APIException, PermissionDenied, Http404): + try: + self.get_object() + except Http404: + # Http404 should be acceptable and the serializer + # metadata should be populated. Except this so the + # outer "else" clause of the try-except-else block + # will be executed. + pass + except (exceptions.APIException, PermissionDenied): pass else: # If user has appropriate permissions for the view, include diff --git a/awx/lib/site-packages/rest_framework/mixins.py b/awx/lib/site-packages/rest_framework/mixins.py index f11def6d41..426865ff93 100644 --- a/awx/lib/site-packages/rest_framework/mixins.py +++ b/awx/lib/site-packages/rest_framework/mixins.py @@ -142,11 +142,16 @@ class UpdateModelMixin(object): try: return self.get_object() except Http404: - # If this is a PUT-as-create operation, we need to ensure that - # we have relevant permissions, as if this was a POST request. - # This will either raise a PermissionDenied exception, - # or simply return None - self.check_permissions(clone_request(self.request, 'POST')) + if self.request.method == 'PUT': + # For PUT-as-create operation, we need to ensure that we have + # relevant permissions, as if this was a POST request. This + # will either raise a PermissionDenied exception, or simply + # return None. + self.check_permissions(clone_request(self.request, 'POST')) + else: + # PATCH requests where the object does not exist should still + # return a 404 response. 
+ raise def pre_save(self, obj): """ diff --git a/awx/lib/site-packages/rest_framework/parsers.py b/awx/lib/site-packages/rest_framework/parsers.py index 96bfac84a0..98fc03417d 100644 --- a/awx/lib/site-packages/rest_framework/parsers.py +++ b/awx/lib/site-packages/rest_framework/parsers.py @@ -10,9 +10,9 @@ from django.core.files.uploadhandler import StopFutureHandlers from django.http import QueryDict from django.http.multipartparser import MultiPartParser as DjangoMultiPartParser from django.http.multipartparser import MultiPartParserError, parse_header, ChunkIter -from rest_framework.compat import yaml, etree +from rest_framework.compat import etree, six, yaml from rest_framework.exceptions import ParseError -from rest_framework.compat import six +from rest_framework import renderers import json import datetime import decimal @@ -47,6 +47,7 @@ class JSONParser(BaseParser): """ media_type = 'application/json' + renderer_class = renderers.UnicodeJSONRenderer def parse(self, stream, media_type=None, parser_context=None): """ @@ -121,7 +122,8 @@ class MultiPartParser(BaseParser): parser_context = parser_context or {} request = parser_context['request'] encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET) - meta = request.META + meta = request.META.copy() + meta['CONTENT_TYPE'] = media_type upload_handlers = request.upload_handlers try: @@ -129,7 +131,7 @@ class MultiPartParser(BaseParser): data, files = parser.parse() return DataAndFiles(data, files) except MultiPartParserError as exc: - raise ParseError('Multipart form parse error - %s' % six.u(exc)) + raise ParseError('Multipart form parse error - %s' % str(exc)) class XMLParser(BaseParser): diff --git a/awx/lib/site-packages/rest_framework/permissions.py b/awx/lib/site-packages/rest_framework/permissions.py index 1036663e05..531847988f 100644 --- a/awx/lib/site-packages/rest_framework/permissions.py +++ b/awx/lib/site-packages/rest_framework/permissions.py @@ -7,6 +7,7 @@ import warnings SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS'] +from django.http import Http404 from rest_framework.compat import oauth2_provider_scope, oauth2_constants @@ -151,6 +152,65 @@ class DjangoModelPermissionsOrAnonReadOnly(DjangoModelPermissions): authenticated_users_only = False +class DjangoObjectPermissions(DjangoModelPermissions): + """ + The request is authenticated using Django's object-level permissions. + It requires an object-permissions-enabled backend, such as Django Guardian. + + It ensures that the user is authenticated, and has the appropriate + `add`/`change`/`delete` permissions on the object using .has_perms. + + This permission can only be applied against view classes that + provide a `.model` or `.queryset` attribute. 
+ """ + + perms_map = { + 'GET': [], + 'OPTIONS': [], + 'HEAD': [], + 'POST': ['%(app_label)s.add_%(model_name)s'], + 'PUT': ['%(app_label)s.change_%(model_name)s'], + 'PATCH': ['%(app_label)s.change_%(model_name)s'], + 'DELETE': ['%(app_label)s.delete_%(model_name)s'], + } + + def get_required_object_permissions(self, method, model_cls): + kwargs = { + 'app_label': model_cls._meta.app_label, + 'model_name': model_cls._meta.module_name + } + return [perm % kwargs for perm in self.perms_map[method]] + + def has_object_permission(self, request, view, obj): + model_cls = getattr(view, 'model', None) + queryset = getattr(view, 'queryset', None) + + if model_cls is None and queryset is not None: + model_cls = queryset.model + + perms = self.get_required_object_permissions(request.method, model_cls) + user = request.user + + if not user.has_perms(perms, obj): + # If the user does not have permissions we need to determine if + # they have read permissions to see 403, or not, and simply see + # a 404 reponse. + + if request.method in ('GET', 'OPTIONS', 'HEAD'): + # Read permissions already checked and failed, no need + # to make another lookup. + raise Http404 + + read_perms = self.get_required_object_permissions('GET', model_cls) + if not user.has_perms(read_perms, obj): + raise Http404 + + # Has read permissions. + return False + + return True + + class TokenHasReadWriteScope(BasePermission): """ The request is authenticated as a user and the token used has the right scope diff --git a/awx/lib/site-packages/rest_framework/relations.py b/awx/lib/site-packages/rest_framework/relations.py index edaf76d6ee..35c00bf1df 100644 --- a/awx/lib/site-packages/rest_framework/relations.py +++ b/awx/lib/site-packages/rest_framework/relations.py @@ -134,9 +134,9 @@ class RelatedField(WritableField): value = obj for component in source.split('.'): - value = get_component(value, component) if value is None: break + value = get_component(value, component) except ObjectDoesNotExist: return None @@ -244,6 +244,8 @@ class PrimaryKeyRelatedField(RelatedField): source = self.source or field_name queryset = obj for component in source.split('.'): + if queryset is None: + return [] queryset = get_component(queryset, component) # Forward relationship @@ -262,7 +264,7 @@ class PrimaryKeyRelatedField(RelatedField): # RelatedObject (reverse relationship) try: pk = getattr(obj, self.source or field_name).pk - except ObjectDoesNotExist: + except (ObjectDoesNotExist, AttributeError): return None # Forward relationship @@ -567,8 +569,13 @@ class HyperlinkedIdentityField(Field): May raise a `NoReverseMatch` if the `view_name` and `lookup_field` attributes are not configured to correctly match the URL conf. 
""" - lookup_field = getattr(obj, self.lookup_field) + lookup_field = getattr(obj, self.lookup_field, None) kwargs = {self.lookup_field: lookup_field} + + # Handle unsaved object case + if lookup_field is None: + return None + try: return reverse(view_name, kwargs=kwargs, request=request, format=format) except NoReverseMatch: diff --git a/awx/lib/site-packages/rest_framework/renderers.py b/awx/lib/site-packages/rest_framework/renderers.py index 3a03ca3324..fca67eeeb0 100644 --- a/awx/lib/site-packages/rest_framework/renderers.py +++ b/awx/lib/site-packages/rest_framework/renderers.py @@ -21,11 +21,10 @@ from rest_framework.compat import six from rest_framework.compat import smart_text from rest_framework.compat import yaml from rest_framework.settings import api_settings -from rest_framework.request import clone_request +from rest_framework.request import is_form_media_type, override_method from rest_framework.utils import encoders from rest_framework.utils.breadcrumbs import get_breadcrumbs -from rest_framework.utils.formatting import get_view_name, get_view_description -from rest_framework import exceptions, parsers, status, VERSION +from rest_framework import exceptions, status, VERSION class BaseRenderer(object): @@ -37,6 +36,7 @@ class BaseRenderer(object): media_type = None format = None charset = 'utf-8' + render_style = 'text' def render(self, data, accepted_media_type=None, renderer_context=None): raise NotImplemented('Renderer class requires .render() to be implemented') @@ -52,16 +52,17 @@ class JSONRenderer(BaseRenderer): format = 'json' encoder_class = encoders.JSONEncoder ensure_ascii = True - charset = 'utf-8' - # Note that JSON encodings must be utf-8, utf-16 or utf-32. + charset = None + # JSON is a binary encoding, that can be encoded as utf-8, utf-16 or utf-32. # See: http://www.ietf.org/rfc/rfc4627.txt + # Also: http://lucumr.pocoo.org/2013/7/19/application-mimetypes-and-encodings/ def render(self, data, accepted_media_type=None, renderer_context=None): """ Render `data` into JSON. """ if data is None: - return '' + return bytes() # If 'indent' is provided in the context, then pretty print the result. # E.g. If we're being called by the BrowsableAPIRenderer. @@ -86,13 +87,12 @@ class JSONRenderer(BaseRenderer): # and may (or may not) be unicode. # On python 3.x json.dumps() returns unicode strings. if isinstance(ret, six.text_type): - return bytes(ret.encode(self.charset)) + return bytes(ret.encode('utf-8')) return ret class UnicodeJSONRenderer(JSONRenderer): ensure_ascii = False - charset = 'utf-8' """ Renderer which serializes to JSON. Does *not* apply JSON's character escaping for non-ascii characters. @@ -109,6 +109,7 @@ class JSONPRenderer(JSONRenderer): format = 'jsonp' callback_parameter = 'callback' default_callback = 'callback' + charset = 'utf-8' def get_callback(self, renderer_context): """ @@ -317,6 +318,90 @@ class StaticHTMLRenderer(TemplateHTMLRenderer): return data +class HTMLFormRenderer(BaseRenderer): + """ + Renderers serializer data into an HTML form. + + If the serializer was instantiated without an object then this will + return an HTML form not bound to any object, + otherwise it will return an HTML form with the appropriate initial data + populated from the object. + + Note that rendering of field and form errors is not currently supported. 
+ """ + media_type = 'text/html' + format = 'form' + template = 'rest_framework/form.html' + charset = 'utf-8' + + def data_to_form_fields(self, data): + fields = {} + for key, val in data.fields.items(): + if getattr(val, 'read_only', True): + # Don't include read-only fields. + continue + + if getattr(val, 'fields', None): + # Nested data not supported by HTML forms. + continue + + kwargs = {} + kwargs['required'] = val.required + + #if getattr(v, 'queryset', None): + # kwargs['queryset'] = v.queryset + + if getattr(val, 'choices', None) is not None: + kwargs['choices'] = val.choices + + if getattr(val, 'regex', None) is not None: + kwargs['regex'] = val.regex + + if getattr(val, 'widget', None): + widget = copy.deepcopy(val.widget) + kwargs['widget'] = widget + + if getattr(val, 'default', None) is not None: + kwargs['initial'] = val.default + + if getattr(val, 'label', None) is not None: + kwargs['label'] = val.label + + if getattr(val, 'help_text', None) is not None: + kwargs['help_text'] = val.help_text + + fields[key] = val.form_field_class(**kwargs) + + return fields + + def render(self, data, accepted_media_type=None, renderer_context=None): + """ + Render serializer data and return an HTML form, as a string. + """ + # The HTMLFormRenderer currently uses something of a hack to render + # the content, by translating each of the serializer fields into + # an html form field, creating a dynamic form using those fields, + # and then rendering that form. + + # This isn't strictly neccessary, as we could render the serilizer + # fields to HTML directly. The implementation is historical and will + # likely change at some point. + + self.renderer_context = renderer_context or {} + request = renderer_context['request'] + + # Creating an on the fly form see: + # http://stackoverflow.com/questions/3915024/dynamically-creating-classes-python + fields = self.data_to_form_fields(data) + DynamicForm = type(str('DynamicForm'), (forms.Form,), fields) + data = None if data.empty else data + + template = loader.get_template(self.template) + context = RequestContext(request, {'form': DynamicForm(data)}) + + return template.render(context) + + class BrowsableAPIRenderer(BaseRenderer): """ HTML renderer used to self-document the API. 
@@ -325,6 +410,7 @@ class BrowsableAPIRenderer(BaseRenderer): format = 'api' template = 'rest_framework/api.html' charset = 'utf-8' + form_renderer_class = HTMLFormRenderer def get_default_renderer(self, view): """ @@ -349,7 +435,10 @@ class BrowsableAPIRenderer(BaseRenderer): renderer_context['indent'] = 4 content = renderer.render(data, accepted_media_type, renderer_context) - if renderer.charset is None: + render_style = getattr(renderer, 'render_style', 'text') + assert render_style in ['text', 'binary'], 'Expected .render_style ' \ + '"text" or "binary", but got "%s"' % render_style + if render_style == 'binary': return '[%d bytes of binary content]' % len(content) return content @@ -372,136 +461,105 @@ class BrowsableAPIRenderer(BaseRenderer): return False # Doesn't have permissions return True - def serializer_to_form_fields(self, serializer): - fields = {} - for k, v in serializer.get_fields().items(): - if getattr(v, 'read_only', True): - continue - - kwargs = {} - kwargs['required'] = v.required - - #if getattr(v, 'queryset', None): - # kwargs['queryset'] = v.queryset - - if getattr(v, 'choices', None) is not None: - kwargs['choices'] = v.choices - - if getattr(v, 'regex', None) is not None: - kwargs['regex'] = v.regex - - if getattr(v, 'widget', None): - widget = copy.deepcopy(v.widget) - kwargs['widget'] = widget - - if getattr(v, 'default', None) is not None: - kwargs['initial'] = v.default - - if getattr(v, 'label', None) is not None: - kwargs['label'] = v.label - - if getattr(v, 'help_text', None) is not None: - kwargs['help_text'] = v.help_text - - fields[k] = v.form_field_class(**kwargs) - - return fields - - def _get_form(self, view, method, request): - # We need to impersonate a request with the correct method, - # so that eg. any dynamic get_serializer_class methods return the - # correct form for each method. - restore = view.request - request = clone_request(request, method) - view.request = request - try: - return self.get_form(view, method, request) - finally: - view.request = restore - - def _get_raw_data_form(self, view, method, request, media_types): - # We need to impersonate a request with the correct method, - # so that eg. any dynamic get_serializer_class methods return the - # correct form for each method. - restore = view.request - request = clone_request(request, method) - view.request = request - try: - return self.get_raw_data_form(view, method, request, media_types) - finally: - view.request = restore - - def get_form(self, view, method, request): + def get_rendered_html_form(self, view, method, request): """ - Get a form, possibly bound to either the input or output data. - In the absence on of the Resource having an associated form then - provide a form that can be used to submit arbitrary content. + Return a string representing a rendered HTML form, possibly bound to + either the input or output data. + + In the absence of the View having an associated form then return None. 
""" - obj = getattr(view, 'object', None) - if not self.show_form_for_method(view, method, request, obj): - return + with override_method(view, request, method) as request: + obj = getattr(view, 'object', None) + if not self.show_form_for_method(view, method, request, obj): + return - if method in ('DELETE', 'OPTIONS'): - return True # Don't actually need to return a form + if method in ('DELETE', 'OPTIONS'): + return True # Don't actually need to return a form - if not getattr(view, 'get_serializer', None) or not parsers.FormParser in view.parser_classes: - return + if (not getattr(view, 'get_serializer', None) + or not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)): + return - serializer = view.get_serializer(instance=obj) - fields = self.serializer_to_form_fields(serializer) + serializer = view.get_serializer(instance=obj) - # Creating an on the fly form see: - # http://stackoverflow.com/questions/3915024/dynamically-creating-classes-python - OnTheFlyForm = type(str("OnTheFlyForm"), (forms.Form,), fields) - data = (obj is not None) and serializer.data or None - form_instance = OnTheFlyForm(data) - return form_instance + data = serializer.data + form_renderer = self.form_renderer_class() + return form_renderer.render(data, self.accepted_media_type, self.renderer_context) - def get_raw_data_form(self, view, method, request, media_types): + def get_raw_data_form(self, view, method, request): """ Returns a form that allows for arbitrary content types to be tunneled via standard HTML forms. (Which are typically application/x-www-form-urlencoded) """ + with override_method(view, request, method) as request: + # If we're not using content overloading there's no point in + # supplying a generic form, as the view won't treat the form's + # value as the content of the request. + if not (api_settings.FORM_CONTENT_OVERRIDE + and api_settings.FORM_CONTENTTYPE_OVERRIDE): + return None - # If we're not using content overloading there's no point in supplying a generic form, - # as the view won't treat the form's value as the content of the request. - if not (api_settings.FORM_CONTENT_OVERRIDE - and api_settings.FORM_CONTENTTYPE_OVERRIDE): - return None + # Check permissions + obj = getattr(view, 'object', None) + if not self.show_form_for_method(view, method, request, obj): + return - # Check permissions - obj = getattr(view, 'object', None) - if not self.show_form_for_method(view, method, request, obj): - return + # If possible, serialize the initial content for the generic form + default_parser = view.parser_classes[0] + renderer_class = getattr(default_parser, 'renderer_class', None) + if (hasattr(view, 'get_serializer') and renderer_class): + # View has a serializer defined and parser class has a + # corresponding renderer that can be used to render the data. - content_type_field = api_settings.FORM_CONTENTTYPE_OVERRIDE - content_field = api_settings.FORM_CONTENT_OVERRIDE - choices = [(media_type, media_type) for media_type in media_types] - initial = media_types[0] + # Get a read-only version of the serializer + serializer = view.get_serializer(instance=obj) + if obj is None: + for name, field in serializer.fields.items(): + if getattr(field, 'read_only', None): + del serializer.fields[name] - # NB. 
http://jacobian.org/writing/dynamic-form-generation/ - class GenericContentForm(forms.Form): - def __init__(self): - super(GenericContentForm, self).__init__() + # Render the raw data content + renderer = renderer_class() + accepted = self.accepted_media_type + context = self.renderer_context.copy() + context['indent'] = 4 + content = renderer.render(serializer.data, accepted, context) + else: + content = None - self.fields[content_type_field] = forms.ChoiceField( - label='Media type', - choices=choices, - initial=initial - ) - self.fields[content_field] = forms.CharField( - label='Content', - widget=forms.Textarea - ) + # Generate a generic form that includes a content type field, + # and a content field. + content_type_field = api_settings.FORM_CONTENTTYPE_OVERRIDE + content_field = api_settings.FORM_CONTENT_OVERRIDE - return GenericContentForm() + media_types = [parser.media_type for parser in view.parser_classes] + choices = [(media_type, media_type) for media_type in media_types] + initial = media_types[0] + + # NB. http://jacobian.org/writing/dynamic-form-generation/ + class GenericContentForm(forms.Form): + def __init__(self): + super(GenericContentForm, self).__init__() + + self.fields[content_type_field] = forms.ChoiceField( + label='Media type', + choices=choices, + initial=initial + ) + self.fields[content_field] = forms.CharField( + label='Content', + widget=forms.Textarea, + initial=content + ) + + return GenericContentForm() def get_name(self, view): - return get_view_name(view.__class__, getattr(view, 'suffix', None)) + return view.get_view_name() def get_description(self, view): - return get_view_description(view.__class__, html=True) + return view.get_view_description(html=True) def get_breadcrumbs(self, request): return get_breadcrumbs(request.path) @@ -510,26 +568,25 @@ class BrowsableAPIRenderer(BaseRenderer): """ Render the HTML for the browsable API representation. 
""" - accepted_media_type = accepted_media_type or '' - renderer_context = renderer_context or {} + self.accepted_media_type = accepted_media_type or '' + self.renderer_context = renderer_context or {} view = renderer_context['view'] request = renderer_context['request'] response = renderer_context['response'] - media_types = [parser.media_type for parser in view.parser_classes] renderer = self.get_default_renderer(view) content = self.get_content(renderer, data, accepted_media_type, renderer_context) - put_form = self._get_form(view, 'PUT', request) - post_form = self._get_form(view, 'POST', request) - patch_form = self._get_form(view, 'PATCH', request) - delete_form = self._get_form(view, 'DELETE', request) - options_form = self._get_form(view, 'OPTIONS', request) + put_form = self.get_rendered_html_form(view, 'PUT', request) + post_form = self.get_rendered_html_form(view, 'POST', request) + patch_form = self.get_rendered_html_form(view, 'PATCH', request) + delete_form = self.get_rendered_html_form(view, 'DELETE', request) + options_form = self.get_rendered_html_form(view, 'OPTIONS', request) - raw_data_put_form = self._get_raw_data_form(view, 'PUT', request, media_types) - raw_data_post_form = self._get_raw_data_form(view, 'POST', request, media_types) - raw_data_patch_form = self._get_raw_data_form(view, 'PATCH', request, media_types) + raw_data_put_form = self.get_raw_data_form(view, 'PUT', request) + raw_data_post_form = self.get_raw_data_form(view, 'POST', request) + raw_data_patch_form = self.get_raw_data_form(view, 'PATCH', request) raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form name = self.get_name(view) @@ -582,3 +639,4 @@ class MultiPartRenderer(BaseRenderer): def render(self, data, accepted_media_type=None, renderer_context=None): return encode_multipart(self.BOUNDARY, data) + diff --git a/awx/lib/site-packages/rest_framework/request.py b/awx/lib/site-packages/rest_framework/request.py index 919716f49a..977d4d9657 100644 --- a/awx/lib/site-packages/rest_framework/request.py +++ b/awx/lib/site-packages/rest_framework/request.py @@ -28,6 +28,29 @@ def is_form_media_type(media_type): base_media_type == 'multipart/form-data') +class override_method(object): + """ + A context manager that temporarily overrides the method on a request, + additionally setting the `view.request` attribute. + + Usage: + + with override_method(view, request, 'POST') as request: + ... # Do stuff with `view` and `request` + """ + def __init__(self, view, request, method): + self.view = view + self.request = request + self.method = method + + def __enter__(self): + self.view.request = clone_request(self.request, self.method) + return self.view.request + + def __exit__(self, *args, **kwarg): + self.view.request = self.request + + class Empty(object): """ Placeholder for unset attributes. diff --git a/awx/lib/site-packages/rest_framework/routers.py b/awx/lib/site-packages/rest_framework/routers.py index 930011d39e..3fee1e494c 100644 --- a/awx/lib/site-packages/rest_framework/routers.py +++ b/awx/lib/site-packages/rest_framework/routers.py @@ -189,7 +189,11 @@ class SimpleRouter(BaseRouter): Given a viewset, return the portion of URL regex that is used to match against a single instance. 
""" - base_regex = '(?P<{lookup_field}>[^/]+)' + if self.trailing_slash: + base_regex = '(?P<{lookup_field}>[^/]+)' + else: + # Don't consume `.json` style suffixes + base_regex = '(?P<{lookup_field}>[^/.]+)' lookup_field = getattr(viewset, 'lookup_field', 'pk') return base_regex.format(lookup_field=lookup_field) diff --git a/awx/lib/site-packages/rest_framework/runtests/settings.py b/awx/lib/site-packages/rest_framework/runtests/settings.py index b3702d0bfa..be72165801 100644 --- a/awx/lib/site-packages/rest_framework/runtests/settings.py +++ b/awx/lib/site-packages/rest_framework/runtests/settings.py @@ -123,6 +123,21 @@ else: 'provider.oauth2', ) +# guardian is optional +try: + import guardian +except ImportError: + pass +else: + ANONYMOUS_USER_ID = -1 + AUTHENTICATION_BACKENDS = ( + 'django.contrib.auth.backends.ModelBackend', # default + 'guardian.backends.ObjectPermissionBackend', + ) + INSTALLED_APPS += ( + 'guardian', + ) + STATIC_URL = '/static/' PASSWORD_HASHERS = ( diff --git a/awx/lib/site-packages/rest_framework/serializers.py b/awx/lib/site-packages/rest_framework/serializers.py index 31cfa34474..a63c7f6c29 100644 --- a/awx/lib/site-packages/rest_framework/serializers.py +++ b/awx/lib/site-packages/rest_framework/serializers.py @@ -32,6 +32,9 @@ from rest_framework.relations import * from rest_framework.fields import * +class RelationsList(list): + _deleted = [] + class NestedValidationError(ValidationError): """ The default ValidationError behavior is to stringify each item in the list @@ -161,7 +164,6 @@ class BaseSerializer(WritableField): self._data = None self._files = None self._errors = None - self._deleted = None if many and instance is not None and not hasattr(instance, '__iter__'): raise ValueError('instance should be a queryset or other iterable with many=True') @@ -298,7 +300,8 @@ class BaseSerializer(WritableField): Serialize objects -> primitives. 
""" ret = self._dict_class() - ret.fields = {} + ret.fields = self._dict_class() + ret.empty = obj is None for field_name, field in self.fields.items(): field.initialize(parent=self, field_name=field_name) @@ -331,14 +334,15 @@ class BaseSerializer(WritableField): if self.source == '*': return self.to_native(obj) + # Get the raw field value try: source = self.source or field_name value = obj for component in source.split('.'): - value = get_component(value, component) if value is None: break + value = get_component(value, component) except ObjectDoesNotExist: return None @@ -378,6 +382,7 @@ class BaseSerializer(WritableField): # Set the serializer object if it exists obj = getattr(self.parent.object, field_name) if self.parent.object else None + obj = obj.all() if is_simple_callable(getattr(obj, 'all', None)) else obj if self.source == '*': if value: @@ -391,7 +396,8 @@ class BaseSerializer(WritableField): 'data': value, 'context': self.context, 'partial': self.partial, - 'many': self.many + 'many': self.many, + 'allow_add_remove': self.allow_add_remove } serializer = self.__class__(**kwargs) @@ -434,7 +440,7 @@ class BaseSerializer(WritableField): DeprecationWarning, stacklevel=3) if many: - ret = [] + ret = RelationsList() errors = [] update = self.object is not None @@ -461,8 +467,8 @@ class BaseSerializer(WritableField): ret.append(self.from_native(item, None)) errors.append(self._errors) - if update: - self._deleted = identity_to_objects.values() + if update and self.allow_add_remove: + ret._deleted = identity_to_objects.values() self._errors = any(errors) and errors or [] else: @@ -514,12 +520,12 @@ class BaseSerializer(WritableField): """ if isinstance(self.object, list): [self.save_object(item, **kwargs) for item in self.object] + + if self.object._deleted: + [self.delete_object(item) for item in self.object._deleted] else: self.save_object(self.object, **kwargs) - if self.allow_add_remove and self._deleted: - [self.delete_object(item) for item in self._deleted] - return self.object def metadata(self): @@ -795,9 +801,12 @@ class ModelSerializer(Serializer): cls = self.opts.model opts = get_concrete_model(cls)._meta exclusions = [field.name for field in opts.fields + opts.many_to_many] + for field_name, field in self.fields.items(): field_name = field.source or field_name - if field_name in exclusions and not field.read_only: + if field_name in exclusions \ + and not field.read_only \ + and not isinstance(field, Serializer): exclusions.remove(field_name) return exclusions @@ -823,6 +832,7 @@ class ModelSerializer(Serializer): """ m2m_data = {} related_data = {} + nested_forward_relations = {} meta = self.opts.model._meta # Reverse fk or one-to-one relations @@ -842,6 +852,12 @@ class ModelSerializer(Serializer): if field.name in attrs: m2m_data[field.name] = attrs.pop(field.name) + # Nested forward relations - These need to be marked so we can save + # them before saving the parent model instance. + for field_name in attrs.keys(): + if isinstance(self.fields.get(field_name, None), Serializer): + nested_forward_relations[field_name] = attrs[field_name] + # Update an existing instance... if instance is not None: for key, val in attrs.items(): @@ -857,6 +873,7 @@ class ModelSerializer(Serializer): # at the point of save. instance._related_data = related_data instance._m2m_data = m2m_data + instance._nested_forward_relations = nested_forward_relations return instance @@ -872,6 +889,14 @@ class ModelSerializer(Serializer): """ Save the deserialized object and return it. 
""" + if getattr(obj, '_nested_forward_relations', None): + # Nested relationships need to be saved before we can save the + # parent instance. + for field_name, sub_object in obj._nested_forward_relations.items(): + if sub_object: + self.save_object(sub_object) + setattr(obj, field_name, sub_object) + obj.save(**kwargs) if getattr(obj, '_m2m_data', None): @@ -881,7 +906,25 @@ class ModelSerializer(Serializer): if getattr(obj, '_related_data', None): for accessor_name, related in obj._related_data.items(): - setattr(obj, accessor_name, related) + if isinstance(related, RelationsList): + # Nested reverse fk relationship + for related_item in related: + fk_field = obj._meta.get_field_by_name(accessor_name)[0].field.name + setattr(related_item, fk_field, obj) + self.save_object(related_item) + + # Delete any removed objects + if related._deleted: + [self.delete_object(item) for item in related._deleted] + + elif isinstance(related, models.Model): + # Nested reverse one-one relationship + fk_field = obj._meta.get_field_by_name(accessor_name)[0].field.name + setattr(related, fk_field, obj) + self.save_object(related) + else: + # Reverse FK or reverse one-one + setattr(obj, accessor_name, related) del(obj._related_data) @@ -903,6 +946,7 @@ class HyperlinkedModelSerializer(ModelSerializer): _options_class = HyperlinkedModelSerializerOptions _default_view_name = '%(model_name)s-detail' _hyperlink_field_class = HyperlinkedRelatedField + _hyperlink_identify_field_class = HyperlinkedIdentityField def get_default_fields(self): fields = super(HyperlinkedModelSerializer, self).get_default_fields() @@ -911,7 +955,7 @@ class HyperlinkedModelSerializer(ModelSerializer): self.opts.view_name = self._get_default_view_name(self.opts.model) if 'url' not in fields: - url_field = HyperlinkedIdentityField( + url_field = self._hyperlink_identify_field_class( view_name=self.opts.view_name, lookup_field=self.opts.lookup_field ) diff --git a/awx/lib/site-packages/rest_framework/settings.py b/awx/lib/site-packages/rest_framework/settings.py index 8fd177d586..8abaf1409a 100644 --- a/awx/lib/site-packages/rest_framework/settings.py +++ b/awx/lib/site-packages/rest_framework/settings.py @@ -48,7 +48,6 @@ DEFAULTS = { ), 'DEFAULT_THROTTLE_CLASSES': ( ), - 'DEFAULT_CONTENT_NEGOTIATION_CLASS': 'rest_framework.negotiation.DefaultContentNegotiation', @@ -68,11 +67,19 @@ DEFAULTS = { # Pagination 'PAGINATE_BY': None, 'PAGINATE_BY_PARAM': None, + 'MAX_PAGINATE_BY': None, # Authentication 'UNAUTHENTICATED_USER': 'django.contrib.auth.models.AnonymousUser', 'UNAUTHENTICATED_TOKEN': None, + # View configuration + 'VIEW_NAME_FUNCTION': 'rest_framework.views.get_view_name', + 'VIEW_DESCRIPTION_FUNCTION': 'rest_framework.views.get_view_description', + + # Exception handling + 'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler', + # Testing 'TEST_REQUEST_RENDERER_CLASSES': ( 'rest_framework.renderers.MultiPartRenderer', @@ -121,10 +128,13 @@ IMPORT_STRINGS = ( 'DEFAULT_MODEL_SERIALIZER_CLASS', 'DEFAULT_PAGINATION_SERIALIZER_CLASS', 'DEFAULT_FILTER_BACKENDS', + 'EXCEPTION_HANDLER', 'FILTER_BACKEND', 'TEST_REQUEST_RENDERER_CLASSES', 'UNAUTHENTICATED_USER', 'UNAUTHENTICATED_TOKEN', + 'VIEW_NAME_FUNCTION', + 'VIEW_DESCRIPTION_FUNCTION' ) diff --git a/awx/lib/site-packages/rest_framework/static/rest_framework/js/default.js b/awx/lib/site-packages/rest_framework/static/rest_framework/js/default.js index c74829d7d5..bcb1964dbe 100644 --- a/awx/lib/site-packages/rest_framework/static/rest_framework/js/default.js +++ 
b/awx/lib/site-packages/rest_framework/static/rest_framework/js/default.js
@@ -1,13 +1,56 @@
+function getCookie(c_name)
+{
+    // From http://www.w3schools.com/js/js_cookies.asp
+    var c_value = document.cookie;
+    var c_start = c_value.indexOf(" " + c_name + "=");
+    if (c_start == -1) {
+        c_start = c_value.indexOf(c_name + "=");
+    }
+    if (c_start == -1) {
+        c_value = null;
+    } else {
+        c_start = c_value.indexOf("=", c_start) + 1;
+        var c_end = c_value.indexOf(";", c_start);
+        if (c_end == -1) {
+            c_end = c_value.length;
+        }
+        c_value = unescape(c_value.substring(c_start,c_end));
+    }
+    return c_value;
+}
+
+// JSON highlighting.
 prettyPrint();

+// Bootstrap tooltips.
 $('.js-tooltip').tooltip({
     delay: 1000
 });

+// Deal with rounded tab styling after tab clicks.
 $('a[data-toggle="tab"]:first').on('shown', function (e) {
     $(e.target).parents('.tabbable').addClass('first-tab-active');
 });
 $('a[data-toggle="tab"]:not(:first)').on('shown', function (e) {
     $(e.target).parents('.tabbable').removeClass('first-tab-active');
 });
-$('.form-switcher a:first').tab('show');
+
+$('a[data-toggle="tab"]').click(function(){
+    document.cookie="tabstyle=" + this.name + "; path=/";
+});
+
+// Store tab preference in cookies & display appropriate tab on load.
+var selectedTab = null;
+var selectedTabName = getCookie('tabstyle');
+
+if (selectedTabName) {
+    selectedTab = $('.form-switcher a[name=' + selectedTabName + ']');
+}
+
+if (selectedTab && selectedTab.length > 0) {
+    // Display whichever tab is selected.
+    selectedTab.tab('show');
+} else {
+    // No stored tab preference, so default to showing the first tab.
+    $('.form-switcher a:first').tab('show');
+}
diff --git a/awx/lib/site-packages/rest_framework/templates/rest_framework/base.html b/awx/lib/site-packages/rest_framework/templates/rest_framework/base.html
index 51f9c2916b..aa90e90c4f 100644
--- a/awx/lib/site-packages/rest_framework/templates/rest_framework/base.html
+++ b/awx/lib/site-packages/rest_framework/templates/rest_framework/base.html
@@ -128,17 +128,17 @@
           <div {% if post_form %}class="tabbable"{% endif %}>
             {% if post_form %}
             <ul class="nav nav-tabs form-switcher">
-              <li><a href="#object-form" data-toggle="tab">HTML form</a></li>
-              <li><a href="#generic-content-form" data-toggle="tab">Raw data</a></li>
+              <li><a name='html-tab' href="#object-form" data-toggle="tab">HTML form</a></li>
+              <li><a name='raw-tab' href="#generic-content-form" data-toggle="tab">Raw data</a></li>
             </ul>
             {% endif %}
             <div class="well tab-content">
               {% if post_form %}
               <div class="tab-pane" id="object-form">
                 {% with form=post_form %}
-                <form action="{{ request.get_full_path }}" method="POST" {% if form.is_multipart %}enctype="multipart/form-data"{% endif %} class="form-horizontal">
+                <form action="{{ request.get_full_path }}" method="POST" enctype="multipart/form-data" class="form-horizontal">
                   <fieldset>
-                    {% include "rest_framework/form.html" %}
+                    {{ post_form }}
                     <div class="form-actions">
                       <button class="btn btn-primary" title="Make a POST request on the {{ name }} resource">POST</button>
                     </div>
@@ -167,23 +167,21 @@
           <div {% if put_form %}class="tabbable"{% endif %}>
             {% if put_form %}
             <ul class="nav nav-tabs form-switcher">
-              <li><a href="#object-form" data-toggle="tab">HTML form</a></li>
-              <li><a href="#generic-content-form" data-toggle="tab">Raw data</a></li>
+              <li><a name='html-tab' href="#object-form" data-toggle="tab">HTML form</a></li>
+              <li><a name='raw-tab' href="#generic-content-form" data-toggle="tab">Raw data</a></li>
             </ul>
             {% endif %}
             <div class="well tab-content">
               {% if put_form %}
              <div 
class="tab-pane" id="object-form"> - {% with form=put_form %} - <form action="{{ request.get_full_path }}" method="POST" {% if form.is_multipart %}enctype="multipart/form-data"{% endif %} class="form-horizontal"> + <form action="{{ request.get_full_path }}" method="POST" enctype="multipart/form-data" class="form-horizontal"> <fieldset> - {% include "rest_framework/form.html" %} + {{ put_form }} <div class="form-actions"> <button class="btn btn-primary js-tooltip" name="{{ api_settings.FORM_METHOD_OVERRIDE }}" value="PUT" title="Make a PUT request on the {{ name }} resource">PUT</button> </div> </fieldset> </form> - {% endwith %} </div> {% endif %} <div {% if put_form %}class="tab-pane"{% endif %} id="generic-content-form"> diff --git a/awx/lib/site-packages/rest_framework/test.py b/awx/lib/site-packages/rest_framework/test.py index a18f5a2938..234d10a4a4 100644 --- a/awx/lib/site-packages/rest_framework/test.py +++ b/awx/lib/site-packages/rest_framework/test.py @@ -134,6 +134,8 @@ class APIClient(APIRequestFactory, DjangoClient): """ self.handler._force_user = user self.handler._force_token = token + if user is None: + self.logout() # Also clear any possible session info if required def request(self, **kwargs): # Ensure that any credentials set get added to every request. diff --git a/awx/lib/site-packages/rest_framework/tests/test_description.py b/awx/lib/site-packages/rest_framework/tests/test_description.py index 8019f5ecaf..4c03c1dedd 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_description.py +++ b/awx/lib/site-packages/rest_framework/tests/test_description.py @@ -6,7 +6,6 @@ from rest_framework.compat import apply_markdown, smart_text from rest_framework.views import APIView from rest_framework.tests.description import ViewWithNonASCIICharactersInDocstring from rest_framework.tests.description import UTF8_TEST_DOCSTRING -from rest_framework.utils.formatting import get_view_name, get_view_description # We check that docstrings get nicely un-indented. 
DESCRIPTION = """an example docstring @@ -58,7 +57,7 @@ class TestViewNamesAndDescriptions(TestCase): """ class MockView(APIView): pass - self.assertEqual(get_view_name(MockView), 'Mock') + self.assertEqual(MockView().get_view_name(), 'Mock') def test_view_description_uses_docstring(self): """Ensure view descriptions are based on the docstring.""" @@ -78,7 +77,7 @@ class TestViewNamesAndDescriptions(TestCase): # hash style header #""" - self.assertEqual(get_view_description(MockView), DESCRIPTION) + self.assertEqual(MockView().get_view_description(), DESCRIPTION) def test_view_description_supports_unicode(self): """ @@ -86,7 +85,7 @@ class TestViewNamesAndDescriptions(TestCase): """ self.assertEqual( - get_view_description(ViewWithNonASCIICharactersInDocstring), + ViewWithNonASCIICharactersInDocstring().get_view_description(), smart_text(UTF8_TEST_DOCSTRING) ) @@ -97,7 +96,7 @@ class TestViewNamesAndDescriptions(TestCase): """ class MockView(APIView): pass - self.assertEqual(get_view_description(MockView), '') + self.assertEqual(MockView().get_view_description(), '') def test_markdown(self): """ diff --git a/awx/lib/site-packages/rest_framework/tests/test_fields.py b/awx/lib/site-packages/rest_framework/tests/test_fields.py index 6836ec86f6..34fbab9c92 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_fields.py +++ b/awx/lib/site-packages/rest_framework/tests/test_fields.py @@ -688,6 +688,14 @@ class ChoiceFieldTests(TestCase): f = serializers.ChoiceField(required=False, choices=self.SAMPLE_CHOICES) self.assertEqual(f.choices, models.fields.BLANK_CHOICE_DASH + self.SAMPLE_CHOICES) + def test_from_native_empty(self): + """ + Make sure from_native() returns None on empty param. + """ + f = serializers.ChoiceField(choices=self.SAMPLE_CHOICES) + result = f.from_native('') + self.assertEqual(result, None) + class EmailFieldTests(TestCase): """ @@ -896,3 +904,12 @@ class CustomIntegerField(TestCase): self.assertFalse(serializer.is_valid()) +class BooleanField(TestCase): + """ + Tests for BooleanField + """ + def test_boolean_required(self): + class BooleanRequiredSerializer(serializers.Serializer): + bool_field = serializers.BooleanField(required=True) + + self.assertFalse(BooleanRequiredSerializer(data={}).is_valid()) diff --git a/awx/lib/site-packages/rest_framework/tests/test_files.py b/awx/lib/site-packages/rest_framework/tests/test_files.py index 487046aca4..c13c38b868 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_files.py +++ b/awx/lib/site-packages/rest_framework/tests/test_files.py @@ -7,13 +7,13 @@ import datetime class UploadedFile(object): - def __init__(self, file, created=None): + def __init__(self, file=None, created=None): self.file = file self.created = created or datetime.datetime.now() class UploadedFileSerializer(serializers.Serializer): - file = serializers.FileField() + file = serializers.FileField(required=False) created = serializers.DateTimeField() def restore_object(self, attrs, instance=None): @@ -47,5 +47,36 @@ class FileSerializerTests(TestCase): now = datetime.datetime.now() serializer = UploadedFileSerializer(data={'created': now}) + self.assertTrue(serializer.is_valid()) + self.assertEqual(serializer.object.created, now) + self.assertIsNone(serializer.object.file) + + def test_remove_with_empty_string(self): + """ + Passing empty string as data should cause file to be removed + + Test for: + https://github.com/tomchristie/django-rest-framework/issues/937 + """ + now = datetime.datetime.now() + file = BytesIO(six.b('stuff')) + file.name = 
'stuff.txt' + file.size = len(file.getvalue()) + + uploaded_file = UploadedFile(file=file, created=now) + + serializer = UploadedFileSerializer(instance=uploaded_file, data={'created': now, 'file': ''}) + self.assertTrue(serializer.is_valid()) + self.assertEqual(serializer.object.created, uploaded_file.created) + self.assertIsNone(serializer.object.file) + + def test_validation_error_with_non_file(self): + """ + Passing non-files should raise a validation error. + """ + now = datetime.datetime.now() + errmsg = 'No file was submitted. Check the encoding type on the form.' + + serializer = UploadedFileSerializer(data={'created': now, 'file': 'abc'}) self.assertFalse(serializer.is_valid()) - self.assertIn('file', serializer.errors) + self.assertEqual(serializer.errors, {'file': [errmsg]}) diff --git a/awx/lib/site-packages/rest_framework/tests/test_generics.py b/awx/lib/site-packages/rest_framework/tests/test_generics.py index 1550880b56..79cd99ac51 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_generics.py +++ b/awx/lib/site-packages/rest_framework/tests/test_generics.py @@ -272,6 +272,48 @@ class TestInstanceView(TestCase): self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, expected) + def test_options_before_instance_create(self): + """ + OPTIONS requests to RetrieveUpdateDestroyAPIView should return metadata + before the instance has been created + """ + request = factory.options('/999') + with self.assertNumQueries(1): + response = self.view(request, pk=999).render() + expected = { + 'parses': [ + 'application/json', + 'application/x-www-form-urlencoded', + 'multipart/form-data' + ], + 'renders': [ + 'application/json', + 'text/html' + ], + 'name': 'Instance', + 'description': 'Example description for OPTIONS.', + 'actions': { + 'PUT': { + 'text': { + 'max_length': 100, + 'read_only': False, + 'required': True, + 'type': 'string', + 'label': 'Text comes here', + 'help_text': 'Text description.' + }, + 'id': { + 'read_only': True, + 'required': False, + 'type': 'integer', + 'label': 'ID', + }, + } + } + } + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data, expected) + def test_get_instance_view_incorrect_arg(self): """ GET requests with an incorrect pk type, should raise 404, not 500. @@ -338,6 +380,17 @@ class TestInstanceView(TestCase): new_obj = SlugBasedModel.objects.get(slug='test_slug') self.assertEqual(new_obj.text, 'foobar') + def test_patch_cannot_create_an_object(self): + """ + PATCH requests should not be able to create objects. 
+ """ + data = {'text': 'foobar'} + request = factory.patch('/999', data, format='json') + with self.assertNumQueries(1): + response = self.view(request, pk=999).render() + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + self.assertFalse(self.objects.filter(id=999).exists()) + class TestOverriddenGetObject(TestCase): """ diff --git a/awx/lib/site-packages/rest_framework/tests/test_pagination.py b/awx/lib/site-packages/rest_framework/tests/test_pagination.py index 85d4640ead..4170d4b641 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_pagination.py +++ b/awx/lib/site-packages/rest_framework/tests/test_pagination.py @@ -42,6 +42,16 @@ class PaginateByParamView(generics.ListAPIView): paginate_by_param = 'page_size' +class MaxPaginateByView(generics.ListAPIView): + """ + View for testing custom max_paginate_by usage + """ + model = BasicModel + paginate_by = 3 + max_paginate_by = 5 + paginate_by_param = 'page_size' + + class IntegrationTestPagination(TestCase): """ Integration tests for paginated list views. @@ -313,6 +323,43 @@ class TestCustomPaginateByParam(TestCase): self.assertEqual(response.data['results'], self.data[:5]) +class TestMaxPaginateByParam(TestCase): + """ + Tests for list views with max_paginate_by kwarg + """ + + def setUp(self): + """ + Create 13 BasicModel instances. + """ + for i in range(13): + BasicModel(text=i).save() + self.objects = BasicModel.objects + self.data = [ + {'id': obj.id, 'text': obj.text} + for obj in self.objects.all() + ] + self.view = MaxPaginateByView.as_view() + + def test_max_paginate_by(self): + """ + If max_paginate_by is set, it should limit page size for the view. + """ + request = factory.get('/?page_size=10') + response = self.view(request).render() + self.assertEqual(response.data['count'], 13) + self.assertEqual(response.data['results'], self.data[:5]) + + def test_max_paginate_by_without_page_size_param(self): + """ + If max_paginate_by is set, but client does not specifiy page_size, + standard `paginate_by` behavior should be used. 
+ """ + request = factory.get('/') + response = self.view(request).render() + self.assertEqual(response.data['results'], self.data[:3]) + + ### Tests for context in pagination serializers class CustomField(serializers.Field): diff --git a/awx/lib/site-packages/rest_framework/tests/test_permissions.py b/awx/lib/site-packages/rest_framework/tests/test_permissions.py index e2cca3808c..d08124f484 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_permissions.py +++ b/awx/lib/site-packages/rest_framework/tests/test_permissions.py @@ -1,18 +1,17 @@ from __future__ import unicode_literals -from django.contrib.auth.models import User, Permission +from django.contrib.auth.models import User, Permission, Group from django.db import models from django.test import TestCase +from django.utils import unittest from rest_framework import generics, status, permissions, authentication, HTTP_HEADER_ENCODING +from rest_framework.compat import guardian +from rest_framework.filters import DjangoObjectPermissionsFilter from rest_framework.test import APIRequestFactory +from rest_framework.tests.models import BasicModel import base64 factory = APIRequestFactory() - -class BasicModel(models.Model): - text = models.CharField(max_length=100) - - class RootView(generics.ListCreateAPIView): model = BasicModel authentication_classes = [authentication.BasicAuthentication] @@ -144,45 +143,158 @@ class ModelPermissionsIntegrationTests(TestCase): self.assertEqual(list(response.data['actions'].keys()), ['PUT']) -class OwnerModel(models.Model): +class BasicPermModel(models.Model): text = models.CharField(max_length=100) - owner = models.ForeignKey(User) + + class Meta: + app_label = 'tests' + permissions = ( + ('view_basicpermmodel', 'Can view basic perm model'), + # add, change, delete built in to django + ) + +# Custom object-level permission, that includes 'view' permissions +class ViewObjectPermissions(permissions.DjangoObjectPermissions): + perms_map = { + 'GET': ['%(app_label)s.view_%(model_name)s'], + 'OPTIONS': ['%(app_label)s.view_%(model_name)s'], + 'HEAD': ['%(app_label)s.view_%(model_name)s'], + 'POST': ['%(app_label)s.add_%(model_name)s'], + 'PUT': ['%(app_label)s.change_%(model_name)s'], + 'PATCH': ['%(app_label)s.change_%(model_name)s'], + 'DELETE': ['%(app_label)s.delete_%(model_name)s'], + } -class IsOwnerPermission(permissions.BasePermission): - def has_object_permission(self, request, view, obj): - return request.user == obj.owner - - -class OwnerInstanceView(generics.RetrieveUpdateDestroyAPIView): - model = OwnerModel +class ObjectPermissionInstanceView(generics.RetrieveUpdateDestroyAPIView): + model = BasicPermModel authentication_classes = [authentication.BasicAuthentication] - permission_classes = [IsOwnerPermission] + permission_classes = [ViewObjectPermissions] + +object_permissions_view = ObjectPermissionInstanceView.as_view() -owner_instance_view = OwnerInstanceView.as_view() +class ObjectPermissionListView(generics.ListAPIView): + model = BasicPermModel + authentication_classes = [authentication.BasicAuthentication] + permission_classes = [ViewObjectPermissions] + +object_permissions_list_view = ObjectPermissionListView.as_view() +@unittest.skipUnless(guardian, 'django-guardian not installed') class ObjectPermissionsIntegrationTests(TestCase): """ Integration tests for the object level permissions API. 
""" + @classmethod + def setUpClass(cls): + from guardian.shortcuts import assign_perm + + # create users + create = User.objects.create_user + users = { + 'fullaccess': create('fullaccess', 'fullaccess@example.com', 'password'), + 'readonly': create('readonly', 'readonly@example.com', 'password'), + 'writeonly': create('writeonly', 'writeonly@example.com', 'password'), + 'deleteonly': create('deleteonly', 'deleteonly@example.com', 'password'), + } + + # give everyone model level permissions, as we are not testing those + everyone = Group.objects.create(name='everyone') + model_name = BasicPermModel._meta.module_name + app_label = BasicPermModel._meta.app_label + f = '{0}_{1}'.format + perms = { + 'view': f('view', model_name), + 'change': f('change', model_name), + 'delete': f('delete', model_name) + } + for perm in perms.values(): + perm = '{0}.{1}'.format(app_label, perm) + assign_perm(perm, everyone) + everyone.user_set.add(*users.values()) + + cls.perms = perms + cls.users = users def setUp(self): - User.objects.create_user('not_owner', 'not_owner@example.com', 'password') - user = User.objects.create_user('owner', 'owner@example.com', 'password') + from guardian.shortcuts import assign_perm + perms = self.perms + users = self.users - self.not_owner_credentials = basic_auth_header('not_owner', 'password') - self.owner_credentials = basic_auth_header('owner', 'password') + # appropriate object level permissions + readers = Group.objects.create(name='readers') + writers = Group.objects.create(name='writers') + deleters = Group.objects.create(name='deleters') - OwnerModel(text='foo', owner=user).save() + model = BasicPermModel.objects.create(text='foo') + + assign_perm(perms['view'], readers, model) + assign_perm(perms['change'], writers, model) + assign_perm(perms['delete'], deleters, model) - def test_owner_has_delete_permissions(self): - request = factory.delete('/1', HTTP_AUTHORIZATION=self.owner_credentials) - response = owner_instance_view(request, pk='1') + readers.user_set.add(users['fullaccess'], users['readonly']) + writers.user_set.add(users['fullaccess'], users['writeonly']) + deleters.user_set.add(users['fullaccess'], users['deleteonly']) + + self.credentials = {} + for user in users.values(): + self.credentials[user.username] = basic_auth_header(user.username, 'password') + + # Delete + def test_can_delete_permissions(self): + request = factory.delete('/1', HTTP_AUTHORIZATION=self.credentials['deleteonly']) + response = object_permissions_view(request, pk='1') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - def test_non_owner_does_not_have_delete_permissions(self): - request = factory.delete('/1', HTTP_AUTHORIZATION=self.not_owner_credentials) - response = owner_instance_view(request, pk='1') + def test_cannot_delete_permissions(self): + request = factory.delete('/1', HTTP_AUTHORIZATION=self.credentials['readonly']) + response = object_permissions_view(request, pk='1') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) + + # Update + def test_can_update_permissions(self): + request = factory.patch('/1', {'text': 'foobar'}, format='json', + HTTP_AUTHORIZATION=self.credentials['writeonly']) + response = object_permissions_view(request, pk='1') + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data.get('text'), 'foobar') + + def test_cannot_update_permissions(self): + request = factory.patch('/1', {'text': 'foobar'}, format='json', + HTTP_AUTHORIZATION=self.credentials['deleteonly']) + response = 
object_permissions_view(request, pk='1') + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_cannot_update_permissions_non_existing(self): + request = factory.patch('/999', {'text': 'foobar'}, format='json', + HTTP_AUTHORIZATION=self.credentials['deleteonly']) + response = object_permissions_view(request, pk='999') + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + # Read + def test_can_read_permissions(self): + request = factory.get('/1', HTTP_AUTHORIZATION=self.credentials['readonly']) + response = object_permissions_view(request, pk='1') + self.assertEqual(response.status_code, status.HTTP_200_OK) + + def test_cannot_read_permissions(self): + request = factory.get('/1', HTTP_AUTHORIZATION=self.credentials['writeonly']) + response = object_permissions_view(request, pk='1') + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + # Read list + def test_can_read_list_permissions(self): + request = factory.get('/', HTTP_AUTHORIZATION=self.credentials['readonly']) + object_permissions_list_view.cls.filter_backends = (DjangoObjectPermissionsFilter,) + response = object_permissions_list_view(request) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data[0].get('id'), 1) + + def test_cannot_read_list_permissions(self): + request = factory.get('/', HTTP_AUTHORIZATION=self.credentials['writeonly']) + object_permissions_list_view.cls.filter_backends = (DjangoObjectPermissionsFilter,) + response = object_permissions_list_view(request) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertListEqual(response.data, []) diff --git a/awx/lib/site-packages/rest_framework/tests/test_relations_nested.py b/awx/lib/site-packages/rest_framework/tests/test_relations_nested.py index f6d006b39b..d393b0c35b 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_relations_nested.py +++ b/awx/lib/site-packages/rest_framework/tests/test_relations_nested.py @@ -1,107 +1,328 @@ from __future__ import unicode_literals +from django.db import models from django.test import TestCase from rest_framework import serializers -from rest_framework.tests.models import ForeignKeyTarget, ForeignKeySource, NullableForeignKeySource, OneToOneTarget, NullableOneToOneSource -class ForeignKeySourceSerializer(serializers.ModelSerializer): - class Meta: - model = ForeignKeySource - fields = ('id', 'name', 'target') - depth = 1 +class OneToOneTarget(models.Model): + name = models.CharField(max_length=100) -class ForeignKeyTargetSerializer(serializers.ModelSerializer): - class Meta: - model = ForeignKeyTarget - fields = ('id', 'name', 'sources') - depth = 1 +class OneToOneSource(models.Model): + name = models.CharField(max_length=100) + target = models.OneToOneField(OneToOneTarget, related_name='source', + null=True, blank=True) -class NullableForeignKeySourceSerializer(serializers.ModelSerializer): - class Meta: - model = NullableForeignKeySource - fields = ('id', 'name', 'target') - depth = 1 +class OneToManyTarget(models.Model): + name = models.CharField(max_length=100) -class NullableOneToOneTargetSerializer(serializers.ModelSerializer): - class Meta: - model = OneToOneTarget - fields = ('id', 'name', 'nullable_source') - depth = 1 +class OneToManySource(models.Model): + name = models.CharField(max_length=100) + target = models.ForeignKey(OneToManyTarget, related_name='sources') -class ReverseForeignKeyTests(TestCase): +class ReverseNestedOneToOneTests(TestCase): def setUp(self): - target = 
ForeignKeyTarget(name='target-1') - target.save() - new_target = ForeignKeyTarget(name='target-2') - new_target.save() + class OneToOneSourceSerializer(serializers.ModelSerializer): + class Meta: + model = OneToOneSource + fields = ('id', 'name') + + class OneToOneTargetSerializer(serializers.ModelSerializer): + source = OneToOneSourceSerializer() + + class Meta: + model = OneToOneTarget + fields = ('id', 'name', 'source') + + self.Serializer = OneToOneTargetSerializer + for idx in range(1, 4): - source = ForeignKeySource(name='source-%d' % idx, target=target) + target = OneToOneTarget(name='target-%d' % idx) + target.save() + source = OneToOneSource(name='source-%d' % idx, target=target) source.save() - def test_foreign_key_retrieve(self): - queryset = ForeignKeySource.objects.all() - serializer = ForeignKeySourceSerializer(queryset, many=True) - expected = [ - {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}}, - {'id': 2, 'name': 'source-2', 'target': {'id': 1, 'name': 'target-1'}}, - {'id': 3, 'name': 'source-3', 'target': {'id': 1, 'name': 'target-1'}}, - ] - self.assertEqual(serializer.data, expected) - - def test_reverse_foreign_key_retrieve(self): - queryset = ForeignKeyTarget.objects.all() - serializer = ForeignKeyTargetSerializer(queryset, many=True) - expected = [ - {'id': 1, 'name': 'target-1', 'sources': [ - {'id': 1, 'name': 'source-1', 'target': 1}, - {'id': 2, 'name': 'source-2', 'target': 1}, - {'id': 3, 'name': 'source-3', 'target': 1}, - ]}, - {'id': 2, 'name': 'target-2', 'sources': [ - ]} - ] - self.assertEqual(serializer.data, expected) - - -class NestedNullableForeignKeyTests(TestCase): - def setUp(self): - target = ForeignKeyTarget(name='target-1') - target.save() - for idx in range(1, 4): - if idx == 3: - target = None - source = NullableForeignKeySource(name='source-%d' % idx, target=target) - source.save() - - def test_foreign_key_retrieve_with_null(self): - queryset = NullableForeignKeySource.objects.all() - serializer = NullableForeignKeySourceSerializer(queryset, many=True) - expected = [ - {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}}, - {'id': 2, 'name': 'source-2', 'target': {'id': 1, 'name': 'target-1'}}, - {'id': 3, 'name': 'source-3', 'target': None}, - ] - self.assertEqual(serializer.data, expected) - - -class NestedNullableOneToOneTests(TestCase): - def setUp(self): - target = OneToOneTarget(name='target-1') - target.save() - new_target = OneToOneTarget(name='target-2') - new_target.save() - source = NullableOneToOneSource(name='source-1', target=target) - source.save() - - def test_reverse_foreign_key_retrieve_with_null(self): + def test_one_to_one_retrieve(self): queryset = OneToOneTarget.objects.all() - serializer = NullableOneToOneTargetSerializer(queryset, many=True) + serializer = self.Serializer(queryset, many=True) expected = [ - {'id': 1, 'name': 'target-1', 'nullable_source': {'id': 1, 'name': 'source-1', 'target': 1}}, - {'id': 2, 'name': 'target-2', 'nullable_source': None}, + {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}}, + {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}}, + {'id': 3, 'name': 'target-3', 'source': {'id': 3, 'name': 'source-3'}} + ] + self.assertEqual(serializer.data, expected) + + def test_one_to_one_create(self): + data = {'id': 4, 'name': 'target-4', 'source': {'id': 4, 'name': 'source-4'}} + serializer = self.Serializer(data=data) + self.assertTrue(serializer.is_valid()) + obj = serializer.save() + 
self.assertEqual(serializer.data, data) + self.assertEqual(obj.name, 'target-4') + + # Ensure (target 4, target_source 4, source 4) are added, and + # everything else is as expected. + queryset = OneToOneTarget.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}}, + {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}}, + {'id': 3, 'name': 'target-3', 'source': {'id': 3, 'name': 'source-3'}}, + {'id': 4, 'name': 'target-4', 'source': {'id': 4, 'name': 'source-4'}} + ] + self.assertEqual(serializer.data, expected) + + def test_one_to_one_create_with_invalid_data(self): + data = {'id': 4, 'name': 'target-4', 'source': {'id': 4}} + serializer = self.Serializer(data=data) + self.assertFalse(serializer.is_valid()) + self.assertEqual(serializer.errors, {'source': [{'name': ['This field is required.']}]}) + + def test_one_to_one_update(self): + data = {'id': 3, 'name': 'target-3-updated', 'source': {'id': 3, 'name': 'source-3-updated'}} + instance = OneToOneTarget.objects.get(pk=3) + serializer = self.Serializer(instance, data=data) + self.assertTrue(serializer.is_valid()) + obj = serializer.save() + self.assertEqual(serializer.data, data) + self.assertEqual(obj.name, 'target-3-updated') + + # Ensure (target 3, target_source 3, source 3) are updated, + # and everything else is as expected. + queryset = OneToOneTarget.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}}, + {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}}, + {'id': 3, 'name': 'target-3-updated', 'source': {'id': 3, 'name': 'source-3-updated'}} + ] + self.assertEqual(serializer.data, expected) + + +class ForwardNestedOneToOneTests(TestCase): + def setUp(self): + class OneToOneTargetSerializer(serializers.ModelSerializer): + class Meta: + model = OneToOneTarget + fields = ('id', 'name') + + class OneToOneSourceSerializer(serializers.ModelSerializer): + target = OneToOneTargetSerializer() + + class Meta: + model = OneToOneSource + fields = ('id', 'name', 'target') + + self.Serializer = OneToOneSourceSerializer + + for idx in range(1, 4): + target = OneToOneTarget(name='target-%d' % idx) + target.save() + source = OneToOneSource(name='source-%d' % idx, target=target) + source.save() + + def test_one_to_one_retrieve(self): + queryset = OneToOneSource.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}}, + {'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}}, + {'id': 3, 'name': 'source-3', 'target': {'id': 3, 'name': 'target-3'}} + ] + self.assertEqual(serializer.data, expected) + + def test_one_to_one_create(self): + data = {'id': 4, 'name': 'source-4', 'target': {'id': 4, 'name': 'target-4'}} + serializer = self.Serializer(data=data) + self.assertTrue(serializer.is_valid()) + obj = serializer.save() + self.assertEqual(serializer.data, data) + self.assertEqual(obj.name, 'source-4') + + # Ensure (target 4, target_source 4, source 4) are added, and + # everything else is as expected. 
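+        # (Editor's note) The nested target is saved first: restore_object()
+        # stashes it in _nested_forward_relations, and save_object() persists
+        # it and assigns the FK before saving the source instance (see the
+        # serializers.py changes above).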
+ queryset = OneToOneSource.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}}, + {'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}}, + {'id': 3, 'name': 'source-3', 'target': {'id': 3, 'name': 'target-3'}}, + {'id': 4, 'name': 'source-4', 'target': {'id': 4, 'name': 'target-4'}} + ] + self.assertEqual(serializer.data, expected) + + def test_one_to_one_create_with_invalid_data(self): + data = {'id': 4, 'name': 'source-4', 'target': {'id': 4}} + serializer = self.Serializer(data=data) + self.assertFalse(serializer.is_valid()) + self.assertEqual(serializer.errors, {'target': [{'name': ['This field is required.']}]}) + + def test_one_to_one_update(self): + data = {'id': 3, 'name': 'source-3-updated', 'target': {'id': 3, 'name': 'target-3-updated'}} + instance = OneToOneSource.objects.get(pk=3) + serializer = self.Serializer(instance, data=data) + self.assertTrue(serializer.is_valid()) + obj = serializer.save() + self.assertEqual(serializer.data, data) + self.assertEqual(obj.name, 'source-3-updated') + + # Ensure (target 3, target_source 3, source 3) are updated, + # and everything else is as expected. + queryset = OneToOneSource.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}}, + {'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}}, + {'id': 3, 'name': 'source-3-updated', 'target': {'id': 3, 'name': 'target-3-updated'}} + ] + self.assertEqual(serializer.data, expected) + + def test_one_to_one_update_to_null(self): + data = {'id': 3, 'name': 'source-3-updated', 'target': None} + instance = OneToOneSource.objects.get(pk=3) + serializer = self.Serializer(instance, data=data) + self.assertTrue(serializer.is_valid()) + obj = serializer.save() + + self.assertEqual(serializer.data, data) + self.assertEqual(obj.name, 'source-3-updated') + self.assertEqual(obj.target, None) + + queryset = OneToOneSource.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}}, + {'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}}, + {'id': 3, 'name': 'source-3-updated', 'target': None} + ] + self.assertEqual(serializer.data, expected) + + # TODO: Nullable 1-1 tests + # def test_one_to_one_delete(self): + # data = {'id': 3, 'name': 'target-3', 'target_source': None} + # instance = OneToOneTarget.objects.get(pk=3) + # serializer = self.Serializer(instance, data=data) + # self.assertTrue(serializer.is_valid()) + # serializer.save() + + # # Ensure (target_source 3, source 3) are deleted, + # # and everything else is as expected. 
+ # queryset = OneToOneTarget.objects.all() + # serializer = self.Serializer(queryset) + # expected = [ + # {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}}, + # {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}}, + # {'id': 3, 'name': 'target-3', 'source': None} + # ] + # self.assertEqual(serializer.data, expected) + + +class ReverseNestedOneToManyTests(TestCase): + def setUp(self): + class OneToManySourceSerializer(serializers.ModelSerializer): + class Meta: + model = OneToManySource + fields = ('id', 'name') + + class OneToManyTargetSerializer(serializers.ModelSerializer): + sources = OneToManySourceSerializer(many=True, allow_add_remove=True) + + class Meta: + model = OneToManyTarget + fields = ('id', 'name', 'sources') + + self.Serializer = OneToManyTargetSerializer + + target = OneToManyTarget(name='target-1') + target.save() + for idx in range(1, 4): + source = OneToManySource(name='source-%d' % idx, target=target) + source.save() + + def test_one_to_many_retrieve(self): + queryset = OneToManyTarget.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'}, + {'id': 2, 'name': 'source-2'}, + {'id': 3, 'name': 'source-3'}]}, + ] + self.assertEqual(serializer.data, expected) + + def test_one_to_many_create(self): + data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'}, + {'id': 2, 'name': 'source-2'}, + {'id': 3, 'name': 'source-3'}, + {'id': 4, 'name': 'source-4'}]} + instance = OneToManyTarget.objects.get(pk=1) + serializer = self.Serializer(instance, data=data) + self.assertTrue(serializer.is_valid()) + obj = serializer.save() + self.assertEqual(serializer.data, data) + self.assertEqual(obj.name, 'target-1') + + # Ensure source 4 is added, and everything else is as + # expected. + queryset = OneToManyTarget.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'}, + {'id': 2, 'name': 'source-2'}, + {'id': 3, 'name': 'source-3'}, + {'id': 4, 'name': 'source-4'}]} + ] + self.assertEqual(serializer.data, expected) + + def test_one_to_many_create_with_invalid_data(self): + data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'}, + {'id': 2, 'name': 'source-2'}, + {'id': 3, 'name': 'source-3'}, + {'id': 4}]} + serializer = self.Serializer(data=data) + self.assertFalse(serializer.is_valid()) + self.assertEqual(serializer.errors, {'sources': [{}, {}, {}, {'name': ['This field is required.']}]}) + + def test_one_to_many_update(self): + data = {'id': 1, 'name': 'target-1-updated', 'sources': [{'id': 1, 'name': 'source-1-updated'}, + {'id': 2, 'name': 'source-2'}, + {'id': 3, 'name': 'source-3'}]} + instance = OneToManyTarget.objects.get(pk=1) + serializer = self.Serializer(instance, data=data) + self.assertTrue(serializer.is_valid()) + obj = serializer.save() + self.assertEqual(serializer.data, data) + self.assertEqual(obj.name, 'target-1-updated') + + # Ensure (target 1, source 1) are updated, + # and everything else is as expected. 
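(Illustration, not part of the vendored file: allow_add_remove=True is what lets a many=True nested serializer reconcile the whole child set on save(). Children present in the submitted list are created or updated; children omitted from it are deleted, which is exactly what test_one_to_many_delete below exercises. Sketch, using the names from these tests:

    class OneToManyTargetSerializer(serializers.ModelSerializer):
        sources = OneToManySourceSerializer(many=True, allow_add_remove=True)

        class Meta:
            model = OneToManyTarget
            fields = ('id', 'name', 'sources')

    # Omitting source 2 from the submitted list deletes it on save():
    data = {'id': 1, 'name': 'target-1',
            'sources': [{'id': 1, 'name': 'source-1'},
                        {'id': 3, 'name': 'source-3'}]}
    serializer = OneToManyTargetSerializer(instance, data=data)
    if serializer.is_valid():
        serializer.save()
)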
+ queryset = OneToManyTarget.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'target-1-updated', 'sources': [{'id': 1, 'name': 'source-1-updated'}, + {'id': 2, 'name': 'source-2'}, + {'id': 3, 'name': 'source-3'}]} + + ] + self.assertEqual(serializer.data, expected) + + def test_one_to_many_delete(self): + data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'}, + {'id': 3, 'name': 'source-3'}]} + instance = OneToManyTarget.objects.get(pk=1) + serializer = self.Serializer(instance, data=data) + self.assertTrue(serializer.is_valid()) + serializer.save() + + # Ensure source 2 is deleted, and everything else is as + # expected. + queryset = OneToManyTarget.objects.all() + serializer = self.Serializer(queryset, many=True) + expected = [ + {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'}, + {'id': 3, 'name': 'source-3'}]} + ] self.assertEqual(serializer.data, expected) diff --git a/awx/lib/site-packages/rest_framework/tests/test_relations_pk.py b/awx/lib/site-packages/rest_framework/tests/test_relations_pk.py index e2a1b81520..3815afdd84 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_relations_pk.py +++ b/awx/lib/site-packages/rest_framework/tests/test_relations_pk.py @@ -283,6 +283,15 @@ class PKForeignKeyTests(TestCase): self.assertFalse(serializer.is_valid()) self.assertEqual(serializer.errors, {'target': ['This field is required.']}) + def test_foreign_key_with_empty(self): + """ + Regression test for #1072 + + https://github.com/tomchristie/django-rest-framework/issues/1072 + """ + serializer = NullableForeignKeySourceSerializer() + self.assertEqual(serializer.data['target'], None) + class PKNullableForeignKeyTests(TestCase): def setUp(self): diff --git a/awx/lib/site-packages/rest_framework/tests/test_routers.py b/awx/lib/site-packages/rest_framework/tests/test_routers.py index 5fcccb7414..e723f7d45c 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_routers.py +++ b/awx/lib/site-packages/rest_framework/tests/test_routers.py @@ -146,7 +146,7 @@ class TestTrailingSlashRemoved(TestCase): self.urls = self.router.urls def test_urls_can_have_trailing_slash_removed(self): - expected = ['^notes$', '^notes/(?P<pk>[^/]+)$'] + expected = ['^notes$', '^notes/(?P<pk>[^/.]+)$'] for idx in range(len(expected)): self.assertEqual(expected[idx], self.urls[idx].regex.pattern) diff --git a/awx/lib/site-packages/rest_framework/tests/test_testing.py b/awx/lib/site-packages/rest_framework/tests/test_testing.py index 49d45fc292..48b8956b5f 100644 --- a/awx/lib/site-packages/rest_framework/tests/test_testing.py +++ b/awx/lib/site-packages/rest_framework/tests/test_testing.py @@ -17,8 +17,18 @@ def view(request): }) +@api_view(['GET', 'POST']) +def session_view(request): + active_session = request.session.get('active_session', False) + request.session['active_session'] = True + return Response({ + 'active_session': active_session + }) + + urlpatterns = patterns('', url(r'^view/$', view), + url(r'^session-view/$', session_view), ) @@ -46,6 +56,26 @@ class TestAPITestClient(TestCase): response = self.client.get('/view/') self.assertEqual(response.data['user'], 'example') + def test_force_authenticate_with_sessions(self): + """ + Setting `.force_authenticate()` forcibly authenticates each request. 
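(Illustration, not part of the vendored file: the call pattern under test, with some_user standing in as a placeholder for any Django user instance:

    client = APIClient()
    client.force_authenticate(some_user)  # every request is now authenticated
    client.force_authenticate(None)       # clears authentication and the session
)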
+        """
+        user = User.objects.create_user('example', 'example@example.com')
+        self.client.force_authenticate(user)
+
+        # First request does not yet have an active session
+        response = self.client.get('/session-view/')
+        self.assertEqual(response.data['active_session'], False)
+
+        # Subsequent requests have an active session
+        response = self.client.get('/session-view/')
+        self.assertEqual(response.data['active_session'], True)
+
+        # Force authenticating as `None` should also logout the user session.
+        self.client.force_authenticate(None)
+        response = self.client.get('/session-view/')
+        self.assertEqual(response.data['active_session'], False)
+
     def test_csrf_exempt_by_default(self):
         """
         By default, the test client is CSRF exempt.
diff --git a/awx/lib/site-packages/rest_framework/tests/test_views.py b/awx/lib/site-packages/rest_framework/tests/test_views.py
index c0bec5aed1..65c7e50ea1 100644
--- a/awx/lib/site-packages/rest_framework/tests/test_views.py
+++ b/awx/lib/site-packages/rest_framework/tests/test_views.py
@@ -32,6 +32,16 @@ def basic_view(request):
         return {'method': 'PATCH', 'data': request.DATA}
 
 
+class ErrorView(APIView):
+    def get(self, request, *args, **kwargs):
+        raise Exception
+
+
+@api_view(['GET'])
+def error_view(request):
+    raise Exception
+
+
 def sanitise_json_error(error_dict):
     """
     Exact contents of JSON error messages depend on the installed version
@@ -99,3 +109,34 @@ class FunctionBasedViewIntegrationTests(TestCase):
         }
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
         self.assertEqual(sanitise_json_error(response.data), expected)
+
+
+class TestCustomExceptionHandler(TestCase):
+    def setUp(self):
+        self.DEFAULT_HANDLER = api_settings.EXCEPTION_HANDLER
+
+        def exception_handler(exc):
+            return Response('Error!', status=status.HTTP_400_BAD_REQUEST)
+
+        api_settings.EXCEPTION_HANDLER = exception_handler
+
+    def tearDown(self):
+        api_settings.EXCEPTION_HANDLER = self.DEFAULT_HANDLER
+
+    def test_class_based_view_exception_handler(self):
+        view = ErrorView.as_view()
+
+        request = factory.get('/', content_type='application/json')
+        response = view(request)
+        expected = 'Error!'
+        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+        self.assertEqual(response.data, expected)
+
+    def test_function_based_view_exception_handler(self):
+        view = error_view
+
+        request = factory.get('/', content_type='application/json')
+        response = view(request)
+        expected = 'Error!'
+        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+        self.assertEqual(response.data, expected)
diff --git a/awx/lib/site-packages/rest_framework/throttling.py b/awx/lib/site-packages/rest_framework/throttling.py
index 65b4559307..a946d837fa 100644
--- a/awx/lib/site-packages/rest_framework/throttling.py
+++ b/awx/lib/site-packages/rest_framework/throttling.py
@@ -2,7 +2,7 @@
 Provides various throttling policies.
 """
 from __future__ import unicode_literals
-from django.core.cache import cache
+from django.core.cache import cache as default_cache
 from django.core.exceptions import ImproperlyConfigured
 from rest_framework.settings import api_settings
 import time
@@ -39,6 +39,7 @@ class SimpleRateThrottle(BaseThrottle):
 
     Previous request information used for throttling is stored in the cache.
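(Illustration, not part of the vendored file: the class-level cache attribute introduced in this hunk exists so a subclass or a test can inject a cache other than Django's default. A sketch, assuming a hypothetical 'throttling' cache alias and the get_cache() API of Django releases contemporary with this code:

    from django.core.cache import get_cache
    from rest_framework.throttling import AnonRateThrottle

    class CustomCacheThrottle(AnonRateThrottle):
        cache = get_cache('throttling')  # any configured cache backend
)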
""" + cache = default_cache timer = time.time cache_format = 'throtte_%(scope)s_%(ident)s' scope = None @@ -99,7 +100,7 @@ class SimpleRateThrottle(BaseThrottle): if self.key is None: return True - self.history = cache.get(self.key, []) + self.history = self.cache.get(self.key, []) self.now = self.timer() # Drop any requests from the history which have now passed the @@ -116,7 +117,7 @@ class SimpleRateThrottle(BaseThrottle): into the cache. """ self.history.insert(0, self.now) - cache.set(self.key, self.history, self.duration) + self.cache.set(self.key, self.history, self.duration) return True def throttle_failure(self): @@ -151,7 +152,9 @@ class AnonRateThrottle(SimpleRateThrottle): if request.user.is_authenticated(): return None # Only throttle unauthenticated requests. - ident = request.META.get('REMOTE_ADDR', None) + ident = request.META.get('HTTP_X_FORWARDED_FOR') + if ident is None: + ident = request.META.get('REMOTE_ADDR') return self.cache_format % { 'scope': self.scope, diff --git a/awx/lib/site-packages/rest_framework/utils/breadcrumbs.py b/awx/lib/site-packages/rest_framework/utils/breadcrumbs.py index d51374b0a8..e6690d1705 100644 --- a/awx/lib/site-packages/rest_framework/utils/breadcrumbs.py +++ b/awx/lib/site-packages/rest_framework/utils/breadcrumbs.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals from django.core.urlresolvers import resolve, get_script_prefix -from rest_framework.utils.formatting import get_view_name def get_breadcrumbs(url): @@ -9,8 +8,11 @@ def get_breadcrumbs(url): tuple of (name, url). """ + from rest_framework.settings import api_settings from rest_framework.views import APIView + view_name_func = api_settings.VIEW_NAME_FUNCTION + def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen): """ Add tuples of (name, url) to the breadcrumbs list, @@ -30,7 +32,7 @@ def get_breadcrumbs(url): # Probably an optional trailing slash. if not seen or seen[-1] != view: suffix = getattr(view, 'suffix', None) - name = get_view_name(view.cls, suffix) + name = view_name_func(cls, suffix) breadcrumbs_list.insert(0, (name, prefix + url)) seen.append(view) diff --git a/awx/lib/site-packages/rest_framework/utils/formatting.py b/awx/lib/site-packages/rest_framework/utils/formatting.py index 4bec838776..4b59ba8403 100644 --- a/awx/lib/site-packages/rest_framework/utils/formatting.py +++ b/awx/lib/site-packages/rest_framework/utils/formatting.py @@ -5,11 +5,13 @@ from __future__ import unicode_literals from django.utils.html import escape from django.utils.safestring import mark_safe -from rest_framework.compat import apply_markdown, smart_text +from rest_framework.compat import apply_markdown +from rest_framework.settings import api_settings +from textwrap import dedent import re -def _remove_trailing_string(content, trailing): +def remove_trailing_string(content, trailing): """ Strip trailing component `trailing` from `content` if it exists. Used when generating names from view classes. @@ -19,10 +21,14 @@ def _remove_trailing_string(content, trailing): return content -def _remove_leading_indent(content): +def dedent(content): """ Remove leading indent from a block of text. Used when generating descriptions from docstrings. + + Note that python's `textwrap.dedent` doesn't quite cut it, + as it fails to dedent multiline docstrings that include + unindented text on the initial line. 
""" whitespace_counts = [len(line) - len(line.lstrip(' ')) for line in content.splitlines()[1:] if line.lstrip()] @@ -31,11 +37,10 @@ def _remove_leading_indent(content): if whitespace_counts: whitespace_pattern = '^' + (' ' * min(whitespace_counts)) content = re.sub(re.compile(whitespace_pattern, re.MULTILINE), '', content) - content = content.strip('\n') - return content + return content.strip() -def _camelcase_to_spaces(content): +def camelcase_to_spaces(content): """ Translate 'CamelCaseNames' to 'Camel Case Names'. Used when generating names from view classes. @@ -44,31 +49,6 @@ def _camelcase_to_spaces(content): content = re.sub(camelcase_boundry, ' \\1', content).strip() return ' '.join(content.split('_')).title() - -def get_view_name(cls, suffix=None): - """ - Return a formatted name for an `APIView` class or `@api_view` function. - """ - name = cls.__name__ - name = _remove_trailing_string(name, 'View') - name = _remove_trailing_string(name, 'ViewSet') - name = _camelcase_to_spaces(name) - if suffix: - name += ' ' + suffix - return name - - -def get_view_description(cls, html=False): - """ - Return a description for an `APIView` class or `@api_view` function. - """ - description = cls.__doc__ or '' - description = _remove_leading_indent(smart_text(description)) - if html: - return markup_description(description) - return description - - def markup_description(description): """ Apply HTML markup to the given description. diff --git a/awx/lib/site-packages/rest_framework/views.py b/awx/lib/site-packages/rest_framework/views.py index d51233a932..853e646145 100644 --- a/awx/lib/site-packages/rest_framework/views.py +++ b/awx/lib/site-packages/rest_framework/views.py @@ -8,16 +8,79 @@ from django.http import Http404 from django.utils.datastructures import SortedDict from django.views.decorators.csrf import csrf_exempt from rest_framework import status, exceptions -from rest_framework.compat import View, HttpResponseBase +from rest_framework.compat import smart_text, HttpResponseBase, View from rest_framework.request import Request from rest_framework.response import Response from rest_framework.settings import api_settings -from rest_framework.utils.formatting import get_view_name, get_view_description +from rest_framework.utils import formatting + + +def get_view_name(view_cls, suffix=None): + """ + Given a view class, return a textual name to represent the view. + This name is used in the browsable API, and in OPTIONS responses. + + This function is the default for the `VIEW_NAME_FUNCTION` setting. + """ + name = view_cls.__name__ + name = formatting.remove_trailing_string(name, 'View') + name = formatting.remove_trailing_string(name, 'ViewSet') + name = formatting.camelcase_to_spaces(name) + if suffix: + name += ' ' + suffix + + return name + +def get_view_description(view_cls, html=False): + """ + Given a view class, return a textual description to represent the view. + This name is used in the browsable API, and in OPTIONS responses. + + This function is the default for the `VIEW_DESCRIPTION_FUNCTION` setting. + """ + description = view_cls.__doc__ or '' + description = formatting.dedent(smart_text(description)) + if html: + return formatting.markup_description(description) + return description + + +def exception_handler(exc): + """ + Returns the response that should be used for any given exception. + + By default we handle the REST framework `APIException`, and also + Django's builtin `Http404` and `PermissionDenied` exceptions. 
+
+    Any unhandled exceptions may return `None`, which will cause a 500 error
+    to be raised.
+    """
+    if isinstance(exc, exceptions.APIException):
+        headers = {}
+        if getattr(exc, 'auth_header', None):
+            headers['WWW-Authenticate'] = exc.auth_header
+        if getattr(exc, 'wait', None):
+            headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait
+
+        return Response({'detail': exc.detail},
+                        status=exc.status_code,
+                        headers=headers)
+
+    elif isinstance(exc, Http404):
+        return Response({'detail': 'Not found'},
+                        status=status.HTTP_404_NOT_FOUND)
+
+    elif isinstance(exc, PermissionDenied):
+        return Response({'detail': 'Permission denied'},
+                        status=status.HTTP_403_FORBIDDEN)
+
+    # Note: Unhandled exceptions will raise a 500 error.
+    return None
 
 
 class APIView(View):
-    settings = api_settings
 
+    # The following policies may be set either globally, or per-view.
     renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
     parser_classes = api_settings.DEFAULT_PARSER_CLASSES
     authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES
@@ -25,6 +88,9 @@ class APIView(View):
     permission_classes = api_settings.DEFAULT_PERMISSION_CLASSES
     content_negotiation_class = api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS
 
+    # Allow dependency injection of other settings to make testing easier.
+    settings = api_settings
+
     @classmethod
     def as_view(cls, **initkwargs):
         """
@@ -110,6 +176,22 @@ class APIView(View):
             'request': getattr(self, 'request', None)
         }
 
+    def get_view_name(self):
+        """
+        Return the view name, as used in OPTIONS responses and in the
+        browsable API.
+        """
+        func = self.settings.VIEW_NAME_FUNCTION
+        return func(self.__class__, getattr(self, 'suffix', None))
+
+    def get_view_description(self, html=False):
+        """
+        Return some descriptive text for the view, as used in OPTIONS responses
+        and in the browsable API.
+        """
+        func = self.settings.VIEW_DESCRIPTION_FUNCTION
+        return func(self.__class__, html)
+
     # API policy instantiation methods
 
     def get_format_suffix(self, **kwargs):
@@ -269,33 +351,23 @@ class APIView(View):
         Handle any exception that occurs, by returning an appropriate
         response, or re-raising the error.
         """
-        if isinstance(exc, exceptions.Throttled) and exc.wait is not None:
-            # Throttle wait header
-            self.headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait
-
         if isinstance(exc, (exceptions.NotAuthenticated,
                             exceptions.AuthenticationFailed)):
             # WWW-Authenticate header for 401 responses, else coerce to 403
             auth_header = self.get_authenticate_header(self.request)
 
             if auth_header:
-                self.headers['WWW-Authenticate'] = auth_header
+                exc.auth_header = auth_header
             else:
                 exc.status_code = status.HTTP_403_FORBIDDEN
 
-        if isinstance(exc, exceptions.APIException):
-            return Response({'detail': exc.detail},
-                            status=exc.status_code,
-                            exception=True)
-        elif isinstance(exc, Http404):
-            return Response({'detail': 'Not found'},
-                            status=status.HTTP_404_NOT_FOUND,
-                            exception=True)
-        elif isinstance(exc, PermissionDenied):
-            return Response({'detail': 'Permission denied'},
-                            status=status.HTTP_403_FORBIDDEN,
-                            exception=True)
-        raise
+        response = self.settings.EXCEPTION_HANDLER(exc)
+
+        if response is None:
+            raise
+
+        response.exception = True
+        return response
 
     # Note: session based authentication is explicitly CSRF validated,
     # all other authentication is CSRF exempt.
@@ -342,16 +414,12 @@ class APIView(View):
         Return a dictionary of metadata about the view.
         Used to return responses for OPTIONS requests.
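(Illustration, not part of the vendored file: the name and description above now come from the get_view_name()/get_view_description() hooks added earlier in this hunk, so both can be replaced per project. Sketch, dotted paths hypothetical:

    REST_FRAMEWORK = {
        'VIEW_NAME_FUNCTION': 'myproject.utils.get_view_name',
        'VIEW_DESCRIPTION_FUNCTION': 'myproject.utils.get_view_description',
    }
)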
""" - - # This is used by ViewSets to disambiguate instance vs list views - view_name_suffix = getattr(self, 'suffix', None) - # By default we can't provide any form-like information, however the # generic views override this implementation and add additional # information for POST and PUT methods, based on the serializer. ret = SortedDict() - ret['name'] = get_view_name(self.__class__, view_name_suffix) - ret['description'] = get_view_description(self.__class__) + ret['name'] = self.get_view_name() + ret['description'] = self.get_view_description() ret['renders'] = [renderer.media_type for renderer in self.renderer_classes] ret['parses'] = [parser.media_type for parser in self.parser_classes] return ret diff --git a/awx/lib/site-packages/setuptools/__init__.py b/awx/lib/site-packages/setuptools/__init__.py new file mode 100644 index 0000000000..fc9b7b936c --- /dev/null +++ b/awx/lib/site-packages/setuptools/__init__.py @@ -0,0 +1,98 @@ +"""Extensions to the 'distutils' for large or complex distributions""" + +import os +import sys +import distutils.core +import distutils.filelist +from distutils.core import Command as _Command +from distutils.util import convert_path + +import setuptools.version +from setuptools.extension import Extension +from setuptools.dist import Distribution, Feature, _get_unpatched +from setuptools.depends import Require + +__all__ = [ + 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require', + 'find_packages' +] + +__version__ = setuptools.version.__version__ + +bootstrap_install_from = None + +# If we run 2to3 on .py files, should we also convert docstrings? +# Default: yes; assume that we can detect doctests reliably +run_2to3_on_doctests = True +# Standard package names for fixer packages +lib2to3_fixer_packages = ['lib2to3.fixes'] + +def find_packages(where='.', exclude=()): + """Return a list all Python packages found within directory 'where' + + 'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it + will be converted to the appropriate local path syntax. 'exclude' is a + sequence of package names to exclude; '*' can be used as a wildcard in the + names, such that 'foo.*' will exclude all subpackages of 'foo' (but not + 'foo' itself). + """ + out = [] + stack=[(convert_path(where), '')] + while stack: + where,prefix = stack.pop(0) + for name in os.listdir(where): + fn = os.path.join(where,name) + looks_like_package = ( + '.' not in name + and os.path.isdir(fn) + and os.path.isfile(os.path.join(fn, '__init__.py')) + ) + if looks_like_package: + out.append(prefix+name) + stack.append((fn, prefix+name+'.')) + for pat in list(exclude)+['ez_setup']: + from fnmatch import fnmatchcase + out = [item for item in out if not fnmatchcase(item,pat)] + return out + +setup = distutils.core.setup + +_Command = _get_unpatched(_Command) + +class Command(_Command): + __doc__ = _Command.__doc__ + + command_consumes_arguments = False + + def __init__(self, dist, **kw): + # Add support for keyword arguments + _Command.__init__(self,dist) + for k,v in kw.items(): + setattr(self,k,v) + + def reinitialize_command(self, command, reinit_subcommands=0, **kw): + cmd = _Command.reinitialize_command(self, command, reinit_subcommands) + for k,v in kw.items(): + setattr(cmd,k,v) # update command with keywords + return cmd + +distutils.core.Command = Command # we can't patch distutils.cmd, alas + +def findall(dir = os.curdir): + """Find all files under 'dir' and return the list of full filenames + (relative to 'dir'). 
+ """ + all_files = [] + for base, dirs, files in os.walk(dir): + if base==os.curdir or base.startswith(os.curdir+os.sep): + base = base[2:] + if base: + files = [os.path.join(base, f) for f in files] + all_files.extend(filter(os.path.isfile, files)) + return all_files + +distutils.filelist.findall = findall # fix findall bug in distutils. + +# sys.dont_write_bytecode was introduced in Python 2.6. +_dont_write_bytecode = getattr(sys, 'dont_write_bytecode', + bool(os.environ.get("PYTHONDONTWRITEBYTECODE"))) diff --git a/awx/lib/site-packages/setuptools/_backport/__init__.py b/awx/lib/site-packages/setuptools/_backport/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/setuptools/_backport/hashlib/__init__.py b/awx/lib/site-packages/setuptools/_backport/hashlib/__init__.py new file mode 100644 index 0000000000..5aeab496af --- /dev/null +++ b/awx/lib/site-packages/setuptools/_backport/hashlib/__init__.py @@ -0,0 +1,146 @@ +# $Id$ +# +# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org) +# Licensed to PSF under a Contributor Agreement. +# + +__doc__ = """hashlib module - A common interface to many hash functions. + +new(name, string='') - returns a new hash object implementing the + given hash function; initializing the hash + using the given string data. + +Named constructor functions are also available, these are much faster +than using new(): + +md5(), sha1(), sha224(), sha256(), sha384(), and sha512() + +More algorithms may be available on your platform but the above are +guaranteed to exist. + +NOTE: If you want the adler32 or crc32 hash functions they are available in +the zlib module. + +Choose your hash function wisely. Some have known collision weaknesses. +sha384 and sha512 will be slow on 32 bit platforms. + +Hash objects have these methods: + - update(arg): Update the hash object with the string arg. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments. + - digest(): Return the digest of the strings passed to the update() method + so far. This may contain non-ASCII characters, including + NUL bytes. + - hexdigest(): Like digest() except the digest is returned as a string of + double length, containing only hexadecimal digits. + - copy(): Return a copy (clone) of the hash object. This can be used to + efficiently compute the digests of strings that share a common + initial substring. + +For example, to obtain the digest of the string 'Nobody inspects the +spammish repetition': + + >>> import hashlib + >>> m = hashlib.md5() + >>> m.update("Nobody inspects") + >>> m.update(" the spammish repetition") + >>> m.digest() + '\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9' + +More condensed: + + >>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest() + 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' + +""" + +# This tuple and __get_builtin_constructor() must be modified if a new +# always available algorithm is added. 
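# Usage matches the stdlib hashlib module: the named constructors are bound
# at import time by the loop at the bottom of this file, and new() prefers
# the OpenSSL-backed _hashlib before falling back to the pure-Python
# implementations bundled alongside this backport. For example (sketch):
#
#     from setuptools._backport import hashlib
#     hashlib.sha256('payload').hexdigest()
#     hashlib.new('sha1', 'payload').hexdigest()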
+__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') + +algorithms = __always_supported + +__all__ = __always_supported + ('new', 'algorithms') + + +def __get_builtin_constructor(name): + try: + if name in ('SHA1', 'sha1'): + import _sha + return _sha.new + elif name in ('MD5', 'md5'): + import md5 + return md5.new + elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'): + import _sha256 + bs = name[3:] + if bs == '256': + return _sha256.sha256 + elif bs == '224': + return _sha256.sha224 + elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'): + import _sha512 + bs = name[3:] + if bs == '512': + return _sha512.sha512 + elif bs == '384': + return _sha512.sha384 + except ImportError: + pass # no extension module, this hash is unsupported. + + raise ValueError('unsupported hash type %s' % name) + + +def __get_openssl_constructor(name): + try: + f = getattr(_hashlib, 'openssl_' + name) + # Allow the C module to raise ValueError. The function will be + # defined but the hash not actually available thanks to OpenSSL. + f() + # Use the C function directly (very fast) + return f + except (AttributeError, ValueError): + return __get_builtin_constructor(name) + + +def __py_new(name, string=''): + """new(name, string='') - Return a new hashing object using the named algorithm; + optionally initialized with a string. + """ + return __get_builtin_constructor(name)(string) + + +def __hash_new(name, string=''): + """new(name, string='') - Return a new hashing object using the named algorithm; + optionally initialized with a string. + """ + try: + return _hashlib.new(name, string) + except ValueError: + # If the _hashlib module (OpenSSL) doesn't support the named + # hash, try using our builtin implementations. + # This allows for SHA224/256 and SHA384/512 support even though + # the OpenSSL library prior to 0.9.8 doesn't provide them. + return __get_builtin_constructor(name)(string) + + +try: + import _hashlib + new = __hash_new + __get_hash = __get_openssl_constructor +except ImportError: + new = __py_new + __get_hash = __get_builtin_constructor + +for __func_name in __always_supported: + # try them all, some may not work due to the OpenSSL + # version not supporting that algorithm. + try: + globals()[__func_name] = __get_hash(__func_name) + except ValueError: + import logging + logging.exception('code for hash %s was not found.', __func_name) + +# Cleanup locals() +del __always_supported, __func_name, __get_hash +del __py_new, __hash_new, __get_openssl_constructor diff --git a/awx/lib/site-packages/setuptools/_backport/hashlib/_sha.py b/awx/lib/site-packages/setuptools/_backport/hashlib/_sha.py new file mode 100644 index 0000000000..d49993c887 --- /dev/null +++ b/awx/lib/site-packages/setuptools/_backport/hashlib/_sha.py @@ -0,0 +1,359 @@ +# -*- coding: iso-8859-1 -*- +"""A sample implementation of SHA-1 in pure Python. + + Framework adapted from Dinu Gherman's MD5 implementation by + J. Hallén and L. Creighton. SHA-1 implementation based directly on + the text of the NIST standard FIPS PUB 180-1. +""" + + +__date__ = '2004-11-17' +__version__ = 0.91 # Modernised by J. Hallén and L. Creighton for Pypy + + +import struct, copy + + +# ====================================================================== +# Bit-Manipulation helpers +# +# _long2bytes() was contributed by Barry Warsaw +# and is reused here with tiny modifications. 
+# ====================================================================== + +def _long2bytesBigEndian(n, blocksize=0): + """Convert a long integer to a byte string. + + If optional blocksize is given and greater than zero, pad the front + of the byte string with binary zeros so that the length is a multiple + of blocksize. + """ + + # After much testing, this algorithm was deemed to be the fastest. + s = '' + pack = struct.pack + while n > 0: + s = pack('>I', n & 0xffffffff) + s + n = n >> 32 + + # Strip off leading zeros. + for i in range(len(s)): + if s[i] != '\000': + break + else: + # Only happens when n == 0. + s = '\000' + i = 0 + + s = s[i:] + + # Add back some pad bytes. This could be done more efficiently + # w.r.t. the de-padding being done above, but sigh... + if blocksize > 0 and len(s) % blocksize: + s = (blocksize - len(s) % blocksize) * '\000' + s + + return s + + +def _bytelist2longBigEndian(list): + "Transform a list of characters into a list of longs." + + imax = len(list) // 4 + hl = [0] * imax + + j = 0 + i = 0 + while i < imax: + b0 = ord(list[j]) << 24 + b1 = ord(list[j+1]) << 16 + b2 = ord(list[j+2]) << 8 + b3 = ord(list[j+3]) + hl[i] = b0 | b1 | b2 | b3 + i = i+1 + j = j+4 + + return hl + + +def _rotateLeft(x, n): + "Rotate x (32 bit) left n bits circularly." + + return (x << n) | (x >> (32-n)) + + +# ====================================================================== +# The SHA transformation functions +# +# ====================================================================== + +def f0_19(B, C, D): + return (B & C) | ((~ B) & D) + +def f20_39(B, C, D): + return B ^ C ^ D + +def f40_59(B, C, D): + return (B & C) | (B & D) | (C & D) + +def f60_79(B, C, D): + return B ^ C ^ D + + +f = [f0_19, f20_39, f40_59, f60_79] + +# Constants to be used +K = [ + 0x5A827999, # ( 0 <= t <= 19) + 0x6ED9EBA1, # (20 <= t <= 39) + 0x8F1BBCDC, # (40 <= t <= 59) + 0xCA62C1D6 # (60 <= t <= 79) + ] + +class sha: + "An implementation of the MD5 hash function in pure Python." + + digest_size = digestsize = 20 + block_size = 1 + + def __init__(self): + "Initialisation." + + # Initial message length in bits(!). + self.length = 0 + self.count = [0, 0] + + # Initial empty message as a sequence of bytes (8 bit characters). + self.input = [] + + # Call a separate init function, that can be used repeatedly + # to start from scratch on the same object. + self.init() + + + def init(self): + "Initialize the message-digest and set all fields to zero." + + self.length = 0 + self.input = [] + + # Initial 160 bit message digest (5 times 32 bit). 
+ self.H0 = 0x67452301 + self.H1 = 0xEFCDAB89 + self.H2 = 0x98BADCFE + self.H3 = 0x10325476 + self.H4 = 0xC3D2E1F0 + + def _transform(self, W): + + for t in range(16, 80): + W.append(_rotateLeft( + W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) & 0xffffffff) + + A = self.H0 + B = self.H1 + C = self.H2 + D = self.H3 + E = self.H4 + + """ + This loop was unrolled to gain about 10% in speed + for t in range(0, 80): + TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20] + E = D + D = C + C = _rotateLeft(B, 30) & 0xffffffff + B = A + A = TEMP & 0xffffffff + """ + + for t in range(0, 20): + TEMP = _rotateLeft(A, 5) + ((B & C) | ((~ B) & D)) + E + W[t] + K[0] + E = D + D = C + C = _rotateLeft(B, 30) & 0xffffffff + B = A + A = TEMP & 0xffffffff + + for t in range(20, 40): + TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[1] + E = D + D = C + C = _rotateLeft(B, 30) & 0xffffffff + B = A + A = TEMP & 0xffffffff + + for t in range(40, 60): + TEMP = _rotateLeft(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[t] + K[2] + E = D + D = C + C = _rotateLeft(B, 30) & 0xffffffff + B = A + A = TEMP & 0xffffffff + + for t in range(60, 80): + TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[3] + E = D + D = C + C = _rotateLeft(B, 30) & 0xffffffff + B = A + A = TEMP & 0xffffffff + + + self.H0 = (self.H0 + A) & 0xffffffff + self.H1 = (self.H1 + B) & 0xffffffff + self.H2 = (self.H2 + C) & 0xffffffff + self.H3 = (self.H3 + D) & 0xffffffff + self.H4 = (self.H4 + E) & 0xffffffff + + + # Down from here all methods follow the Python Standard Library + # API of the sha module. + + def update(self, inBuf): + """Add to the current message. + + Update the md5 object with the string arg. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments, i.e. m.update(a); m.update(b) is equivalent + to m.update(a+b). + + The hash is immediately calculated for all full blocks. The final + calculation is made in digest(). It will calculate 1-2 blocks, + depending on how much padding we have to add. This allows us to + keep an intermediate value for the hash, so that we only need to + make minimal recalculation if we call update() to add more data + to the hashed string. + """ + + leninBuf = len(inBuf) + + # Compute number of bytes mod 64. + index = (self.count[1] >> 3) & 0x3F + + # Update number of bits. + self.count[1] = self.count[1] + (leninBuf << 3) + if self.count[1] < (leninBuf << 3): + self.count[0] = self.count[0] + 1 + self.count[0] = self.count[0] + (leninBuf >> 29) + + partLen = 64 - index + + if leninBuf >= partLen: + self.input[index:] = list(inBuf[:partLen]) + self._transform(_bytelist2longBigEndian(self.input)) + i = partLen + while i + 63 < leninBuf: + self._transform(_bytelist2longBigEndian(list(inBuf[i:i+64]))) + i = i + 64 + else: + self.input = list(inBuf[i:leninBuf]) + else: + i = 0 + self.input = self.input + list(inBuf) + + + def digest(self): + """Terminate the message-digest computation and return digest. + + Return the digest of the strings passed to the update() + method so far. This is a 16-byte string which may contain + non-ASCII characters, including null bytes. + """ + + H0 = self.H0 + H1 = self.H1 + H2 = self.H2 + H3 = self.H3 + H4 = self.H4 + input = [] + self.input + count = [] + self.count + + index = (self.count[1] >> 3) & 0x3f + + if index < 56: + padLen = 56 - index + else: + padLen = 120 - index + + padding = ['\200'] + ['\000'] * 63 + self.update(padding[:padLen]) + + # Append length (before padding). 
+ bits = _bytelist2longBigEndian(self.input[:56]) + count + + self._transform(bits) + + # Store state in digest. + digest = _long2bytesBigEndian(self.H0, 4) + \ + _long2bytesBigEndian(self.H1, 4) + \ + _long2bytesBigEndian(self.H2, 4) + \ + _long2bytesBigEndian(self.H3, 4) + \ + _long2bytesBigEndian(self.H4, 4) + + self.H0 = H0 + self.H1 = H1 + self.H2 = H2 + self.H3 = H3 + self.H4 = H4 + self.input = input + self.count = count + + return digest + + + def hexdigest(self): + """Terminate and return digest in HEX form. + + Like digest() except the digest is returned as a string of + length 32, containing only hexadecimal digits. This may be + used to exchange the value safely in email or other non- + binary environments. + """ + return ''.join(['%02x' % ord(c) for c in self.digest()]) + + def copy(self): + """Return a clone object. + + Return a copy ('clone') of the md5 object. This can be used + to efficiently compute the digests of strings that share + a common initial substring. + """ + + return copy.deepcopy(self) + + +# ====================================================================== +# Mimic Python top-level functions from standard library API +# for consistency with the _sha module of the standard library. +# ====================================================================== + +# These are mandatory variables in the module. They have constant values +# in the SHA standard. + +digest_size = 20 +digestsize = 20 +blocksize = 1 + +def new(arg=None): + """Return a new sha crypto object. + + If arg is present, the method call update(arg) is made. + """ + + crypto = sha() + if arg: + crypto.update(arg) + + return crypto + + +if __name__ == "__main__": + a_str = "just a test string" + + assert 'da39a3ee5e6b4b0d3255bfef95601890afd80709' == new().hexdigest() + assert '3f0cf2e3d9e5903e839417dfc47fed6bfa6457f6' == new(a_str).hexdigest() + assert '0852b254078fe3772568a4aba37b917f3d4066ba' == new(a_str*7).hexdigest() + + s = new(a_str) + s.update(a_str) + assert '8862c1b50967f39d3db6bdc2877d9ccebd3102e5' == s.hexdigest() diff --git a/awx/lib/site-packages/setuptools/_backport/hashlib/_sha256.py b/awx/lib/site-packages/setuptools/_backport/hashlib/_sha256.py new file mode 100644 index 0000000000..805dbd086c --- /dev/null +++ b/awx/lib/site-packages/setuptools/_backport/hashlib/_sha256.py @@ -0,0 +1,260 @@ +import struct + +SHA_BLOCKSIZE = 64 +SHA_DIGESTSIZE = 32 + + +def new_shaobject(): + return { + 'digest': [0]*8, + 'count_lo': 0, + 'count_hi': 0, + 'data': [0]* SHA_BLOCKSIZE, + 'local': 0, + 'digestsize': 0 + } + +ROR = lambda x, y: (((x & 0xffffffff) >> (y & 31)) | (x << (32 - (y & 31)))) & 0xffffffff +Ch = lambda x, y, z: (z ^ (x & (y ^ z))) +Maj = lambda x, y, z: (((x | y) & z) | (x & y)) +S = lambda x, n: ROR(x, n) +R = lambda x, n: (x & 0xffffffff) >> n +Sigma0 = lambda x: (S(x, 2) ^ S(x, 13) ^ S(x, 22)) +Sigma1 = lambda x: (S(x, 6) ^ S(x, 11) ^ S(x, 25)) +Gamma0 = lambda x: (S(x, 7) ^ S(x, 18) ^ R(x, 3)) +Gamma1 = lambda x: (S(x, 17) ^ S(x, 19) ^ R(x, 10)) + +def sha_transform(sha_info): + W = [] + + d = sha_info['data'] + for i in xrange(0,16): + W.append( (d[4*i]<<24) + (d[4*i+1]<<16) + (d[4*i+2]<<8) + d[4*i+3]) + + for i in xrange(16,64): + W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffff ) + + ss = sha_info['digest'][:] + + def RND(a,b,c,d,e,f,g,h,i,ki): + t0 = h + Sigma1(e) + Ch(e, f, g) + ki + W[i]; + t1 = Sigma0(a) + Maj(a, b, c); + d += t0; + h = t0 + t1; + return d & 0xffffffff, h & 0xffffffff + + ss[3], ss[7] = 
RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x71374491); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcf); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba5); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25b); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b01); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a7); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c1); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc6); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dc); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c8); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf3); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x14292967); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a85); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b2138); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d13); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a7354); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c85); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a1); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664b); + ss[1], 
ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a3); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd6990624); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e3585); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa070); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c08); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774c); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4a); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc70208); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506ceb); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2); + + dig = [] + for i, x in enumerate(sha_info['digest']): + dig.append( (x + ss[i]) & 0xffffffff ) + sha_info['digest'] = dig + +def sha_init(): + sha_info = new_shaobject() + sha_info['digest'] = [0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19] + sha_info['count_lo'] = 0 + sha_info['count_hi'] = 0 + sha_info['local'] = 0 + sha_info['digestsize'] = 32 + return sha_info + +def sha224_init(): + sha_info = new_shaobject() + sha_info['digest'] = [0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4] + sha_info['count_lo'] = 0 + sha_info['count_hi'] = 0 + sha_info['local'] = 0 + sha_info['digestsize'] = 28 + return sha_info + +def getbuf(s): + if isinstance(s, str): + return s + elif isinstance(s, unicode): + return str(s) + else: + return buffer(s) + +def sha_update(sha_info, buffer): + count = len(buffer) + buffer_idx = 0 + clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff + if clo < sha_info['count_lo']: + sha_info['count_hi'] += 1 + sha_info['count_lo'] = clo + + sha_info['count_hi'] += (count >> 29) + + if sha_info['local']: + i = SHA_BLOCKSIZE - sha_info['local'] + if i > count: + i = count + + # copy buffer + for x in enumerate(buffer[buffer_idx:buffer_idx+i]): + sha_info['data'][sha_info['local']+x[0]] = struct.unpack('B', x[1])[0] + + count -= i + buffer_idx += i + + sha_info['local'] += i + if sha_info['local'] == SHA_BLOCKSIZE: + sha_transform(sha_info) + sha_info['local'] = 0 + else: + return + + while count >= SHA_BLOCKSIZE: + # copy buffer + sha_info['data'] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]] + count -= 
SHA_BLOCKSIZE + buffer_idx += SHA_BLOCKSIZE + sha_transform(sha_info) + + + # copy buffer + pos = sha_info['local'] + sha_info['data'][pos:pos+count] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + count]] + sha_info['local'] = count + +def sha_final(sha_info): + lo_bit_count = sha_info['count_lo'] + hi_bit_count = sha_info['count_hi'] + count = (lo_bit_count >> 3) & 0x3f + sha_info['data'][count] = 0x80; + count += 1 + if count > SHA_BLOCKSIZE - 8: + # zero the bytes in data after the count + sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count)) + sha_transform(sha_info) + # zero bytes in data + sha_info['data'] = [0] * SHA_BLOCKSIZE + else: + sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count)) + + sha_info['data'][56] = (hi_bit_count >> 24) & 0xff + sha_info['data'][57] = (hi_bit_count >> 16) & 0xff + sha_info['data'][58] = (hi_bit_count >> 8) & 0xff + sha_info['data'][59] = (hi_bit_count >> 0) & 0xff + sha_info['data'][60] = (lo_bit_count >> 24) & 0xff + sha_info['data'][61] = (lo_bit_count >> 16) & 0xff + sha_info['data'][62] = (lo_bit_count >> 8) & 0xff + sha_info['data'][63] = (lo_bit_count >> 0) & 0xff + + sha_transform(sha_info) + + dig = [] + for i in sha_info['digest']: + dig.extend([ ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ]) + return ''.join([chr(i) for i in dig]) + +class sha256(object): + digest_size = digestsize = SHA_DIGESTSIZE + block_size = SHA_BLOCKSIZE + + def __init__(self, s=None): + self._sha = sha_init() + if s: + sha_update(self._sha, getbuf(s)) + + def update(self, s): + sha_update(self._sha, getbuf(s)) + + def digest(self): + return sha_final(self._sha.copy())[:self._sha['digestsize']] + + def hexdigest(self): + return ''.join(['%.2x' % ord(i) for i in self.digest()]) + + def copy(self): + new = sha256.__new__(sha256) + new._sha = self._sha.copy() + return new + +class sha224(sha256): + digest_size = digestsize = 28 + + def __init__(self, s=None): + self._sha = sha224_init() + if s: + sha_update(self._sha, getbuf(s)) + + def copy(self): + new = sha224.__new__(sha224) + new._sha = self._sha.copy() + return new + +if __name__ == "__main__": + a_str = "just a test string" + + assert 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == sha256().hexdigest() + assert 'd7b553c6f09ac85d142415f857c5310f3bbbe7cdd787cce4b985acedd585266f' == sha256(a_str).hexdigest() + assert '8113ebf33c97daa9998762aacafe750c7cefc2b2f173c90c59663a57fe626f21' == sha256(a_str*7).hexdigest() + + s = sha256(a_str) + s.update(a_str) + assert '03d9963e05a094593190b6fc794cb1a3e1ac7d7883f0b5855268afeccc70d461' == s.hexdigest() diff --git a/awx/lib/site-packages/setuptools/_backport/hashlib/_sha512.py b/awx/lib/site-packages/setuptools/_backport/hashlib/_sha512.py new file mode 100644 index 0000000000..68ff46f308 --- /dev/null +++ b/awx/lib/site-packages/setuptools/_backport/hashlib/_sha512.py @@ -0,0 +1,288 @@ +""" +This code was Ported from CPython's sha512module.c +""" + +import struct + +SHA_BLOCKSIZE = 128 +SHA_DIGESTSIZE = 64 + + +def new_shaobject(): + return { + 'digest': [0]*8, + 'count_lo': 0, + 'count_hi': 0, + 'data': [0]* SHA_BLOCKSIZE, + 'local': 0, + 'digestsize': 0 + } + +ROR64 = lambda x, y: (((x & 0xffffffffffffffff) >> (y & 63)) | (x << (64 - (y & 63)))) & 0xffffffffffffffff +Ch = lambda x, y, z: (z ^ (x & (y ^ z))) +Maj = lambda x, y, z: (((x | y) & z) | (x & y)) +S = lambda x, n: ROR64(x, n) +R = lambda x, n: (x & 0xffffffffffffffff) >> n +Sigma0 = lambda x: (S(x, 28) ^ 
S(x, 34) ^ S(x, 39)) +Sigma1 = lambda x: (S(x, 14) ^ S(x, 18) ^ S(x, 41)) +Gamma0 = lambda x: (S(x, 1) ^ S(x, 8) ^ R(x, 7)) +Gamma1 = lambda x: (S(x, 19) ^ S(x, 61) ^ R(x, 6)) + +def sha_transform(sha_info): + W = [] + + d = sha_info['data'] + for i in xrange(0,16): + W.append( (d[8*i]<<56) + (d[8*i+1]<<48) + (d[8*i+2]<<40) + (d[8*i+3]<<32) + (d[8*i+4]<<24) + (d[8*i+5]<<16) + (d[8*i+6]<<8) + d[8*i+7]) + + for i in xrange(16,80): + W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffffffffffff ) + + ss = sha_info['digest'][:] + + def RND(a,b,c,d,e,f,g,h,i,ki): + t0 = (h + Sigma1(e) + Ch(e, f, g) + ki + W[i]) & 0xffffffffffffffff + t1 = (Sigma0(a) + Maj(a, b, c)) & 0xffffffffffffffff + d = (d + t0) & 0xffffffffffffffff + h = (t0 + t1) & 0xffffffffffffffff + return d & 0xffffffffffffffff, h & 0xffffffffffffffff + + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98d728ae22) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x7137449123ef65cd) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcfec4d3b2f) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba58189dbbc) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25bf348b538) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1b605d019) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4af194f9b) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5da6d8118) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98a3030242) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b0145706fbe) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be4ee4b28c) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3d5ffb4e2) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74f27b896f) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe3b1696b1) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a725c71235) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174cf692694) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c19ef14ad2) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786384f25e3) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc68b8cd5b5) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc77ac9c65) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f592b0275) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa6ea6e483) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dcbd41fbd4) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da831153b5) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152ee66dfab) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d2db43210) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c898fb213f) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7beef0ee4) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf33da88fc2) + ss[6], ss[2] = 
RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147930aa725) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351e003826f) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x142929670a0e6e70) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a8546d22ffc) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b21385c26c926) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc5ac42aed) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d139d95b3df) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a73548baf63de) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb3c77b2a8) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e47edaee6) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c851482353b) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a14cf10364) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664bbc423001) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70d0f89791) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a30654be30) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819d6ef5218) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd69906245565a910) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e35855771202a) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa07032bbd1b8) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116b8d2d0c8) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c085141ab53) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774cdf8eeb99) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5e19b48a8) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3c5c95a63) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4ae3418acb) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f7763e373) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3d6b2b8a3) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee5defb2fc) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f43172f60) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814a1f0ab72) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc702081a6439ec) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa23631e28) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506cebde82bde9) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7b2c67915) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2e372532b) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],64,0xca273eceea26619c) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],65,0xd186b8c721c0c207) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],66,0xeada7dd6cde0eb1e) + ss[0], ss[4] = 
RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],67,0xf57d4f7fee6ed178) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],68,0x06f067aa72176fba) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],69,0x0a637dc5a2c898a6) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],70,0x113f9804bef90dae) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],71,0x1b710b35131c471b) + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],72,0x28db77f523047d84) + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],73,0x32caab7b40c72493) + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],74,0x3c9ebe0a15c9bebc) + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],75,0x431d67c49c100d4c) + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],76,0x4cc5d4becb3e42b6) + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],77,0x597f299cfc657e2a) + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],78,0x5fcb6fab3ad6faec) + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],79,0x6c44198c4a475817) + + dig = [] + for i, x in enumerate(sha_info['digest']): + dig.append( (x + ss[i]) & 0xffffffffffffffff ) + sha_info['digest'] = dig + +def sha_init(): + sha_info = new_shaobject() + sha_info['digest'] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179] + sha_info['count_lo'] = 0 + sha_info['count_hi'] = 0 + sha_info['local'] = 0 + sha_info['digestsize'] = 64 + return sha_info + +def sha384_init(): + sha_info = new_shaobject() + sha_info['digest'] = [ 0xcbbb9d5dc1059ed8, 0x629a292a367cd507, 0x9159015a3070dd17, 0x152fecd8f70e5939, 0x67332667ffc00b31, 0x8eb44a8768581511, 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4] + sha_info['count_lo'] = 0 + sha_info['count_hi'] = 0 + sha_info['local'] = 0 + sha_info['digestsize'] = 48 + return sha_info + +def getbuf(s): + if isinstance(s, str): + return s + elif isinstance(s, unicode): + return str(s) + else: + return buffer(s) + +def sha_update(sha_info, buffer): + count = len(buffer) + buffer_idx = 0 + clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff + if clo < sha_info['count_lo']: + sha_info['count_hi'] += 1 + sha_info['count_lo'] = clo + + sha_info['count_hi'] += (count >> 29) + + if sha_info['local']: + i = SHA_BLOCKSIZE - sha_info['local'] + if i > count: + i = count + + # copy buffer + for x in enumerate(buffer[buffer_idx:buffer_idx+i]): + sha_info['data'][sha_info['local']+x[0]] = struct.unpack('B', x[1])[0] + + count -= i + buffer_idx += i + + sha_info['local'] += i + if sha_info['local'] == SHA_BLOCKSIZE: + sha_transform(sha_info) + sha_info['local'] = 0 + else: + return + + while count >= SHA_BLOCKSIZE: + # copy buffer + sha_info['data'] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]] + count -= SHA_BLOCKSIZE + buffer_idx += SHA_BLOCKSIZE + sha_transform(sha_info) + + # copy buffer + pos = sha_info['local'] + sha_info['data'][pos:pos+count] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + count]] + sha_info['local'] = count + +def sha_final(sha_info): + lo_bit_count = sha_info['count_lo'] + hi_bit_count = sha_info['count_hi'] + count = (lo_bit_count >> 3) & 0x7f + sha_info['data'][count] = 0x80; + count += 1 + if count > SHA_BLOCKSIZE - 16: + # zero the bytes in data after the count + sha_info['data'] = 
sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
+        sha_transform(sha_info)
+        # zero bytes in data
+        sha_info['data'] = [0] * SHA_BLOCKSIZE
+    else:
+        sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
+
+    # append the message length, big-endian; only a 64-bit count is
+    # tracked, so the upper eight bytes of the 16-byte length field
+    # are always zero
+    for i in xrange(112, 120):
+        sha_info['data'][i] = 0
+
+    sha_info['data'][120] = (hi_bit_count >> 24) & 0xff
+    sha_info['data'][121] = (hi_bit_count >> 16) & 0xff
+    sha_info['data'][122] = (hi_bit_count >> 8) & 0xff
+    sha_info['data'][123] = (hi_bit_count >> 0) & 0xff
+    sha_info['data'][124] = (lo_bit_count >> 24) & 0xff
+    sha_info['data'][125] = (lo_bit_count >> 16) & 0xff
+    sha_info['data'][126] = (lo_bit_count >> 8) & 0xff
+    sha_info['data'][127] = (lo_bit_count >> 0) & 0xff
+
+    sha_transform(sha_info)
+
+    dig = []
+    for i in sha_info['digest']:
+        dig.extend([ ((i>>56) & 0xff), ((i>>48) & 0xff), ((i>>40) & 0xff), ((i>>32) & 0xff), ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
+    return ''.join([chr(i) for i in dig])
+
+class sha512(object):
+    digest_size = digestsize = SHA_DIGESTSIZE
+    block_size = SHA_BLOCKSIZE
+
+    def __init__(self, s=None):
+        self._sha = sha_init()
+        if s:
+            sha_update(self._sha, getbuf(s))
+
+    def update(self, s):
+        sha_update(self._sha, getbuf(s))
+
+    def digest(self):
+        return sha_final(self._sha.copy())[:self._sha['digestsize']]
+
+    def hexdigest(self):
+        return ''.join(['%.2x' % ord(i) for i in self.digest()])
+
+    def copy(self):
+        new = sha512.__new__(sha512)
+        new._sha = self._sha.copy()
+        return new
+
+class sha384(sha512):
+    digest_size = digestsize = 48
+
+    def __init__(self, s=None):
+        self._sha = sha384_init()
+        if s:
+            sha_update(self._sha, getbuf(s))
+
+    def copy(self):
+        new = sha384.__new__(sha384)
+        new._sha = self._sha.copy()
+        return new
+
+if __name__ == "__main__":
+    a_str = "just a test string"
+
+    assert sha512().hexdigest() == "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
+    assert sha512(a_str).hexdigest() == "68be4c6664af867dd1d01c8d77e963d87d77b702400c8fabae355a41b8927a5a5533a7f1c28509bbd65c5f3ac716f33be271fbda0ca018b71a84708c9fae8a53"
+    assert sha512(a_str*7).hexdigest() == "3233acdbfcfff9bff9fc72401d31dbffa62bd24e9ec846f0578d647da73258d9f0879f7fde01fe2cc6516af3f343807fdef79e23d696c923d79931db46bf1819"
+
+    s = sha512(a_str)
+    s.update(a_str)
+    assert s.hexdigest() == "341aeb668730bbb48127d5531115f3c39d12cb9586a6ca770898398aff2411087cfe0b570689adf328cddeb1f00803acce6737a19f310b53bbdb0320828f75bb"
diff --git a/awx/lib/site-packages/setuptools/archive_util.py b/awx/lib/site-packages/setuptools/archive_util.py
new file mode 100644
index 0000000000..1109f34677
--- /dev/null
+++ b/awx/lib/site-packages/setuptools/archive_util.py
@@ -0,0 +1,210 @@
+"""Utilities for extracting common archive formats"""
+
+
+__all__ = [
+    "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
+    "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
+]
+
+import zipfile, tarfile, os, shutil, posixpath
+from pkg_resources import ensure_directory
+from distutils.errors import DistutilsError
+
+class UnrecognizedFormat(DistutilsError):
+    """Couldn't recognize the archive type"""
+
+def default_filter(src, dst):
+    """The default progress/filter callback; extracts every file by
+    returning its destination path unchanged"""
+    return dst
+
+
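+# For illustration only: a caller of unpack_archive() below may supply its
+# own callback in place of default_filter. The callback receives the
+# archive-internal source path and the proposed destination, and returns
+# the destination (possibly rewritten) to extract, or None to skip that
+# entry. A minimal sketch, assuming a hypothetical archive path and
+# destination directory 'dest':
+#
+#     def logging_filter(src, dst):
+#         print("extracting %s -> %s" % (src, dst))
+#         if src.endswith('.pyc'):
+#             return None     # skip compiled files
+#         return dst
+#
+#     unpack_archive('pkg-1.0.tar.gz', dest, progress_filter=logging_filter)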
+ + + + +def unpack_archive(filename, extract_dir, progress_filter=default_filter, + drivers=None +): + """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` + + `progress_filter` is a function taking two arguments: a source path + internal to the archive ('/'-separated), and a filesystem path where it + will be extracted. The callback must return the desired extract path + (which may be the same as the one passed in), or else ``None`` to skip + that file or directory. The callback can thus be used to report on the + progress of the extraction, as well as to filter the items extracted or + alter their extraction paths. + + `drivers`, if supplied, must be a non-empty sequence of functions with the + same signature as this function (minus the `drivers` argument), that raise + ``UnrecognizedFormat`` if they do not support extracting the designated + archive type. The `drivers` are tried in sequence until one is found that + does not raise an error, or until all are exhausted (in which case + ``UnrecognizedFormat`` is raised). If you do not supply a sequence of + drivers, the module's ``extraction_drivers`` constant will be used, which + means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that + order. + """ + for driver in drivers or extraction_drivers: + try: + driver(filename, extract_dir, progress_filter) + except UnrecognizedFormat: + continue + else: + return + else: + raise UnrecognizedFormat( + "Not a recognized archive type: %s" % filename + ) + + + + + + + +def unpack_directory(filename, extract_dir, progress_filter=default_filter): + """"Unpack" a directory, using the same interface as for archives + + Raises ``UnrecognizedFormat`` if `filename` is not a directory + """ + if not os.path.isdir(filename): + raise UnrecognizedFormat("%s is not a directory" % (filename,)) + + paths = {filename:('',extract_dir)} + for base, dirs, files in os.walk(filename): + src,dst = paths[base] + for d in dirs: + paths[os.path.join(base,d)] = src+d+'/', os.path.join(dst,d) + for f in files: + name = src+f + target = os.path.join(dst,f) + target = progress_filter(src+f, target) + if not target: + continue # skip non-files + ensure_directory(target) + f = os.path.join(base,f) + shutil.copyfile(f, target) + shutil.copystat(f, target) + + + + + + + + + + + + + + + + + + +def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): + """Unpack zip `filename` to `extract_dir` + + Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined + by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation + of the `progress_filter` argument. + """ + + if not zipfile.is_zipfile(filename): + raise UnrecognizedFormat("%s is not a zip file" % (filename,)) + + z = zipfile.ZipFile(filename) + try: + for info in z.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name.split('/'): + continue + + target = os.path.join(extract_dir, *name.split('/')) + target = progress_filter(name, target) + if not target: + continue + if name.endswith('/'): + # directory + ensure_directory(target) + else: + # file + ensure_directory(target) + data = z.read(info.filename) + f = open(target,'wb') + try: + f.write(data) + finally: + f.close() + del data + unix_attributes = info.external_attr >> 16 + if unix_attributes: + os.chmod(target, unix_attributes) + finally: + z.close() + + +def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + + Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined + by ``tarfile.open()``). See ``unpack_archive()`` for an explanation + of the `progress_filter` argument. + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise UnrecognizedFormat( + "%s is not a compressed or uncompressed tar file" % (filename,) + ) + try: + tarobj.chown = lambda *args: None # don't do any chowning! + for member in tarobj: + name = member.name + # don't extract absolute paths or ones with .. in them + if not name.startswith('/') and '..' not in name.split('/'): + prelim_dst = os.path.join(extract_dir, *name.split('/')) + + # resolve any links and to extract the link targets as normal files + while member is not None and (member.islnk() or member.issym()): + linkpath = member.linkname + if member.issym(): + linkpath = posixpath.join(posixpath.dirname(member.name), linkpath) + linkpath = posixpath.normpath(linkpath) + member = tarobj._getmember(linkpath) + + if member is not None and (member.isfile() or member.isdir()): + final_dst = progress_filter(name, prelim_dst) + if final_dst: + if final_dst.endswith(os.sep): + final_dst = final_dst[:-1] + try: + tarobj._extract_member(member, final_dst) # XXX Ugh + except tarfile.ExtractError: + pass # chown/chmod/mkfifo/mknode/makedev failed + return True + finally: + tarobj.close() + +extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile diff --git a/awx/lib/site-packages/setuptools/command/__init__.py b/awx/lib/site-packages/setuptools/command/__init__.py new file mode 100644 index 0000000000..b063fa1925 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/__init__.py @@ -0,0 +1,21 @@ +__all__ = [ + 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop', + 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts', + 'sdist', 'setopt', 'test', 'upload', 'install_egg_info', 'install_scripts', + 'register', 'bdist_wininst', 'upload_docs', +] + +from setuptools.command import install_scripts +import sys + +if sys.version>='2.5': + # In Python 2.5 and above, distutils includes its own upload command + __all__.remove('upload') + +from distutils.command.bdist import bdist + +if 'egg' not in bdist.format_commands: + bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") + bdist.format_commands.append('egg') + +del bdist, sys diff --git a/awx/lib/site-packages/setuptools/command/alias.py b/awx/lib/site-packages/setuptools/command/alias.py new file mode 100644 index 0000000000..52384e1a28 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/alias.py @@ -0,0 +1,82 @@ +import distutils, os +from setuptools import Command +from distutils.util import convert_path +from distutils import log +from distutils.errors import * +from setuptools.command.setopt import edit_config, option_base, config_file + +def shquote(arg): + """Quote an 
argument for later parsing by shlex.split()""" + for c in '"', "'", "\\", "#": + if c in arg: return repr(arg) + if arg.split() != [arg]: + return repr(arg) + return arg + + +class alias(option_base): + """Define a shortcut that invokes one or more commands""" + + description = "define a shortcut to invoke one or more commands" + command_consumes_arguments = True + + user_options = [ + ('remove', 'r', 'remove (unset) the alias'), + ] + option_base.user_options + + boolean_options = option_base.boolean_options + ['remove'] + + def initialize_options(self): + option_base.initialize_options(self) + self.args = None + self.remove = None + + def finalize_options(self): + option_base.finalize_options(self) + if self.remove and len(self.args) != 1: + raise DistutilsOptionError( + "Must specify exactly one argument (the alias name) when " + "using --remove" + ) + + def run(self): + aliases = self.distribution.get_option_dict('aliases') + + if not self.args: + print("Command Aliases") + print("---------------") + for alias in aliases: + print("setup.py alias", format_alias(alias, aliases)) + return + + elif len(self.args)==1: + alias, = self.args + if self.remove: + command = None + elif alias in aliases: + print("setup.py alias", format_alias(alias, aliases)) + return + else: + print("No alias definition found for %r" % alias) + return + else: + alias = self.args[0] + command = ' '.join(map(shquote,self.args[1:])) + + edit_config(self.filename, {'aliases': {alias:command}}, self.dry_run) + + +def format_alias(name, aliases): + source, command = aliases[name] + if source == config_file('global'): + source = '--global-config ' + elif source == config_file('user'): + source = '--user-config ' + elif source == config_file('local'): + source = '' + else: + source = '--filename=%r' % source + return source+name+' '+command + + + diff --git a/awx/lib/site-packages/setuptools/command/bdist_egg.py b/awx/lib/site-packages/setuptools/command/bdist_egg.py new file mode 100644 index 0000000000..c577615824 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/bdist_egg.py @@ -0,0 +1,553 @@ +"""setuptools.command.bdist_egg + +Build .egg distributions""" + +# This module should be kept compatible with Python 2.3 +import sys, os, marshal +from setuptools import Command +from distutils.dir_util import remove_tree, mkpath +try: + # Python 2.7 or >=3.2 + from sysconfig import get_path, get_python_version + def _get_purelib(): + return get_path("purelib") +except ImportError: + from distutils.sysconfig import get_python_lib, get_python_version + def _get_purelib(): + return get_python_lib(False) + +from distutils import log +from distutils.errors import DistutilsSetupError +from pkg_resources import get_build_platform, Distribution, ensure_directory +from pkg_resources import EntryPoint +from types import CodeType +from setuptools.compat import basestring, next +from setuptools.extension import Library + +def strip_module(filename): + if '.' 
in filename: + filename = os.path.splitext(filename)[0] + if filename.endswith('module'): + filename = filename[:-6] + return filename + +def write_stub(resource, pyfile): + f = open(pyfile,'w') + f.write('\n'.join([ + "def __bootstrap__():", + " global __bootstrap__, __loader__, __file__", + " import sys, pkg_resources, imp", + " __file__ = pkg_resources.resource_filename(__name__,%r)" + % resource, + " __loader__ = None; del __bootstrap__, __loader__", + " imp.load_dynamic(__name__,__file__)", + "__bootstrap__()", + "" # terminal \n + ])) + f.close() + +# stub __init__.py for packages distributed without one +NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)' + +class bdist_egg(Command): + + description = "create an \"egg\" distribution" + + user_options = [ + ('bdist-dir=', 'b', + "temporary directory for creating the distribution"), + ('plat-name=', 'p', + "platform name to embed in generated filenames " + "(default: %s)" % get_build_platform()), + ('exclude-source-files', None, + "remove all .py files from the generated egg"), + ('keep-temp', 'k', + "keep the pseudo-installation tree around after " + + "creating the distribution archive"), + ('dist-dir=', 'd', + "directory to put final built distributions in"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + ] + + boolean_options = [ + 'keep-temp', 'skip-build', 'exclude-source-files' + ] + + + + + + + + + + + + + + + + + + def initialize_options (self): + self.bdist_dir = None + self.plat_name = None + self.keep_temp = 0 + self.dist_dir = None + self.skip_build = 0 + self.egg_output = None + self.exclude_source_files = None + + + def finalize_options(self): + ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") + self.egg_info = ei_cmd.egg_info + + if self.bdist_dir is None: + bdist_base = self.get_finalized_command('bdist').bdist_base + self.bdist_dir = os.path.join(bdist_base, 'egg') + + if self.plat_name is None: + self.plat_name = get_build_platform() + + self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) + + if self.egg_output is None: + + # Compute filename of the output egg + basename = Distribution( + None, None, ei_cmd.egg_name, ei_cmd.egg_version, + get_python_version(), + self.distribution.has_ext_modules() and self.plat_name + ).egg_name() + + self.egg_output = os.path.join(self.dist_dir, basename+'.egg') + + + + + + + + + def do_install_data(self): + # Hack for packages that install data to install's --install-lib + self.get_finalized_command('install').install_lib = self.bdist_dir + + site_packages = os.path.normcase(os.path.realpath(_get_purelib())) + old, self.distribution.data_files = self.distribution.data_files,[] + + for item in old: + if isinstance(item,tuple) and len(item)==2: + if os.path.isabs(item[0]): + realpath = os.path.realpath(item[0]) + normalized = os.path.normcase(realpath) + if normalized==site_packages or normalized.startswith( + site_packages+os.sep + ): + item = realpath[len(site_packages)+1:], item[1] + # XXX else: raise ??? 
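+            # Data files whose absolute path points inside site-packages
+            # have been rewritten above to site-packages-relative paths,
+            # so the 'install_data' run below places them inside the egg
+            # build tree rather than the real site-packages directory.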
+ self.distribution.data_files.append(item) + + try: + log.info("installing package data to %s" % self.bdist_dir) + self.call_command('install_data', force=0, root=None) + finally: + self.distribution.data_files = old + + + def get_outputs(self): + return [self.egg_output] + + + def call_command(self,cmdname,**kw): + """Invoke reinitialized command `cmdname` with keyword args""" + for dirname in INSTALL_DIRECTORY_ATTRS: + kw.setdefault(dirname,self.bdist_dir) + kw.setdefault('skip_build',self.skip_build) + kw.setdefault('dry_run', self.dry_run) + cmd = self.reinitialize_command(cmdname, **kw) + self.run_command(cmdname) + return cmd + + + def run(self): + # Generate metadata first + self.run_command("egg_info") + # We run install_lib before install_data, because some data hacks + # pull their data path from the install_lib command. + log.info("installing library code to %s" % self.bdist_dir) + instcmd = self.get_finalized_command('install') + old_root = instcmd.root; instcmd.root = None + if self.distribution.has_c_libraries() and not self.skip_build: + self.run_command('build_clib') + cmd = self.call_command('install_lib', warn_dir=0) + instcmd.root = old_root + + all_outputs, ext_outputs = self.get_ext_outputs() + self.stubs = [] + to_compile = [] + for (p,ext_name) in enumerate(ext_outputs): + filename,ext = os.path.splitext(ext_name) + pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py') + self.stubs.append(pyfile) + log.info("creating stub loader for %s" % ext_name) + if not self.dry_run: + write_stub(os.path.basename(ext_name), pyfile) + to_compile.append(pyfile) + ext_outputs[p] = ext_name.replace(os.sep,'/') + + to_compile.extend(self.make_init_files()) + if to_compile: + cmd.byte_compile(to_compile) + if self.distribution.data_files: + self.do_install_data() + + # Make the EGG-INFO directory + archive_root = self.bdist_dir + egg_info = os.path.join(archive_root,'EGG-INFO') + self.mkpath(egg_info) + if self.distribution.scripts: + script_dir = os.path.join(egg_info, 'scripts') + log.info("installing scripts to %s" % script_dir) + self.call_command('install_scripts',install_dir=script_dir,no_ep=1) + + self.copy_metadata_to(egg_info) + native_libs = os.path.join(egg_info, "native_libs.txt") + if all_outputs: + log.info("writing %s" % native_libs) + if not self.dry_run: + ensure_directory(native_libs) + libs_file = open(native_libs, 'wt') + libs_file.write('\n'.join(all_outputs)) + libs_file.write('\n') + libs_file.close() + elif os.path.isfile(native_libs): + log.info("removing %s" % native_libs) + if not self.dry_run: + os.unlink(native_libs) + + write_safety_flag( + os.path.join(archive_root,'EGG-INFO'), self.zip_safe() + ) + + if os.path.exists(os.path.join(self.egg_info,'depends.txt')): + log.warn( + "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" + "Use the install_requires/extras_require setup() args instead." 
+ ) + + if self.exclude_source_files: + self.zap_pyfiles() + + # Make the archive + make_zipfile(self.egg_output, archive_root, verbose=self.verbose, + dry_run=self.dry_run, mode=self.gen_header()) + if not self.keep_temp: + remove_tree(self.bdist_dir, dry_run=self.dry_run) + + # Add to 'Distribution.dist_files' so that the "upload" command works + getattr(self.distribution,'dist_files',[]).append( + ('bdist_egg',get_python_version(),self.egg_output)) + + + + + def zap_pyfiles(self): + log.info("Removing .py files from temporary directory") + for base,dirs,files in walk_egg(self.bdist_dir): + for name in files: + if name.endswith('.py'): + path = os.path.join(base,name) + log.debug("Deleting %s", path) + os.unlink(path) + + def zip_safe(self): + safe = getattr(self.distribution,'zip_safe',None) + if safe is not None: + return safe + log.warn("zip_safe flag not set; analyzing archive contents...") + return analyze_egg(self.bdist_dir, self.stubs) + + def make_init_files(self): + """Create missing package __init__ files""" + init_files = [] + for base,dirs,files in walk_egg(self.bdist_dir): + if base==self.bdist_dir: + # don't put an __init__ in the root + continue + for name in files: + if name.endswith('.py'): + if '__init__.py' not in files: + pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.') + if self.distribution.has_contents_for(pkg): + log.warn("Creating missing __init__.py for %s",pkg) + filename = os.path.join(base,'__init__.py') + if not self.dry_run: + f = open(filename,'w'); f.write(NS_PKG_STUB) + f.close() + init_files.append(filename) + break + else: + # not a package, don't traverse to subdirectories + dirs[:] = [] + + return init_files + + def gen_header(self): + epm = EntryPoint.parse_map(self.distribution.entry_points or '') + ep = epm.get('setuptools.installation',{}).get('eggsecutable') + if ep is None: + return 'w' # not an eggsecutable, do it the usual way. 
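+        # An eggsecutable was requested: after validating the entry point,
+        # this method writes a small shell-script header to the output path
+        # and returns 'a', so that make_zipfile() appends the zip data after
+        # that header. Because a zipfile is located from the end of the file
+        # (its central directory), the result is simultaneously a runnable
+        # shell script and a valid .egg archive.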
+ + if not ep.attrs or ep.extras: + raise DistutilsSetupError( + "eggsecutable entry point (%r) cannot have 'extras' " + "or refer to a module" % (ep,) + ) + + pyver = sys.version[:3] + pkg = ep.module_name + full = '.'.join(ep.attrs) + base = ep.attrs[0] + basename = os.path.basename(self.egg_output) + + header = ( + "#!/bin/sh\n" + 'if [ `basename $0` = "%(basename)s" ]\n' + 'then exec python%(pyver)s -c "' + "import sys, os; sys.path.insert(0, os.path.abspath('$0')); " + "from %(pkg)s import %(base)s; sys.exit(%(full)s())" + '" "$@"\n' + 'else\n' + ' echo $0 is not the correct name for this egg file.\n' + ' echo Please rename it back to %(basename)s and try again.\n' + ' exec false\n' + 'fi\n' + + ) % locals() + + if not self.dry_run: + mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run) + f = open(self.egg_output, 'w') + f.write(header) + f.close() + return 'a' + + + def copy_metadata_to(self, target_dir): + "Copy metadata (egg info) to the target_dir" + # normalize the path (so that a forward-slash in egg_info will + # match using startswith below) + norm_egg_info = os.path.normpath(self.egg_info) + prefix = os.path.join(norm_egg_info,'') + for path in self.ei_cmd.filelist.files: + if path.startswith(prefix): + target = os.path.join(target_dir, path[len(prefix):]) + ensure_directory(target) + self.copy_file(path, target) + + def get_ext_outputs(self): + """Get a list of relative paths to C extensions in the output distro""" + + all_outputs = [] + ext_outputs = [] + + paths = {self.bdist_dir:''} + for base, dirs, files in os.walk(self.bdist_dir): + for filename in files: + if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: + all_outputs.append(paths[base]+filename) + for filename in dirs: + paths[os.path.join(base,filename)] = paths[base]+filename+'/' + + if self.distribution.has_ext_modules(): + build_cmd = self.get_finalized_command('build_ext') + for ext in build_cmd.extensions: + if isinstance(ext,Library): + continue + fullname = build_cmd.get_ext_fullname(ext.name) + filename = build_cmd.get_ext_filename(fullname) + if not os.path.basename(filename).startswith('dl-'): + if os.path.exists(os.path.join(self.bdist_dir,filename)): + ext_outputs.append(filename) + + return all_outputs, ext_outputs + + +NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split()) + + + + +def walk_egg(egg_dir): + """Walk an unpacked egg's contents, skipping the metadata directory""" + walker = os.walk(egg_dir) + base,dirs,files = next(walker) + if 'EGG-INFO' in dirs: + dirs.remove('EGG-INFO') + yield base,dirs,files + for bdf in walker: + yield bdf + +def analyze_egg(egg_dir, stubs): + # check for existing flag in EGG-INFO + for flag,fn in safety_flags.items(): + if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)): + return flag + if not can_scan(): return False + safe = True + for base, dirs, files in walk_egg(egg_dir): + for name in files: + if name.endswith('.py') or name.endswith('.pyw'): + continue + elif name.endswith('.pyc') or name.endswith('.pyo'): + # always scan, even if we already know we're not safe + safe = scan_module(egg_dir, base, name, stubs) and safe + return safe + +def write_safety_flag(egg_dir, safe): + # Write or remove zip safety flag file(s) + for flag,fn in safety_flags.items(): + fn = os.path.join(egg_dir, fn) + if os.path.exists(fn): + if safe is None or bool(safe) != flag: + os.unlink(fn) + elif safe is not None and bool(safe)==flag: + f=open(fn,'wt'); f.write('\n'); f.close() + +safety_flags = { + True: 'zip-safe', + False: 'not-zip-safe', +} 
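+# For illustration: given the mapping above, write_safety_flag(d, True)
+# leaves a single empty 'zip-safe' marker file in directory d,
+# write_safety_flag(d, False) leaves 'not-zip-safe', and
+# write_safety_flag(d, None) removes whichever marker exists. A minimal
+# sketch, assuming a hypothetical existing directory 'demo/EGG-INFO':
+#
+#     write_safety_flag('demo/EGG-INFO', False)
+#     assert os.path.exists('demo/EGG-INFO/not-zip-safe')
+#     assert not os.path.exists('demo/EGG-INFO/zip-safe')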
+
+def scan_module(egg_dir, base, name, stubs):
+    """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+    filename = os.path.join(base, name)
+    if filename[:-1] in stubs:
+        return True     # Extension module
+    pkg = base[len(egg_dir)+1:].replace(os.sep, '.')
+    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
+    if sys.version_info < (3, 3):
+        skip = 8    # skip magic & date
+    else:
+        skip = 12   # skip magic & date & file size
+    f = open(filename, 'rb')
+    f.read(skip)
+    code = marshal.load(f)
+    f.close()
+    safe = True
+    symbols = dict.fromkeys(iter_symbols(code))
+    for bad in ['__file__', '__path__']:
+        if bad in symbols:
+            log.warn("%s: module references %s", module, bad)
+            safe = False
+    if 'inspect' in symbols:
+        for bad in [
+            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
+            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+            'getinnerframes', 'getouterframes', 'stack', 'trace'
+        ]:
+            if bad in symbols:
+                log.warn("%s: module MAY be using inspect.%s", module, bad)
+                safe = False
+    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
+        if sys.version[:3] == "2.4":    # -m works w/zipfiles in 2.5
+            log.warn("%s: top-level module may be 'python -m' script", module)
+            safe = False
+    return safe
+
+def iter_symbols(code):
+    """Yield names and strings used by `code` and its nested code objects"""
+    for name in code.co_names:
+        yield name
+    for const in code.co_consts:
+        if isinstance(const, basestring):
+            yield const
+        elif isinstance(const, CodeType):
+            for name in iter_symbols(const):
+                yield name
+
+def can_scan():
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        # CPython, PyPy, etc.
+        return True
+    log.warn("Unable to analyze compiled code on this platform.")
+    log.warn("Please ask the author to include a 'zip_safe'"
+             " setting (either True or False) in the package's setup.py")
+    return False
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = [
+    'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
+                 mode='w'):
+    """Create the zip file 'zip_filename' containing all the files under
+    'base_dir', using the "zipfile" module. Returns the name of the
+    output zip file.
+ """ + import zipfile + mkpath(os.path.dirname(zip_filename), dry_run=dry_run) + log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) + + def visit(z, dirname, names): + for name in names: + path = os.path.normpath(os.path.join(dirname, name)) + if os.path.isfile(path): + p = path[len(base_dir)+1:] + if not dry_run: + z.write(path, p) + log.debug("adding '%s'" % p) + + if compress is None: + compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits + + compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)] + if not dry_run: + z = zipfile.ZipFile(zip_filename, mode, compression=compression) + for dirname, dirs, files in os.walk(base_dir): + visit(z, dirname, files) + z.close() + else: + for dirname, dirs, files in os.walk(base_dir): + visit(None, dirname, files) + return zip_filename +# diff --git a/awx/lib/site-packages/setuptools/command/bdist_rpm.py b/awx/lib/site-packages/setuptools/command/bdist_rpm.py new file mode 100644 index 0000000000..8c48da3559 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/bdist_rpm.py @@ -0,0 +1,82 @@ +# This is just a kludge so that bdist_rpm doesn't guess wrong about the +# distribution name and version, if the egg_info command is going to alter +# them, another kludge to allow you to build old-style non-egg RPMs, and +# finally, a kludge to track .rpm files for uploading when run on Python <2.5. + +from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm +import sys, os + +class bdist_rpm(_bdist_rpm): + + def initialize_options(self): + _bdist_rpm.initialize_options(self) + self.no_egg = None + + if sys.version<"2.5": + # Track for uploading any .rpm file(s) moved to self.dist_dir + def move_file(self, src, dst, level=1): + _bdist_rpm.move_file(self, src, dst, level) + if dst==self.dist_dir and src.endswith('.rpm'): + getattr(self.distribution,'dist_files',[]).append( + ('bdist_rpm', + src.endswith('.src.rpm') and 'any' or sys.version[:3], + os.path.join(dst, os.path.basename(src))) + ) + + def run(self): + self.run_command('egg_info') # ensure distro name is up-to-date + _bdist_rpm.run(self) + + + + + + + + + + + + + + def _make_spec_file(self): + version = self.distribution.get_version() + rpmversion = version.replace('-','_') + spec = _bdist_rpm._make_spec_file(self) + line23 = '%define version '+version + line24 = '%define version '+rpmversion + spec = [ + line.replace( + "Source0: %{name}-%{version}.tar", + "Source0: %{name}-%{unmangled_version}.tar" + ).replace( + "setup.py install ", + "setup.py install --single-version-externally-managed " + ).replace( + "%setup", + "%setup -n %{name}-%{unmangled_version}" + ).replace(line23,line24) + for line in spec + ] + spec.insert(spec.index(line24)+1, "%define unmangled_version "+version) + return spec + + + + + + + + + + + + + + + + + + + + diff --git a/awx/lib/site-packages/setuptools/command/bdist_wininst.py b/awx/lib/site-packages/setuptools/command/bdist_wininst.py new file mode 100644 index 0000000000..e8521f834c --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/bdist_wininst.py @@ -0,0 +1,82 @@ +from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst +import os, sys + +class bdist_wininst(_bdist_wininst): + _good_upload = _bad_upload = None + + def create_exe(self, arcname, fullname, bitmap=None): + _bdist_wininst.create_exe(self, arcname, fullname, bitmap) + installer_name = self.get_installer_filename(fullname) + if self.target_version: + pyversion = self.target_version + # fix 2.5+ bdist_wininst 
ignoring --target-version spec + self._bad_upload = ('bdist_wininst', 'any', installer_name) + else: + pyversion = 'any' + self._good_upload = ('bdist_wininst', pyversion, installer_name) + + def _fix_upload_names(self): + good, bad = self._good_upload, self._bad_upload + dist_files = getattr(self.distribution, 'dist_files', []) + if bad in dist_files: + dist_files.remove(bad) + if good not in dist_files: + dist_files.append(good) + + def reinitialize_command (self, command, reinit_subcommands=0): + cmd = self.distribution.reinitialize_command( + command, reinit_subcommands) + if command in ('install', 'install_lib'): + cmd.install_lib = None # work around distutils bug + return cmd + + def run(self): + self._is_running = True + try: + _bdist_wininst.run(self) + self._fix_upload_names() + finally: + self._is_running = False + + + if not hasattr(_bdist_wininst, 'get_installer_filename'): + def get_installer_filename(self, fullname): + # Factored out to allow overriding in subclasses + if self.target_version: + # if we create an installer for a specific python version, + # it's better to include this in the name + installer_name = os.path.join(self.dist_dir, + "%s.win32-py%s.exe" % + (fullname, self.target_version)) + else: + installer_name = os.path.join(self.dist_dir, + "%s.win32.exe" % fullname) + return installer_name + # get_installer_filename() + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/awx/lib/site-packages/setuptools/command/build_ext.py b/awx/lib/site-packages/setuptools/command/build_ext.py new file mode 100644 index 0000000000..50a039ce50 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/build_ext.py @@ -0,0 +1,298 @@ +from distutils.command.build_ext import build_ext as _du_build_ext +try: + # Attempt to use Pyrex for building extensions, if available + from Pyrex.Distutils.build_ext import build_ext as _build_ext +except ImportError: + _build_ext = _du_build_ext + +import os, sys +from distutils.file_util import copy_file +from setuptools.extension import Library +from distutils.ccompiler import new_compiler +from distutils.sysconfig import customize_compiler +try: + # Python 2.7 or >=3.2 + from sysconfig import _CONFIG_VARS +except ImportError: + from distutils.sysconfig import get_config_var + get_config_var("LDSHARED") # make sure _config_vars is initialized + del get_config_var + from distutils.sysconfig import _config_vars as _CONFIG_VARS +from distutils import log +from distutils.errors import * + +have_rtld = False +use_stubs = False +libtype = 'shared' + +if sys.platform == "darwin": + use_stubs = True +elif os.name != 'nt': + try: + from dl import RTLD_NOW + have_rtld = True + use_stubs = True + except ImportError: + pass + +def if_dl(s): + if have_rtld: + return s + return '' + + + + + + +class build_ext(_build_ext): + def run(self): + """Build extensions in build directory, then copy if --inplace""" + old_inplace, self.inplace = self.inplace, 0 + _build_ext.run(self) + self.inplace = old_inplace + if old_inplace: + self.copy_extensions_to_source() + + def copy_extensions_to_source(self): + build_py = self.get_finalized_command('build_py') + for ext in self.extensions: + fullname = self.get_ext_fullname(ext.name) + filename = self.get_ext_filename(fullname) + modpath = fullname.split('.') + package = '.'.join(modpath[:-1]) + package_dir = build_py.get_package_dir(package) + dest_filename = os.path.join(package_dir,os.path.basename(filename)) + src_filename = os.path.join(self.build_lib,filename) + + # Always copy, even if source is 
older than destination, to ensure + # that the right extensions for the current Python/platform are + # used. + copy_file( + src_filename, dest_filename, verbose=self.verbose, + dry_run=self.dry_run + ) + if ext._needs_stub: + self.write_stub(package_dir or os.curdir, ext, True) + + + if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'): + # Workaround for problems using some Pyrex versions w/SWIG and/or 2.4 + def swig_sources(self, sources, *otherargs): + # first do any Pyrex processing + sources = _build_ext.swig_sources(self, sources) or sources + # Then do any actual SWIG stuff on the remainder + return _du_build_ext.swig_sources(self, sources, *otherargs) + + + + def get_ext_filename(self, fullname): + filename = _build_ext.get_ext_filename(self,fullname) + if fullname in self.ext_map: + ext = self.ext_map[fullname] + if isinstance(ext,Library): + fn, ext = os.path.splitext(filename) + return self.shlib_compiler.library_filename(fn,libtype) + elif use_stubs and ext._links_to_dynamic: + d,fn = os.path.split(filename) + return os.path.join(d,'dl-'+fn) + return filename + + def initialize_options(self): + _build_ext.initialize_options(self) + self.shlib_compiler = None + self.shlibs = [] + self.ext_map = {} + + def finalize_options(self): + _build_ext.finalize_options(self) + self.extensions = self.extensions or [] + self.check_extensions_list(self.extensions) + self.shlibs = [ext for ext in self.extensions + if isinstance(ext,Library)] + if self.shlibs: + self.setup_shlib_compiler() + for ext in self.extensions: + ext._full_name = self.get_ext_fullname(ext.name) + for ext in self.extensions: + fullname = ext._full_name + self.ext_map[fullname] = ext + + # distutils 3.1 will also ask for module names + # XXX what to do with conflicts? + self.ext_map[fullname.split('.')[-1]] = ext + + ltd = ext._links_to_dynamic = \ + self.shlibs and self.links_to_dynamic(ext) or False + ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library) + filename = ext._file_name = self.get_ext_filename(fullname) + libdir = os.path.dirname(os.path.join(self.build_lib,filename)) + if ltd and libdir not in ext.library_dirs: + ext.library_dirs.append(libdir) + if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: + ext.runtime_library_dirs.append(os.curdir) + + def setup_shlib_compiler(self): + compiler = self.shlib_compiler = new_compiler( + compiler=self.compiler, dry_run=self.dry_run, force=self.force + ) + if sys.platform == "darwin": + tmp = _CONFIG_VARS.copy() + try: + # XXX Help! I don't have any idea whether these are right... 
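+                # These assignments temporarily override the interpreter's
+                # build configuration so that customize_compiler() below
+                # emits dylib-building commands on OS X; the original
+                # values are restored in the finally clause.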
+ _CONFIG_VARS['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup" + _CONFIG_VARS['CCSHARED'] = " -dynamiclib" + _CONFIG_VARS['SO'] = ".dylib" + customize_compiler(compiler) + finally: + _CONFIG_VARS.clear() + _CONFIG_VARS.update(tmp) + else: + customize_compiler(compiler) + + if self.include_dirs is not None: + compiler.set_include_dirs(self.include_dirs) + if self.define is not None: + # 'define' option is a list of (name,value) tuples + for (name,value) in self.define: + compiler.define_macro(name, value) + if self.undef is not None: + for macro in self.undef: + compiler.undefine_macro(macro) + if self.libraries is not None: + compiler.set_libraries(self.libraries) + if self.library_dirs is not None: + compiler.set_library_dirs(self.library_dirs) + if self.rpath is not None: + compiler.set_runtime_library_dirs(self.rpath) + if self.link_objects is not None: + compiler.set_link_objects(self.link_objects) + + # hack so distutils' build_extension() builds a library instead + compiler.link_shared_object = link_shared_object.__get__(compiler) + + + + def get_export_symbols(self, ext): + if isinstance(ext,Library): + return ext.export_symbols + return _build_ext.get_export_symbols(self,ext) + + def build_extension(self, ext): + _compiler = self.compiler + try: + if isinstance(ext,Library): + self.compiler = self.shlib_compiler + _build_ext.build_extension(self,ext) + if ext._needs_stub: + self.write_stub( + self.get_finalized_command('build_py').build_lib, ext + ) + finally: + self.compiler = _compiler + + def links_to_dynamic(self, ext): + """Return true if 'ext' links to a dynamic lib in the same package""" + # XXX this should check to ensure the lib is actually being built + # XXX as dynamic, and not just using a locally-found version or a + # XXX static-compiled version + libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) + pkg = '.'.join(ext._full_name.split('.')[:-1]+['']) + for libname in ext.libraries: + if pkg+libname in libnames: return True + return False + + def get_outputs(self): + outputs = _build_ext.get_outputs(self) + optimize = self.get_finalized_command('build_py').optimize + for ext in self.extensions: + if ext._needs_stub: + base = os.path.join(self.build_lib, *ext._full_name.split('.')) + outputs.append(base+'.py') + outputs.append(base+'.pyc') + if optimize: + outputs.append(base+'.pyo') + return outputs + + def write_stub(self, output_dir, ext, compile=False): + log.info("writing stub loader for %s to %s",ext._full_name, output_dir) + stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py' + if compile and os.path.exists(stub_file): + raise DistutilsError(stub_file+" already exists! 
Please delete.") + if not self.dry_run: + f = open(stub_file,'w') + f.write('\n'.join([ + "def __bootstrap__():", + " global __bootstrap__, __file__, __loader__", + " import sys, os, pkg_resources, imp"+if_dl(", dl"), + " __file__ = pkg_resources.resource_filename(__name__,%r)" + % os.path.basename(ext._file_name), + " del __bootstrap__", + " if '__loader__' in globals():", + " del __loader__", + if_dl(" old_flags = sys.getdlopenflags()"), + " old_dir = os.getcwd()", + " try:", + " os.chdir(os.path.dirname(__file__))", + if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), + " imp.load_dynamic(__name__,__file__)", + " finally:", + if_dl(" sys.setdlopenflags(old_flags)"), + " os.chdir(old_dir)", + "__bootstrap__()", + "" # terminal \n + ])) + f.close() + if compile: + from distutils.util import byte_compile + byte_compile([stub_file], optimize=0, + force=True, dry_run=self.dry_run) + optimize = self.get_finalized_command('install_lib').optimize + if optimize > 0: + byte_compile([stub_file], optimize=optimize, + force=True, dry_run=self.dry_run) + if os.path.exists(stub_file) and not self.dry_run: + os.unlink(stub_file) + + +if use_stubs or os.name=='nt': + # Build shared libraries + # + def link_shared_object(self, objects, output_libname, output_dir=None, + libraries=None, library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None + ): self.link( + self.SHARED_LIBRARY, objects, output_libname, + output_dir, libraries, library_dirs, runtime_library_dirs, + export_symbols, debug, extra_preargs, extra_postargs, + build_temp, target_lang + ) +else: + # Build static libraries everywhere else + libtype = 'static' + + def link_shared_object(self, objects, output_libname, output_dir=None, + libraries=None, library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None + ): + # XXX we need to either disallow these attrs on Library instances, + # or warn/abort here if set, or something... 
+ #libraries=None, library_dirs=None, runtime_library_dirs=None, + #export_symbols=None, extra_preargs=None, extra_postargs=None, + #build_temp=None + + assert output_dir is None # distutils build_ext doesn't pass this + output_dir,filename = os.path.split(output_libname) + basename, ext = os.path.splitext(filename) + if self.library_filename("x").startswith('lib'): + # strip 'lib' prefix; this is kludgy if some platform uses + # a different prefix + basename = basename[3:] + + self.create_static_lib( + objects, basename, output_dir, debug, target_lang + ) + + diff --git a/awx/lib/site-packages/setuptools/command/build_py.py b/awx/lib/site-packages/setuptools/command/build_py.py new file mode 100644 index 0000000000..8751acd493 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/build_py.py @@ -0,0 +1,280 @@ +import os.path, sys, fnmatch +from distutils.command.build_py import build_py as _build_py +from distutils.util import convert_path +from glob import glob + +try: + from distutils.util import Mixin2to3 as _Mixin2to3 + # add support for converting doctests that is missing in 3.1 distutils + from distutils import log + from lib2to3.refactor import RefactoringTool, get_fixers_from_package + import setuptools + class DistutilsRefactoringTool(RefactoringTool): + def log_error(self, msg, *args, **kw): + log.error(msg, *args) + + def log_message(self, msg, *args): + log.info(msg, *args) + + def log_debug(self, msg, *args): + log.debug(msg, *args) + + class Mixin2to3(_Mixin2to3): + def run_2to3(self, files, doctests = False): + # See of the distribution option has been set, otherwise check the + # setuptools default. + if self.distribution.use_2to3 is not True: + return + if not files: + return + log.info("Fixing "+" ".join(files)) + self.__build_fixer_names() + self.__exclude_fixers() + if doctests: + if setuptools.run_2to3_on_doctests: + r = DistutilsRefactoringTool(self.fixer_names) + r.refactor(files, write=True, doctests_only=True) + else: + _Mixin2to3.run_2to3(self, files) + + def __build_fixer_names(self): + if self.fixer_names: return + self.fixer_names = [] + for p in setuptools.lib2to3_fixer_packages: + self.fixer_names.extend(get_fixers_from_package(p)) + if self.distribution.use_2to3_fixers is not None: + for p in self.distribution.use_2to3_fixers: + self.fixer_names.extend(get_fixers_from_package(p)) + + def __exclude_fixers(self): + excluded_fixers = getattr(self, 'exclude_fixers', []) + if self.distribution.use_2to3_exclude_fixers is not None: + excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers) + for fixer_name in excluded_fixers: + if fixer_name in self.fixer_names: + self.fixer_names.remove(fixer_name) + +except ImportError: + class Mixin2to3: + def run_2to3(self, files, doctests=True): + # Nothing done in 2.x + pass + +class build_py(_build_py, Mixin2to3): + """Enhanced 'build_py' command that includes data files with packages + + The data files are specified via a 'package_data' argument to 'setup()'. + See 'setuptools.dist.Distribution' for more details. + + Also, this version of the 'build_py' command allows you to specify both + 'py_modules' and 'packages' in the same setup operation. 
+ """ + def finalize_options(self): + _build_py.finalize_options(self) + self.package_data = self.distribution.package_data + self.exclude_package_data = self.distribution.exclude_package_data or {} + if 'data_files' in self.__dict__: del self.__dict__['data_files'] + self.__updated_files = [] + self.__doctests_2to3 = [] + + def run(self): + """Build modules, packages, and copy data files to build directory""" + if not self.py_modules and not self.packages: + return + + if self.py_modules: + self.build_modules() + + if self.packages: + self.build_packages() + self.build_package_data() + + self.run_2to3(self.__updated_files, False) + self.run_2to3(self.__updated_files, True) + self.run_2to3(self.__doctests_2to3, True) + + # Only compile actual .py files, using our base class' idea of what our + # output files are. + self.byte_compile(_build_py.get_outputs(self, include_bytecode=0)) + + def __getattr__(self,attr): + if attr=='data_files': # lazily compute data files + self.data_files = files = self._get_data_files(); return files + return _build_py.__getattr__(self,attr) + + def build_module(self, module, module_file, package): + outfile, copied = _build_py.build_module(self, module, module_file, package) + if copied: + self.__updated_files.append(outfile) + return outfile, copied + + def _get_data_files(self): + """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" + self.analyze_manifest() + data = [] + for package in self.packages or (): + # Locate package source directory + src_dir = self.get_package_dir(package) + + # Compute package build directory + build_dir = os.path.join(*([self.build_lib] + package.split('.'))) + + # Length of path to strip from found files + plen = len(src_dir)+1 + + # Strip directory from globbed filenames + filenames = [ + file[plen:] for file in self.find_data_files(package, src_dir) + ] + data.append( (package, src_dir, build_dir, filenames) ) + return data + + def find_data_files(self, package, src_dir): + """Return filenames for package's data files in 'src_dir'""" + globs = (self.package_data.get('', []) + + self.package_data.get(package, [])) + files = self.manifest_files.get(package, [])[:] + for pattern in globs: + # Each pattern has to be converted to a platform-specific path + files.extend(glob(os.path.join(src_dir, convert_path(pattern)))) + return self.exclude_data_files(package, src_dir, files) + + def build_package_data(self): + """Copy data files into build directory""" + lastdir = None + for package, src_dir, build_dir, filenames in self.data_files: + for filename in filenames: + target = os.path.join(build_dir, filename) + self.mkpath(os.path.dirname(target)) + srcfile = os.path.join(src_dir, filename) + outf, copied = self.copy_file(srcfile, target) + srcfile = os.path.abspath(srcfile) + if copied and srcfile in self.distribution.convert_2to3_doctests: + self.__doctests_2to3.append(outf) + + + def analyze_manifest(self): + self.manifest_files = mf = {} + if not self.distribution.include_package_data: + return + src_dirs = {} + for package in self.packages or (): + # Locate package source directory + src_dirs[assert_relative(self.get_package_dir(package))] = package + + self.run_command('egg_info') + ei_cmd = self.get_finalized_command('egg_info') + for path in ei_cmd.filelist.files: + d,f = os.path.split(assert_relative(path)) + prev = None + oldf = f + while d and d!=prev and d not in src_dirs: + prev = d + d, df = os.path.split(d) + f = os.path.join(df, f) + if d in src_dirs: + if path.endswith('.py') and f==oldf: + continue 
# it's a module, not data + mf.setdefault(src_dirs[d],[]).append(path) + + def get_data_files(self): pass # kludge 2.4 for lazy computation + + if sys.version<"2.4": # Python 2.4 already has this code + def get_outputs(self, include_bytecode=1): + """Return complete list of files copied to the build directory + + This includes both '.py' files and data files, as well as '.pyc' + and '.pyo' files if 'include_bytecode' is true. (This method is + needed for the 'install_lib' command to do its job properly, and to + generate a correct installation manifest.) + """ + return _build_py.get_outputs(self, include_bytecode) + [ + os.path.join(build_dir, filename) + for package, src_dir, build_dir,filenames in self.data_files + for filename in filenames + ] + + def check_package(self, package, package_dir): + """Check namespace packages' __init__ for declare_namespace""" + try: + return self.packages_checked[package] + except KeyError: + pass + + init_py = _build_py.check_package(self, package, package_dir) + self.packages_checked[package] = init_py + + if not init_py or not self.distribution.namespace_packages: + return init_py + + for pkg in self.distribution.namespace_packages: + if pkg==package or pkg.startswith(package+'.'): + break + else: + return init_py + + f = open(init_py,'rbU') + if 'declare_namespace'.encode() not in f.read(): + from distutils import log + log.warn( + "WARNING: %s is a namespace package, but its __init__.py does\n" + "not declare_namespace(); setuptools 0.7 will REQUIRE this!\n" + '(See the setuptools manual under "Namespace Packages" for ' + "details.)\n", package + ) + f.close() + return init_py + + def initialize_options(self): + self.packages_checked={} + _build_py.initialize_options(self) + + + def get_package_dir(self, package): + res = _build_py.get_package_dir(self, package) + if self.distribution.src_root is not None: + return os.path.join(self.distribution.src_root, res) + return res + + + def exclude_data_files(self, package, src_dir, files): + """Filter filenames for package's data files in 'src_dir'""" + globs = (self.exclude_package_data.get('', []) + + self.exclude_package_data.get(package, [])) + bad = [] + for pattern in globs: + bad.extend( + fnmatch.filter( + files, os.path.join(src_dir, convert_path(pattern)) + ) + ) + bad = dict.fromkeys(bad) + seen = {} + return [ + f for f in files if f not in bad + and f not in seen and seen.setdefault(f,1) # ditch dupes + ] + + +def assert_relative(path): + if not os.path.isabs(path): + return path + from distutils.errors import DistutilsSetupError + raise DistutilsSetupError( +"""Error: setup script specifies an absolute path: + + %s + +setup() arguments must *always* be /-separated paths relative to the +setup.py directory, *never* absolute paths. 
+""" % path + ) + + + + + + + + + diff --git a/awx/lib/site-packages/setuptools/command/develop.py b/awx/lib/site-packages/setuptools/command/develop.py new file mode 100644 index 0000000000..1d500040d0 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/develop.py @@ -0,0 +1,167 @@ +from setuptools.command.easy_install import easy_install +from distutils.util import convert_path, subst_vars +from pkg_resources import Distribution, PathMetadata, normalize_path +from distutils import log +from distutils.errors import DistutilsError, DistutilsOptionError +import os, sys, setuptools, glob + +class develop(easy_install): + """Set up package for development""" + + description = "install package in 'development mode'" + + user_options = easy_install.user_options + [ + ("uninstall", "u", "Uninstall this source package"), + ("egg-path=", None, "Set the path to be used in the .egg-link file"), + ] + + boolean_options = easy_install.boolean_options + ['uninstall'] + + command_consumes_arguments = False # override base + + def run(self): + if self.uninstall: + self.multi_version = True + self.uninstall_link() + else: + self.install_for_development() + self.warn_deprecated_options() + + def initialize_options(self): + self.uninstall = None + self.egg_path = None + easy_install.initialize_options(self) + self.setup_path = None + self.always_copy_from = '.' # always copy eggs installed in curdir + + + + def finalize_options(self): + ei = self.get_finalized_command("egg_info") + if ei.broken_egg_info: + raise DistutilsError( + "Please rename %r to %r before using 'develop'" + % (ei.egg_info, ei.broken_egg_info) + ) + self.args = [ei.egg_name] + + + + + easy_install.finalize_options(self) + self.expand_basedirs() + self.expand_dirs() + # pick up setup-dir .egg files only: no .egg-info + self.package_index.scan(glob.glob('*.egg')) + + self.egg_link = os.path.join(self.install_dir, ei.egg_name+'.egg-link') + self.egg_base = ei.egg_base + if self.egg_path is None: + self.egg_path = os.path.abspath(ei.egg_base) + + target = normalize_path(self.egg_base) + if normalize_path(os.path.join(self.install_dir, self.egg_path)) != target: + raise DistutilsOptionError( + "--egg-path must be a relative path from the install" + " directory to "+target + ) + + # Make a distribution for the package's source + self.dist = Distribution( + target, + PathMetadata(target, os.path.abspath(ei.egg_info)), + project_name = ei.egg_name + ) + + p = self.egg_base.replace(os.sep,'/') + if p!= os.curdir: + p = '../' * (p.count('/')+1) + self.setup_path = p + p = normalize_path(os.path.join(self.install_dir, self.egg_path, p)) + if p != normalize_path(os.curdir): + raise DistutilsOptionError( + "Can't get a consistent path to setup script from" + " installation directory", p, normalize_path(os.curdir)) + + def install_for_development(self): + if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False): + # If we run 2to3 we can not do this inplace: + + # Ensure metadata is up-to-date + self.reinitialize_command('build_py', inplace=0) + self.run_command('build_py') + bpy_cmd = self.get_finalized_command("build_py") + build_path = normalize_path(bpy_cmd.build_lib) + + # Build extensions + self.reinitialize_command('egg_info', egg_base=build_path) + self.run_command('egg_info') + + self.reinitialize_command('build_ext', inplace=0) + self.run_command('build_ext') + + # Fixup egg-link and easy-install.pth + ei_cmd = self.get_finalized_command("egg_info") + self.egg_path = build_path + self.dist.location = build_path 
+            self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info) # XXX
+        else:
+            # Without 2to3 inplace works fine:
+            self.run_command('egg_info')
+
+            # Build extensions in-place
+            self.reinitialize_command('build_ext', inplace=1)
+            self.run_command('build_ext')
+
+        self.install_site_py() # ensure that target dir is site-safe
+        if setuptools.bootstrap_install_from:
+            self.easy_install(setuptools.bootstrap_install_from)
+            setuptools.bootstrap_install_from = None
+
+        # create an .egg-link in the installation dir, pointing to our egg
+        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
+        if not self.dry_run:
+            f = open(self.egg_link,"w")
+            f.write(self.egg_path + "\n" + self.setup_path)
+            f.close()
+        # postprocess the installed distro, fixing up .pth, installing scripts,
+        # and handling requirements
+        self.process_distribution(None, self.dist, not self.no_deps)
+
+
+    def uninstall_link(self):
+        if os.path.exists(self.egg_link):
+            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
+            egg_link_file = open(self.egg_link)
+            contents = [line.rstrip() for line in egg_link_file]
+            egg_link_file.close()
+            if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
+                log.warn("Link points to %s: uninstall aborted", contents)
+                return
+            if not self.dry_run:
+                os.unlink(self.egg_link)
+        if not self.dry_run:
+            self.update_pth(self.dist) # remove any .pth link to us
+        if self.distribution.scripts:
+            # XXX should also check for entry point scripts!
+            log.warn("Note: you must uninstall or replace scripts manually!")
+
+    def install_egg_scripts(self, dist):
+        if dist is not self.dist:
+            # Installing a dependency, so fall back to normal behavior
+            return easy_install.install_egg_scripts(self,dist)
+
+        # create wrapper scripts in the script dir, pointing to dist.scripts
+
+        # new-style...
+        self.install_wrapper_scripts(dist)
+
+        # ...and old-style
+        for script_name in self.distribution.scripts or []:
+            script_path = os.path.abspath(convert_path(script_name))
+            script_name = os.path.basename(script_path)
+            f = open(script_path,'rU')
+            script_text = f.read()
+            f.close()
+            self.install_script(dist, script_name, script_text, script_path)
+
diff --git a/awx/lib/site-packages/setuptools/command/easy_install.py b/awx/lib/site-packages/setuptools/command/easy_install.py
new file mode 100644
index 0000000000..6ce19fa4df
--- /dev/null
+++ b/awx/lib/site-packages/setuptools/command/easy_install.py
@@ -0,0 +1,1931 @@
+#!python
+
+"""
+Easy Install
+------------
+
+A tool for doing automatic download/extract/build of distutils-based Python
+packages. For detailed documentation, see the accompanying EasyInstall.txt
+file, or visit the `EasyInstall home page`__.
+ +__ https://pythonhosted.org/setuptools/easy_install.html + +""" + +import sys +import os +import zipimport +import shutil +import tempfile +import zipfile +import re +import stat +import random +import platform +import textwrap +import warnings +import site +import struct +from glob import glob +from distutils import log, dir_util + +import pkg_resources +from setuptools import Command, _dont_write_bytecode +from setuptools.sandbox import run_setup +try: + # Python 2.7 or >=3.2 + from sysconfig import get_config_vars, get_path + def _get_platlib(): + return get_path("platlib") + def _get_purelib(): + return get_path("purelib") +except ImportError: + from distutils.sysconfig import get_config_vars, get_python_lib + def _get_platlib(): + return get_python_lib(True) + def _get_purelib(): + return get_python_lib(False) + +from distutils.util import get_platform +from distutils.util import convert_path, subst_vars +from distutils.errors import DistutilsArgError, DistutilsOptionError, \ + DistutilsError, DistutilsPlatformError +from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS +from setuptools.command import setopt +from setuptools.archive_util import unpack_archive +from setuptools.package_index import PackageIndex +from setuptools.package_index import URL_SCHEME +from setuptools.command import bdist_egg, egg_info +from setuptools.compat import (iteritems, maxsize, xrange, basestring, unicode, + reraise) +from pkg_resources import ( + yield_lines, normalize_path, resource_string, ensure_directory, + get_distribution, find_distributions, Environment, Requirement, + Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, + VersionConflict, DEVELOP_DIST, +) + +if '__VENV_LAUNCHER__' in os.environ: + sys_executable = os.environ['__VENV_LAUNCHER__'] +else: + sys_executable = os.path.normpath(sys.executable) + +__all__ = [ + 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg', + 'main', 'get_exe_prefixes', +] + +HAS_USER_SITE = not sys.version < "2.6" and site.ENABLE_USER_SITE + +def is_64bit(): + return struct.calcsize("P") == 8 + +def samefile(p1, p2): + both_exist = os.path.exists(p1) and os.path.exists(p2) + use_samefile = hasattr(os.path, 'samefile') and both_exist + if use_samefile: + return os.path.samefile(p1, p2) + norm_p1 = os.path.normpath(os.path.normcase(p1)) + norm_p2 = os.path.normpath(os.path.normcase(p2)) + return norm_p1 == norm_p2 + +if sys.version_info <= (3,): + def _to_ascii(s): + return s + def isascii(s): + try: + unicode(s, 'ascii') + return True + except UnicodeError: + return False +else: + def _to_ascii(s): + return s.encode('ascii') + def isascii(s): + try: + s.encode('ascii') + return True + except UnicodeError: + return False + +class easy_install(Command): + """Manage a download/build/install process""" + description = "Find/get/install Python packages" + command_consumes_arguments = True + + user_options = [ + ('prefix=', None, "installation prefix"), + ("zip-ok", "z", "install package as a zipfile"), + ("multi-version", "m", "make apps have to require() a version"), + ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"), + ("install-dir=", "d", "install package to DIR"), + ("script-dir=", "s", "install scripts to DIR"), + ("exclude-scripts", "x", "Don't install scripts"), + ("always-copy", "a", "Copy all needed packages to install dir"), + ("index-url=", "i", "base URL of Python Package Index"), + ("find-links=", "f", "additional URL(s) to search for packages"), + ("build-directory=", "b", + 
"download/extract/build in DIR; keep the results"), + ('optimize=', 'O', + "also compile with optimization: -O1 for \"python -O\", " + "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), + ('record=', None, + "filename in which to record list of installed files"), + ('always-unzip', 'Z', "don't install as a zipfile, no matter what"), + ('site-dirs=','S',"list of directories where .pth files work"), + ('editable', 'e', "Install specified packages in editable form"), + ('no-deps', 'N', "don't install dependencies"), + ('allow-hosts=', 'H', "pattern(s) that hostnames must match"), + ('local-snapshots-ok', 'l', + "allow building eggs from local checkouts"), + ('version', None, "print version information and exit"), + ('no-find-links', None, + "Don't load find-links defined in packages being installed") + ] + boolean_options = [ + 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy', + 'editable', + 'no-deps', 'local-snapshots-ok', 'version' + ] + + if HAS_USER_SITE: + help_msg = "install in user site-package '%s'" % site.USER_SITE + user_options.append(('user', None, help_msg)) + boolean_options.append('user') + + negative_opt = {'always-unzip': 'zip-ok'} + create_index = PackageIndex + + def initialize_options(self): + if HAS_USER_SITE: + whereami = os.path.abspath(__file__) + self.user = whereami.startswith(site.USER_SITE) + else: + self.user = 0 + + self.zip_ok = self.local_snapshots_ok = None + self.install_dir = self.script_dir = self.exclude_scripts = None + self.index_url = None + self.find_links = None + self.build_directory = None + self.args = None + self.optimize = self.record = None + self.upgrade = self.always_copy = self.multi_version = None + self.editable = self.no_deps = self.allow_hosts = None + self.root = self.prefix = self.no_report = None + self.version = None + self.install_purelib = None # for pure module distributions + self.install_platlib = None # non-pure (dists w/ extensions) + self.install_headers = None # for C/C++ headers + self.install_lib = None # set to either purelib or platlib + self.install_scripts = None + self.install_data = None + self.install_base = None + self.install_platbase = None + if HAS_USER_SITE: + self.install_userbase = site.USER_BASE + self.install_usersite = site.USER_SITE + else: + self.install_userbase = None + self.install_usersite = None + self.no_find_links = None + + # Options not specifiable via command line + self.package_index = None + self.pth_file = self.always_copy_from = None + self.site_dirs = None + self.installed_projects = {} + self.sitepy_installed = False + # Always read easy_install options, even if we are subclassed, or have + # an independent instance created. This ensures that defaults will + # always come from the standard configuration file(s)' "easy_install" + # section, even if this is a "develop" or "install" command, or some + # other embedding. 
+ self._dry_run = None + self.verbose = self.distribution.verbose + self.distribution._set_command_options( + self, self.distribution.get_option_dict('easy_install') + ) + + def delete_blockers(self, blockers): + for filename in blockers: + if os.path.exists(filename) or os.path.islink(filename): + log.info("Deleting %s", filename) + if not self.dry_run: + if os.path.isdir(filename) and not os.path.islink(filename): + rmtree(filename) + else: + os.unlink(filename) + + def finalize_options(self): + if self.version: + print('setuptools %s' % get_distribution('setuptools').version) + sys.exit() + + py_version = sys.version.split()[0] + prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix') + + self.config_vars = { + 'dist_name': self.distribution.get_name(), + 'dist_version': self.distribution.get_version(), + 'dist_fullname': self.distribution.get_fullname(), + 'py_version': py_version, + 'py_version_short': py_version[0:3], + 'py_version_nodot': py_version[0] + py_version[2], + 'sys_prefix': prefix, + 'prefix': prefix, + 'sys_exec_prefix': exec_prefix, + 'exec_prefix': exec_prefix, + # Only python 3.2+ has abiflags + 'abiflags': getattr(sys, 'abiflags', ''), + } + + if HAS_USER_SITE: + self.config_vars['userbase'] = self.install_userbase + self.config_vars['usersite'] = self.install_usersite + + # fix the install_dir if "--user" was used + #XXX: duplicate of the code in the setup command + if self.user and HAS_USER_SITE: + self.create_home_path() + if self.install_userbase is None: + raise DistutilsPlatformError( + "User base directory is not specified") + self.install_base = self.install_platbase = self.install_userbase + if os.name == 'posix': + self.select_scheme("unix_user") + else: + self.select_scheme(os.name + "_user") + + self.expand_basedirs() + self.expand_dirs() + + self._expand('install_dir','script_dir','build_directory','site_dirs') + # If a non-default installation directory was specified, default the + # script directory to match it. + if self.script_dir is None: + self.script_dir = self.install_dir + + if self.no_find_links is None: + self.no_find_links = False + + # Let install_dir get set by install_lib command, which in turn + # gets its info from the install command, and takes into account + # --prefix and --home and all that other crud. + self.set_undefined_options('install_lib', + ('install_dir','install_dir') + ) + # Likewise, set default script_dir from 'install_scripts.install_dir' + self.set_undefined_options('install_scripts', + ('install_dir', 'script_dir') + ) + + if self.user and self.install_purelib: + self.install_dir = self.install_purelib + self.script_dir = self.install_scripts + # default --record from the install command + self.set_undefined_options('install', ('record', 'record')) + # Should this be moved to the if statement below? 
It's not used + # elsewhere + normpath = map(normalize_path, sys.path) + self.all_site_dirs = get_site_dirs() + if self.site_dirs is not None: + site_dirs = [ + os.path.expanduser(s.strip()) for s in self.site_dirs.split(',') + ] + for d in site_dirs: + if not os.path.isdir(d): + log.warn("%s (in --site-dirs) does not exist", d) + elif normalize_path(d) not in normpath: + raise DistutilsOptionError( + d+" (in --site-dirs) is not on sys.path" + ) + else: + self.all_site_dirs.append(normalize_path(d)) + if not self.editable: self.check_site_dir() + self.index_url = self.index_url or "https://pypi.python.org/simple" + self.shadow_path = self.all_site_dirs[:] + for path_item in self.install_dir, normalize_path(self.script_dir): + if path_item not in self.shadow_path: + self.shadow_path.insert(0, path_item) + + if self.allow_hosts is not None: + hosts = [s.strip() for s in self.allow_hosts.split(',')] + else: + hosts = ['*'] + if self.package_index is None: + self.package_index = self.create_index( + self.index_url, search_path = self.shadow_path, hosts=hosts, + ) + self.local_index = Environment(self.shadow_path+sys.path) + + if self.find_links is not None: + if isinstance(self.find_links, basestring): + self.find_links = self.find_links.split() + else: + self.find_links = [] + if self.local_snapshots_ok: + self.package_index.scan_egg_links(self.shadow_path+sys.path) + if not self.no_find_links: + self.package_index.add_find_links(self.find_links) + self.set_undefined_options('install_lib', ('optimize','optimize')) + if not isinstance(self.optimize,int): + try: + self.optimize = int(self.optimize) + if not (0 <= self.optimize <= 2): raise ValueError + except ValueError: + raise DistutilsOptionError("--optimize must be 0, 1, or 2") + + if self.editable and not self.build_directory: + raise DistutilsArgError( + "Must specify a build directory (-b) when using --editable" + ) + if not self.args: + raise DistutilsArgError( + "No urls, filenames, or requirements specified (see --help)") + + self.outputs = [] + + def _expand_attrs(self, attrs): + for attr in attrs: + val = getattr(self, attr) + if val is not None: + if os.name == 'posix' or os.name == 'nt': + val = os.path.expanduser(val) + val = subst_vars(val, self.config_vars) + setattr(self, attr, val) + + def expand_basedirs(self): + """Calls `os.path.expanduser` on install_base, install_platbase and + root.""" + self._expand_attrs(['install_base', 'install_platbase', 'root']) + + def expand_dirs(self): + """Calls `os.path.expanduser` on install dirs.""" + self._expand_attrs(['install_purelib', 'install_platlib', + 'install_lib', 'install_headers', + 'install_scripts', 'install_data',]) + + def run(self): + if self.verbose != self.distribution.verbose: + log.set_verbosity(self.verbose) + try: + for spec in self.args: + self.easy_install(spec, not self.no_deps) + if self.record: + outputs = self.outputs + if self.root: # strip any package prefix + root_len = len(self.root) + for counter in xrange(len(outputs)): + outputs[counter] = outputs[counter][root_len:] + from distutils import file_util + self.execute( + file_util.write_file, (self.record, outputs), + "writing list of installed files to '%s'" % + self.record + ) + self.warn_deprecated_options() + finally: + log.set_verbosity(self.distribution.verbose) + + def pseudo_tempname(self): + """Return a pseudo-tempname base in the install directory. + This code is intentionally naive; if a malicious party can write to + the target directory you're already in deep doodoo. 
+ """ + try: + pid = os.getpid() + except: + pid = random.randint(0, maxsize) + return os.path.join(self.install_dir, "test-easy-install-%s" % pid) + + def warn_deprecated_options(self): + pass + + def check_site_dir(self): + """Verify that self.install_dir is .pth-capable dir, if needed""" + + instdir = normalize_path(self.install_dir) + pth_file = os.path.join(instdir,'easy-install.pth') + + # Is it a configured, PYTHONPATH, implicit, or explicit site dir? + is_site_dir = instdir in self.all_site_dirs + + if not is_site_dir and not self.multi_version: + # No? Then directly test whether it does .pth file processing + is_site_dir = self.check_pth_processing() + else: + # make sure we can write to target dir + testfile = self.pseudo_tempname()+'.write-test' + test_exists = os.path.exists(testfile) + try: + if test_exists: os.unlink(testfile) + open(testfile,'w').close() + os.unlink(testfile) + except (OSError,IOError): + self.cant_write_to_target() + + if not is_site_dir and not self.multi_version: + # Can't install non-multi to non-site dir + raise DistutilsError(self.no_default_version_msg()) + + if is_site_dir: + if self.pth_file is None: + self.pth_file = PthDistributions(pth_file, self.all_site_dirs) + else: + self.pth_file = None + + PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep) + if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]): + # only PYTHONPATH dirs need a site.py, so pretend it's there + self.sitepy_installed = True + elif self.multi_version and not os.path.exists(pth_file): + self.sitepy_installed = True # don't need site.py in this case + self.pth_file = None # and don't create a .pth file + self.install_dir = instdir + + def cant_write_to_target(self): + template = """can't create or remove files in install directory + +The following error occurred while trying to add or remove files in the +installation directory: + + %s + +The installation directory you specified (via --install-dir, --prefix, or +the distutils default setting) was: + + %s +""" + msg = template % (sys.exc_info()[1], self.install_dir,) + + if not os.path.exists(self.install_dir): + msg += """ +This directory does not currently exist. Please create it and try again, or +choose a different installation directory (using the -d or --install-dir +option). +""" + else: + msg += """ +Perhaps your account does not have write access to this directory? If the +installation directory is a system-owned directory, you may need to sign in +as the administrator or "root" account. If you do not have administrative +access to this machine, you may wish to choose a different installation +directory, preferably one that is listed in your PYTHONPATH environment +variable. + +For information on other options, you may wish to consult the +documentation at: + + https://pythonhosted.org/setuptools/easy_install.html + +Please make the appropriate changes for your system and try again. +""" + raise DistutilsError(msg) + + def check_pth_processing(self): + """Empirically verify whether .pth files are supported in inst. 
dir""" + instdir = self.install_dir + log.info("Checking .pth file support in %s", instdir) + pth_file = self.pseudo_tempname()+".pth" + ok_file = pth_file+'.ok' + ok_exists = os.path.exists(ok_file) + try: + if ok_exists: os.unlink(ok_file) + dirname = os.path.dirname(ok_file) + if not os.path.exists(dirname): + os.makedirs(dirname) + f = open(pth_file,'w') + except (OSError,IOError): + self.cant_write_to_target() + else: + try: + f.write("import os; f = open(%r, 'w'); f.write('OK'); f.close()\n" % (ok_file,)) + f.close() + f=None + executable = sys.executable + if os.name=='nt': + dirname,basename = os.path.split(executable) + alt = os.path.join(dirname,'pythonw.exe') + if basename.lower()=='python.exe' and os.path.exists(alt): + # use pythonw.exe to avoid opening a console window + executable = alt + + from distutils.spawn import spawn + spawn([executable,'-E','-c','pass'],0) + + if os.path.exists(ok_file): + log.info( + "TEST PASSED: %s appears to support .pth files", + instdir + ) + return True + finally: + if f: + f.close() + if os.path.exists(ok_file): + os.unlink(ok_file) + if os.path.exists(pth_file): + os.unlink(pth_file) + if not self.multi_version: + log.warn("TEST FAILED: %s does NOT support .pth files", instdir) + return False + + def install_egg_scripts(self, dist): + """Write all the scripts for `dist`, unless scripts are excluded""" + if not self.exclude_scripts and dist.metadata_isdir('scripts'): + for script_name in dist.metadata_listdir('scripts'): + if dist.metadata_isdir('scripts/' + script_name): + # The "script" is a directory, likely a Python 3 + # __pycache__ directory, so skip it. + continue + self.install_script( + dist, script_name, + dist.get_metadata('scripts/'+script_name) + ) + self.install_wrapper_scripts(dist) + + def add_output(self, path): + if os.path.isdir(path): + for base, dirs, files in os.walk(path): + for filename in files: + self.outputs.append(os.path.join(base,filename)) + else: + self.outputs.append(path) + + def not_editable(self, spec): + if self.editable: + raise DistutilsArgError( + "Invalid argument %r: you can't use filenames or URLs " + "with --editable (except via the --find-links option)." 
+ % (spec,) + ) + + def check_editable(self,spec): + if not self.editable: + return + + if os.path.exists(os.path.join(self.build_directory, spec.key)): + raise DistutilsArgError( + "%r already exists in %s; can't do a checkout there" % + (spec.key, self.build_directory) + ) + + def easy_install(self, spec, deps=False): + tmpdir = tempfile.mkdtemp(prefix="easy_install-") + download = None + if not self.editable: self.install_site_py() + + try: + if not isinstance(spec,Requirement): + if URL_SCHEME(spec): + # It's a url, download it to tmpdir and process + self.not_editable(spec) + download = self.package_index.download(spec, tmpdir) + return self.install_item(None, download, tmpdir, deps, True) + + elif os.path.exists(spec): + # Existing file or directory, just process it directly + self.not_editable(spec) + return self.install_item(None, spec, tmpdir, deps, True) + else: + spec = parse_requirement_arg(spec) + + self.check_editable(spec) + dist = self.package_index.fetch_distribution( + spec, tmpdir, self.upgrade, self.editable, not self.always_copy, + self.local_index + ) + if dist is None: + msg = "Could not find suitable distribution for %r" % spec + if self.always_copy: + msg+=" (--always-copy skips system and development eggs)" + raise DistutilsError(msg) + elif dist.precedence==DEVELOP_DIST: + # .egg-info dists don't need installing, just process deps + self.process_distribution(spec, dist, deps, "Using") + return dist + else: + return self.install_item(spec, dist.location, tmpdir, deps) + + finally: + if os.path.exists(tmpdir): + rmtree(tmpdir) + + def install_item(self, spec, download, tmpdir, deps, install_needed=False): + + # Installation is also needed if file in tmpdir or is not an egg + install_needed = install_needed or self.always_copy + install_needed = install_needed or os.path.dirname(download) == tmpdir + install_needed = install_needed or not download.endswith('.egg') + install_needed = install_needed or ( + self.always_copy_from is not None and + os.path.dirname(normalize_path(download)) == + normalize_path(self.always_copy_from) + ) + + if spec and not install_needed: + # at this point, we know it's a local .egg, we just don't know if + # it's already installed. + for dist in self.local_index[spec.project_name]: + if dist.location==download: + break + else: + install_needed = True # it's not in the local index + + log.info("Processing %s", os.path.basename(download)) + + if install_needed: + dists = self.install_eggs(spec, download, tmpdir) + for dist in dists: + self.process_distribution(spec, dist, deps) + else: + dists = [self.egg_distribution(download)] + self.process_distribution(spec, dists[0], deps, "Using") + + if spec is not None: + for dist in dists: + if dist in spec: + return dist + + def select_scheme(self, name): + """Sets the install directories by applying the install schemes.""" + # it's the caller's problem if they supply a bad name! 
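+        # `name` is a distutils scheme key; finalize_options() passes
+        # "unix_user" on posix and os.name + "_user" elsewhere, and only
+        # attributes that are still None get filled in from the scheme.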
+ scheme = INSTALL_SCHEMES[name] + for key in SCHEME_KEYS: + attrname = 'install_' + key + if getattr(self, attrname) is None: + setattr(self, attrname, scheme[key]) + + def process_distribution(self, requirement, dist, deps=True, *info): + self.update_pth(dist) + self.package_index.add(dist) + self.local_index.add(dist) + self.install_egg_scripts(dist) + self.installed_projects[dist.key] = dist + log.info(self.installation_report(requirement, dist, *info)) + if (dist.has_metadata('dependency_links.txt') and + not self.no_find_links): + self.package_index.add_find_links( + dist.get_metadata_lines('dependency_links.txt') + ) + if not deps and not self.always_copy: + return + elif requirement is not None and dist.key != requirement.key: + log.warn("Skipping dependencies for %s", dist) + return # XXX this is not the distribution we were looking for + elif requirement is None or dist not in requirement: + # if we wound up with a different version, resolve what we've got + distreq = dist.as_requirement() + requirement = requirement or distreq + requirement = Requirement( + distreq.project_name, distreq.specs, requirement.extras + ) + log.info("Processing dependencies for %s", requirement) + try: + distros = WorkingSet([]).resolve( + [requirement], self.local_index, self.easy_install + ) + except DistributionNotFound: + e = sys.exc_info()[1] + raise DistutilsError( + "Could not find required distribution %s" % e.args + ) + except VersionConflict: + e = sys.exc_info()[1] + raise DistutilsError( + "Installed distribution %s conflicts with requirement %s" + % e.args + ) + if self.always_copy or self.always_copy_from: + # Force all the relevant distros to be copied or activated + for dist in distros: + if dist.key not in self.installed_projects: + self.easy_install(dist.as_requirement()) + log.info("Finished processing dependencies for %s", requirement) + + def should_unzip(self, dist): + if self.zip_ok is not None: + return not self.zip_ok + if dist.has_metadata('not-zip-safe'): + return True + if not dist.has_metadata('zip-safe'): + return True + return False + + def maybe_move(self, spec, dist_filename, setup_base): + dst = os.path.join(self.build_directory, spec.key) + if os.path.exists(dst): + msg = "%r already exists in %s; build directory %s will not be kept" + log.warn(msg, spec.key, self.build_directory, setup_base) + return setup_base + if os.path.isdir(dist_filename): + setup_base = dist_filename + else: + if os.path.dirname(dist_filename)==setup_base: + os.unlink(dist_filename) # get it out of the tmp dir + contents = os.listdir(setup_base) + if len(contents)==1: + dist_filename = os.path.join(setup_base,contents[0]) + if os.path.isdir(dist_filename): + # if the only thing there is a directory, move it instead + setup_base = dist_filename + ensure_directory(dst) + shutil.move(setup_base, dst) + return dst + + def install_wrapper_scripts(self, dist): + if not self.exclude_scripts: + for args in get_script_args(dist): + self.write_script(*args) + + def install_script(self, dist, script_name, script_text, dev_path=None): + """Generate a legacy script wrapper and install it""" + spec = str(dist.as_requirement()) + is_script = is_python_script(script_text, script_name) + + def get_template(filename): + """ + There are a couple of template scripts in the package. This + function loads one of them and prepares it for use. + + These templates use triple-quotes to escape variable + substitutions so the scripts get the 2to3 treatment when build + on Python 3. 
The templates cannot use triple-quotes naturally. + """ + raw_bytes = resource_string('setuptools', template_name) + template_str = raw_bytes.decode('utf-8') + clean_template = template_str.replace('"""', '') + return clean_template + + if is_script: + template_name = 'script template.py' + if dev_path: + template_name = template_name.replace('.py', ' (dev).py') + script_text = (get_script_header(script_text) + + get_template(template_name) % locals()) + self.write_script(script_name, _to_ascii(script_text), 'b') + + def write_script(self, script_name, contents, mode="t", blockers=()): + """Write an executable file to the scripts directory""" + self.delete_blockers( # clean up old .py/.pyw w/o a script + [os.path.join(self.script_dir,x) for x in blockers]) + log.info("Installing %s script to %s", script_name, self.script_dir) + target = os.path.join(self.script_dir, script_name) + self.add_output(target) + + mask = current_umask() + if not self.dry_run: + ensure_directory(target) + if os.path.exists(target): + os.unlink(target) + f = open(target,"w"+mode) + f.write(contents) + f.close() + chmod(target, 0x1FF-mask) # 0777 + + def install_eggs(self, spec, dist_filename, tmpdir): + # .egg dirs or files are already built, so just return them + if dist_filename.lower().endswith('.egg'): + return [self.install_egg(dist_filename, tmpdir)] + elif dist_filename.lower().endswith('.exe'): + return [self.install_exe(dist_filename, tmpdir)] + + # Anything else, try to extract and build + setup_base = tmpdir + if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'): + unpack_archive(dist_filename, tmpdir, self.unpack_progress) + elif os.path.isdir(dist_filename): + setup_base = os.path.abspath(dist_filename) + + if (setup_base.startswith(tmpdir) # something we downloaded + and self.build_directory and spec is not None): + setup_base = self.maybe_move(spec, dist_filename, setup_base) + + # Find the setup.py file + setup_script = os.path.join(setup_base, 'setup.py') + + if not os.path.exists(setup_script): + setups = glob(os.path.join(setup_base, '*', 'setup.py')) + if not setups: + raise DistutilsError( + "Couldn't find a setup script in %s" % os.path.abspath(dist_filename) + ) + if len(setups)>1: + raise DistutilsError( + "Multiple setup scripts in %s" % os.path.abspath(dist_filename) + ) + setup_script = setups[0] + + # Now run it, and return the result + if self.editable: + log.info(self.report_editable(spec, setup_script)) + return [] + else: + return self.build_and_install(setup_script, setup_base) + + def egg_distribution(self, egg_path): + if os.path.isdir(egg_path): + metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO')) + else: + metadata = EggMetadata(zipimport.zipimporter(egg_path)) + return Distribution.from_filename(egg_path,metadata=metadata) + + def install_egg(self, egg_path, tmpdir): + destination = os.path.join(self.install_dir,os.path.basename(egg_path)) + destination = os.path.abspath(destination) + if not self.dry_run: + ensure_directory(destination) + + dist = self.egg_distribution(egg_path) + if not samefile(egg_path, destination): + if os.path.isdir(destination) and not os.path.islink(destination): + dir_util.remove_tree(destination, dry_run=self.dry_run) + elif os.path.exists(destination): + self.execute(os.unlink,(destination,),"Removing "+destination) + uncache_zipdir(destination) + if os.path.isdir(egg_path): + if egg_path.startswith(tmpdir): + f,m = shutil.move, "Moving" + else: + f,m = shutil.copytree, "Copying" + elif self.should_unzip(dist): + 
self.mkpath(destination) + f,m = self.unpack_and_compile, "Extracting" + elif egg_path.startswith(tmpdir): + f,m = shutil.move, "Moving" + else: + f,m = shutil.copy2, "Copying" + + self.execute(f, (egg_path, destination), + (m+" %s to %s") % + (os.path.basename(egg_path),os.path.dirname(destination))) + + self.add_output(destination) + return self.egg_distribution(destination) + + def install_exe(self, dist_filename, tmpdir): + # See if it's valid, get data + cfg = extract_wininst_cfg(dist_filename) + if cfg is None: + raise DistutilsError( + "%s is not a valid distutils Windows .exe" % dist_filename + ) + # Create a dummy distribution object until we build the real distro + dist = Distribution( + None, + project_name=cfg.get('metadata','name'), + version=cfg.get('metadata','version'), platform=get_platform(), + ) + + # Convert the .exe to an unpacked egg + egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg') + egg_tmp = egg_path + '.tmp' + _egg_info = os.path.join(egg_tmp, 'EGG-INFO') + pkg_inf = os.path.join(_egg_info, 'PKG-INFO') + ensure_directory(pkg_inf) # make sure EGG-INFO dir exists + dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX + self.exe_to_egg(dist_filename, egg_tmp) + + # Write EGG-INFO/PKG-INFO + if not os.path.exists(pkg_inf): + f = open(pkg_inf,'w') + f.write('Metadata-Version: 1.0\n') + for k,v in cfg.items('metadata'): + if k != 'target_version': + f.write('%s: %s\n' % (k.replace('_','-').title(), v)) + f.close() + script_dir = os.path.join(_egg_info,'scripts') + self.delete_blockers( # delete entry-point scripts to avoid duping + [os.path.join(script_dir,args[0]) for args in get_script_args(dist)] + ) + # Build .egg file from tmpdir + bdist_egg.make_zipfile( + egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run + ) + # install the .egg + return self.install_egg(egg_path, tmpdir) + + def exe_to_egg(self, dist_filename, egg_tmp): + """Extract a bdist_wininst to the directories an egg would use""" + # Check for .pth file and set up prefix translations + prefixes = get_exe_prefixes(dist_filename) + to_compile = [] + native_libs = [] + top_level = {} + def process(src,dst): + s = src.lower() + for old,new in prefixes: + if s.startswith(old): + src = new+src[len(old):] + parts = src.split('/') + dst = os.path.join(egg_tmp, *parts) + dl = dst.lower() + if dl.endswith('.pyd') or dl.endswith('.dll'): + parts[-1] = bdist_egg.strip_module(parts[-1]) + top_level[os.path.splitext(parts[0])[0]] = 1 + native_libs.append(src) + elif dl.endswith('.py') and old!='SCRIPTS/': + top_level[os.path.splitext(parts[0])[0]] = 1 + to_compile.append(dst) + return dst + if not src.endswith('.pth'): + log.warn("WARNING: can't process %s", src) + return None + # extract, tracking .pyd/.dll->native_libs and .py -> to_compile + unpack_archive(dist_filename, egg_tmp, process) + stubs = [] + for res in native_libs: + if res.lower().endswith('.pyd'): # create stubs for .pyd's + parts = res.split('/') + resource = parts[-1] + parts[-1] = bdist_egg.strip_module(parts[-1])+'.py' + pyfile = os.path.join(egg_tmp, *parts) + to_compile.append(pyfile) + stubs.append(pyfile) + bdist_egg.write_stub(resource, pyfile) + self.byte_compile(to_compile) # compile .py's + bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'), + bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag + + for name in 'top_level','native_libs': + if locals()[name]: + txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt') + if not os.path.exists(txt): + f = open(txt,'w') + 
f.write('\n'.join(locals()[name])+'\n') + f.close() + + def installation_report(self, req, dist, what="Installed"): + """Helpful installation message for display to package users""" + msg = "\n%(what)s %(eggloc)s%(extras)s" + if self.multi_version and not self.no_report: + msg += """ + +Because this distribution was installed --multi-version, before you can +import modules from this package in an application, you will need to +'import pkg_resources' and then use a 'require()' call similar to one of +these examples, in order to select the desired version: + + pkg_resources.require("%(name)s") # latest installed version + pkg_resources.require("%(name)s==%(version)s") # this exact version + pkg_resources.require("%(name)s>=%(version)s") # this version or higher +""" + if self.install_dir not in map(normalize_path,sys.path): + msg += """ + +Note also that the installation directory must be on sys.path at runtime for +this to work. (e.g. by being the application's script directory, by being on +PYTHONPATH, or by being added to sys.path by your code.) +""" + eggloc = dist.location + name = dist.project_name + version = dist.version + extras = '' # TODO: self.report_extras(req, dist) + return msg % locals() + + def report_editable(self, spec, setup_script): + dirname = os.path.dirname(setup_script) + python = sys.executable + return """\nExtracted editable version of %(spec)s to %(dirname)s + +If it uses setuptools in its setup script, you can activate it in +"development" mode by going to that directory and running:: + + %(python)s setup.py develop + +See the setuptools documentation for the "develop" command for more info. +""" % locals() + + def run_setup(self, setup_script, setup_base, args): + sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg) + sys.modules.setdefault('distutils.command.egg_info', egg_info) + + args = list(args) + if self.verbose>2: + v = 'v' * (self.verbose - 1) + args.insert(0,'-'+v) + elif self.verbose<2: + args.insert(0,'-q') + if self.dry_run: + args.insert(0,'-n') + log.info( + "Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args) + ) + try: + run_setup(setup_script, args) + except SystemExit: + v = sys.exc_info()[1] + raise DistutilsError("Setup script exited with %s" % (v.args[0],)) + + def build_and_install(self, setup_script, setup_base): + args = ['bdist_egg', '--dist-dir'] + + dist_dir = tempfile.mkdtemp( + prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script) + ) + try: + self._set_fetcher_options(os.path.dirname(setup_script)) + args.append(dist_dir) + + self.run_setup(setup_script, setup_base, args) + all_eggs = Environment([dist_dir]) + eggs = [] + for key in all_eggs: + for dist in all_eggs[key]: + eggs.append(self.install_egg(dist.location, setup_base)) + if not eggs and not self.dry_run: + log.warn("No eggs found in %s (setup script problem?)", + dist_dir) + return eggs + finally: + rmtree(dist_dir) + log.set_verbosity(self.verbose) # restore our log verbosity + + def _set_fetcher_options(self, base): + """ + When easy_install is about to run bdist_egg on a source dist, that + source dist might have 'setup_requires' directives, requiring + additional fetching. Ensure the fetcher options given to easy_install + are available to that command as well. + """ + # find the fetch options from easy_install and write them out + # to the setup.cfg file. 
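+        # e.g. (hypothetical invocation) "easy_install -f /my/eggs Foo"
+        # leaves a setup.cfg next to the sdist's setup.py containing:
+        #     [easy_install]
+        #     find-links = /my/eggs
+        # so any setup_requires downloads honor the same options.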
+ ei_opts = self.distribution.get_option_dict('easy_install').copy() + fetch_directives = ( + 'find_links', 'site_dirs', 'index_url', 'optimize', + 'site_dirs', 'allow_hosts', + ) + fetch_options = {} + for key, val in ei_opts.items(): + if key not in fetch_directives: continue + fetch_options[key.replace('_', '-')] = val[1] + # create a settings dictionary suitable for `edit_config` + settings = dict(easy_install=fetch_options) + cfg_filename = os.path.join(base, 'setup.cfg') + setopt.edit_config(cfg_filename, settings) + + def update_pth(self, dist): + if self.pth_file is None: + return + + for d in self.pth_file[dist.key]: # drop old entries + if self.multi_version or d.location != dist.location: + log.info("Removing %s from easy-install.pth file", d) + self.pth_file.remove(d) + if d.location in self.shadow_path: + self.shadow_path.remove(d.location) + + if not self.multi_version: + if dist.location in self.pth_file.paths: + log.info( + "%s is already the active version in easy-install.pth", + dist + ) + else: + log.info("Adding %s to easy-install.pth file", dist) + self.pth_file.add(dist) # add new entry + if dist.location not in self.shadow_path: + self.shadow_path.append(dist.location) + + if not self.dry_run: + + self.pth_file.save() + + if dist.key=='setuptools': + # Ensure that setuptools itself never becomes unavailable! + # XXX should this check for latest version? + filename = os.path.join(self.install_dir,'setuptools.pth') + if os.path.islink(filename): os.unlink(filename) + f = open(filename, 'wt') + f.write(self.pth_file.make_relative(dist.location)+'\n') + f.close() + + def unpack_progress(self, src, dst): + # Progress filter for unpacking + log.debug("Unpacking %s to %s", src, dst) + return dst # only unpack-and-compile skips files for dry run + + def unpack_and_compile(self, egg_path, destination): + to_compile = [] + to_chmod = [] + + def pf(src, dst): + if dst.endswith('.py') and not src.startswith('EGG-INFO/'): + to_compile.append(dst) + elif dst.endswith('.dll') or dst.endswith('.so'): + to_chmod.append(dst) + self.unpack_progress(src,dst) + return not self.dry_run and dst or None + + unpack_archive(egg_path, destination, pf) + self.byte_compile(to_compile) + if not self.dry_run: + for f in to_chmod: + mode = ((os.stat(f)[stat.ST_MODE]) | 0x16D) & 0xFED # 0555, 07755 + chmod(f, mode) + + def byte_compile(self, to_compile): + if _dont_write_bytecode: + self.warn('byte-compiling is disabled, skipping.') + return + + from distutils.util import byte_compile + try: + # try to make the byte compile messages quieter + log.set_verbosity(self.verbose - 1) + + byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run) + if self.optimize: + byte_compile( + to_compile, optimize=self.optimize, force=1, + dry_run=self.dry_run + ) + finally: + log.set_verbosity(self.verbose) # restore original verbosity + + def no_default_version_msg(self): + template = """bad install directory or PYTHONPATH + +You are attempting to install a package to a directory that is not +on PYTHONPATH and which Python does not read ".pth" files from. The +installation directory you specified (via --install-dir, --prefix, or +the distutils default setting) was: + + %s + +and your PYTHONPATH environment variable currently contains: + + %r + +Here are some of your options for correcting the problem: + +* You can choose a different installation directory, i.e., one that is + on PYTHONPATH or supports .pth files + +* You can add the installation directory to the PYTHONPATH environment + variable. 
(It must then also be on PYTHONPATH whenever you run + Python and want to use the package(s) you are installing.) + +* You can set up the installation directory to support ".pth" files by + using one of the approaches described here: + + https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations + +Please make the appropriate changes for your system and try again.""" + return template % (self.install_dir, os.environ.get('PYTHONPATH','')) + + def install_site_py(self): + """Make sure there's a site.py in the target dir, if needed""" + + if self.sitepy_installed: + return # already did it, or don't need to + + sitepy = os.path.join(self.install_dir, "site.py") + source = resource_string("setuptools", "site-patch.py") + current = "" + + if os.path.exists(sitepy): + log.debug("Checking existing site.py in %s", self.install_dir) + f = open(sitepy,'rb') + current = f.read() + # we want str, not bytes + if sys.version_info >= (3,): + current = current.decode() + + f.close() + if not current.startswith('def __boot():'): + raise DistutilsError( + "%s is not a setuptools-generated site.py; please" + " remove it." % sitepy + ) + + if current != source: + log.info("Creating %s", sitepy) + if not self.dry_run: + ensure_directory(sitepy) + f = open(sitepy,'wb') + f.write(source) + f.close() + self.byte_compile([sitepy]) + + self.sitepy_installed = True + + def create_home_path(self): + """Create directories under ~.""" + if not self.user: + return + home = convert_path(os.path.expanduser("~")) + for name, path in iteritems(self.config_vars): + if path.startswith(home) and not os.path.isdir(path): + self.debug_print("os.makedirs('%s', 0700)" % path) + os.makedirs(path, 0x1C0) # 0700 + + INSTALL_SCHEMES = dict( + posix = dict( + install_dir = '$base/lib/python$py_version_short/site-packages', + script_dir = '$base/bin', + ), + ) + + DEFAULT_SCHEME = dict( + install_dir = '$base/Lib/site-packages', + script_dir = '$base/Scripts', + ) + + def _expand(self, *attrs): + config_vars = self.get_finalized_command('install').config_vars + + if self.prefix: + # Set default install_dir/scripts from --prefix + config_vars = config_vars.copy() + config_vars['base'] = self.prefix + scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME) + for attr,val in scheme.items(): + if getattr(self,attr,None) is None: + setattr(self,attr,val) + + from distutils.util import subst_vars + for attr in attrs: + val = getattr(self, attr) + if val is not None: + val = subst_vars(val, config_vars) + if os.name == 'posix': + val = os.path.expanduser(val) + setattr(self, attr, val) + +def get_site_dirs(): + # return a list of 'site' dirs + sitedirs = [_f for _f in os.environ.get('PYTHONPATH', + '').split(os.pathsep) if _f] + prefixes = [sys.prefix] + if sys.exec_prefix != sys.prefix: + prefixes.append(sys.exec_prefix) + for prefix in prefixes: + if prefix: + if sys.platform in ('os2emx', 'riscos'): + sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) + elif os.sep == '/': + sitedirs.extend([os.path.join(prefix, + "lib", + "python" + sys.version[:3], + "site-packages"), + os.path.join(prefix, "lib", "site-python")]) + else: + sitedirs.extend( + [prefix, os.path.join(prefix, "lib", "site-packages")] + ) + if sys.platform == 'darwin': + # for framework builds *only* we add the standard Apple + # locations. 
Currently only per-user, but /Library and + # /Network/Library could be added too + if 'Python.framework' in prefix: + home = os.environ.get('HOME') + if home: + sitedirs.append( + os.path.join(home, + 'Library', + 'Python', + sys.version[:3], + 'site-packages')) + for site_lib in (_get_purelib(), _get_platlib()): + if site_lib not in sitedirs: sitedirs.append(site_lib) + + if HAS_USER_SITE: + sitedirs.append(site.USER_SITE) + + sitedirs = list(map(normalize_path, sitedirs)) + + return sitedirs + + +def expand_paths(inputs): + """Yield sys.path directories that might contain "old-style" packages""" + + seen = {} + + for dirname in inputs: + dirname = normalize_path(dirname) + if dirname in seen: + continue + + seen[dirname] = 1 + if not os.path.isdir(dirname): + continue + + files = os.listdir(dirname) + yield dirname, files + + for name in files: + if not name.endswith('.pth'): + # We only care about the .pth files + continue + if name in ('easy-install.pth','setuptools.pth'): + # Ignore .pth files that we control + continue + + # Read the .pth file + f = open(os.path.join(dirname,name)) + lines = list(yield_lines(f)) + f.close() + + # Yield existing non-dupe, non-import directory lines from it + for line in lines: + if not line.startswith("import"): + line = normalize_path(line.rstrip()) + if line not in seen: + seen[line] = 1 + if not os.path.isdir(line): + continue + yield line, os.listdir(line) + + +def extract_wininst_cfg(dist_filename): + """Extract configuration data from a bdist_wininst .exe + + Returns a ConfigParser.RawConfigParser, or None + """ + f = open(dist_filename,'rb') + try: + endrec = zipfile._EndRecData(f) + if endrec is None: + return None + + prepended = (endrec[9] - endrec[5]) - endrec[6] + if prepended < 12: # no wininst data here + return None + f.seek(prepended-12) + + from setuptools.compat import StringIO, ConfigParser + import struct + tag, cfglen, bmlen = struct.unpack("<iii",f.read(12)) + if tag not in (0x1234567A, 0x1234567B): + return None # not a valid tag + + f.seek(prepended-(12+cfglen)) + cfg = ConfigParser.RawConfigParser({'version':'','target_version':''}) + try: + part = f.read(cfglen) + # part is in bytes, but we need to read up to the first null + # byte. + if sys.version_info >= (2,6): + null_byte = bytes([0]) + else: + null_byte = chr(0) + config = part.split(null_byte, 1)[0] + # Now the config is in bytes, but on Python 3, it must be + # unicode for the RawConfigParser, so decode it. Is this the + # right encoding? 
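+            # The decoded payload is ordinary INI text, e.g.:
+            #     [metadata]
+            #     name = SomePackage
+            #     version = 1.0
+            # (names here are illustrative); readfp() parses it below and
+            # has_section() sanity-checks the result.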
+ config = config.decode('ascii') + cfg.readfp(StringIO(config)) + except ConfigParser.Error: + return None + if not cfg.has_section('metadata') or not cfg.has_section('Setup'): + return None + return cfg + + finally: + f.close() + + +def get_exe_prefixes(exe_filename): + """Get exe->egg path translations for a given .exe file""" + + prefixes = [ + ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''), + ('PLATLIB/', ''), + ('SCRIPTS/', 'EGG-INFO/scripts/'), + ('DATA/lib/site-packages', ''), + ] + z = zipfile.ZipFile(exe_filename) + try: + for info in z.infolist(): + name = info.filename + parts = name.split('/') + if len(parts)==3 and parts[2]=='PKG-INFO': + if parts[1].endswith('.egg-info'): + prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/')) + break + if len(parts) != 2 or not name.endswith('.pth'): + continue + if name.endswith('-nspkg.pth'): + continue + if parts[0].upper() in ('PURELIB','PLATLIB'): + contents = z.read(name) + if sys.version_info >= (3,): + contents = contents.decode() + for pth in yield_lines(contents): + pth = pth.strip().replace('\\','/') + if not pth.startswith('import'): + prefixes.append((('%s/%s/' % (parts[0],pth)), '')) + finally: + z.close() + prefixes = [(x.lower(),y) for x, y in prefixes] + prefixes.sort() + prefixes.reverse() + return prefixes + + +def parse_requirement_arg(spec): + try: + return Requirement.parse(spec) + except ValueError: + raise DistutilsError( + "Not a URL, existing file, or requirement spec: %r" % (spec,) + ) + +class PthDistributions(Environment): + """A .pth file with Distribution paths in it""" + + dirty = False + + def __init__(self, filename, sitedirs=()): + self.filename = filename + self.sitedirs = list(map(normalize_path, sitedirs)) + self.basedir = normalize_path(os.path.dirname(self.filename)) + self._load() + Environment.__init__(self, [], None, None) + for path in yield_lines(self.paths): + list(map(self.add, find_distributions(path, True))) + + def _load(self): + self.paths = [] + saw_import = False + seen = dict.fromkeys(self.sitedirs) + if os.path.isfile(self.filename): + f = open(self.filename,'rt') + for line in f: + if line.startswith('import'): + saw_import = True + continue + path = line.rstrip() + self.paths.append(path) + if not path.strip() or path.strip().startswith('#'): + continue + # skip non-existent paths, in case somebody deleted a package + # manually, and duplicate paths as well + path = self.paths[-1] = normalize_path( + os.path.join(self.basedir,path) + ) + if not os.path.exists(path) or path in seen: + self.paths.pop() # skip it + self.dirty = True # we cleaned up, so we're dirty now :) + continue + seen[path] = 1 + f.close() + + if self.paths and not saw_import: + self.dirty = True # ensure anything we touch has import wrappers + while self.paths and not self.paths[-1].strip(): + self.paths.pop() + + def save(self): + """Write changed .pth file back to disk""" + if not self.dirty: + return + + data = '\n'.join(map(self.make_relative,self.paths)) + if data: + log.debug("Saving %s", self.filename) + data = ( + "import sys; sys.__plen = len(sys.path)\n" + "%s\n" + "import sys; new=sys.path[sys.__plen:];" + " del sys.path[sys.__plen:];" + " p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;" + " sys.__egginsert = p+len(new)\n" + ) % data + + if os.path.islink(self.filename): + os.unlink(self.filename) + f = open(self.filename,'wt') + f.write(data) + f.close() + + elif os.path.exists(self.filename): + log.debug("Deleting empty %s", self.filename) + os.unlink(self.filename) + + self.dirty = False + + def 
add(self, dist): + """Add `dist` to the distribution map""" + if (dist.location not in self.paths and ( + dist.location not in self.sitedirs or + dist.location == os.getcwd() # account for '.' being in PYTHONPATH + )): + self.paths.append(dist.location) + self.dirty = True + Environment.add(self, dist) + + def remove(self, dist): + """Remove `dist` from the distribution map""" + while dist.location in self.paths: + self.paths.remove(dist.location) + self.dirty = True + Environment.remove(self, dist) + + def make_relative(self,path): + npath, last = os.path.split(normalize_path(path)) + baselen = len(self.basedir) + parts = [last] + sep = os.altsep=='/' and '/' or os.sep + while len(npath)>=baselen: + if npath==self.basedir: + parts.append(os.curdir) + parts.reverse() + return sep.join(parts) + npath, last = os.path.split(npath) + parts.append(last) + else: + return path + +def get_script_header(script_text, executable=sys_executable, wininst=False): + """Create a #! line, getting options (if any) from script_text""" + from distutils.command.build_scripts import first_line_re + + # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern. + if not isinstance(first_line_re.pattern, str): + first_line_re = re.compile(first_line_re.pattern.decode()) + + first = (script_text+'\n').splitlines()[0] + match = first_line_re.match(first) + options = '' + if match: + options = match.group(1) or '' + if options: options = ' '+options + if wininst: + executable = "python.exe" + else: + executable = nt_quote_arg(executable) + hdr = "#!%(executable)s%(options)s\n" % locals() + if not isascii(hdr): + # Non-ascii path to sys.executable, use -x to prevent warnings + if options: + if options.strip().startswith('-'): + options = ' -x'+options.strip()[1:] + # else: punt, we can't do it, let the warning happen anyway + else: + options = ' -x' + executable = fix_jython_executable(executable, options) + hdr = "#!%(executable)s%(options)s\n" % locals() + return hdr + +def auto_chmod(func, arg, exc): + if func is os.remove and os.name=='nt': + chmod(arg, stat.S_IWRITE) + return func(arg) + et, ev, _ = sys.exc_info() + reraise(et, (ev[0], ev[1] + (" %s %s" % (func,arg)))) + +def uncache_zipdir(path): + """Ensure that the importer caches dont have stale info for `path`""" + from zipimport import _zip_directory_cache as zdc + _uncache(path, zdc) + _uncache(path, sys.path_importer_cache) + +def _uncache(path, cache): + if path in cache: + del cache[path] + else: + path = normalize_path(path) + for p in cache: + if normalize_path(p)==path: + del cache[p] + return + +def is_python(text, filename='<string>'): + "Is this string a valid Python script?" + try: + compile(text, filename, 'exec') + except (SyntaxError, TypeError): + return False + else: + return True + +def is_sh(executable): + """Determine if the specified executable is a .sh (contains a #! line)""" + try: + fp = open(executable) + magic = fp.read(2) + fp.close() + except (OSError,IOError): return executable + return magic == '#!' 
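+
+# nt_quote_arg() below follows the MS C runtime parsing rules: backslashes
+# are literal except in front of a '"', and trailing backslashes are doubled
+# before the closing quote, so e.g.
+#     C:\Program Files\python.exe  ->  "C:\Program Files\python.exe"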
+ +def nt_quote_arg(arg): + """Quote a command line argument according to Windows parsing rules""" + + result = [] + needquote = False + nb = 0 + + needquote = (" " in arg) or ("\t" in arg) + if needquote: + result.append('"') + + for c in arg: + if c == '\\': + nb += 1 + elif c == '"': + # double preceding backslashes, then add a \" + result.append('\\' * (nb*2) + '\\"') + nb = 0 + else: + if nb: + result.append('\\' * nb) + nb = 0 + result.append(c) + + if nb: + result.append('\\' * nb) + + if needquote: + result.append('\\' * nb) # double the trailing backslashes + result.append('"') + + return ''.join(result) + +def is_python_script(script_text, filename): + """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc. + """ + if filename.endswith('.py') or filename.endswith('.pyw'): + return True # extension says it's Python + if is_python(script_text, filename): + return True # it's syntactically valid Python + if script_text.startswith('#!'): + # It begins with a '#!' line, so check if 'python' is in it somewhere + return 'python' in script_text.splitlines()[0].lower() + + return False # Not any Python I can recognize + +try: + from os import chmod as _chmod +except ImportError: + # Jython compatibility + def _chmod(*args): pass + +def chmod(path, mode): + log.debug("changing mode of %s to %o", path, mode) + try: + _chmod(path, mode) + except os.error: + e = sys.exc_info()[1] + log.debug("chmod failed: %s", e) + +def fix_jython_executable(executable, options): + if sys.platform.startswith('java') and is_sh(executable): + # Workaround for Jython is not needed on Linux systems. + import java + if java.lang.System.getProperty("os.name") == "Linux": + return executable + + # Workaround Jython's sys.executable being a .sh (an invalid + # shebang line interpreter) + if options: + # Can't apply the workaround, leave it broken + log.warn( + "WARNING: Unable to adapt shebang line for Jython," + " the following script is NOT executable\n" + " see http://bugs.jython.org/issue1112 for" + " more information.") + else: + return '/usr/bin/env %s' % executable + return executable + + +class ScriptWriter(object): + """ + Encapsulates behavior around writing entry point scripts for console and + gui apps. + """ + + template = textwrap.dedent(""" + # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r + __requires__ = %(spec)r + import sys + from pkg_resources import load_entry_point + + if __name__ == '__main__': + sys.exit( + load_entry_point(%(spec)r, %(group)r, %(name)r)() + ) + """).lstrip() + + @classmethod + def get_script_args(cls, dist, executable=sys_executable, wininst=False): + """ + Yield write_script() argument tuples for a distribution's entrypoints + """ + gen_class = cls.get_writer(wininst) + spec = str(dist.as_requirement()) + header = get_script_header("", executable, wininst) + for type_ in 'console', 'gui': + group = type_ + '_scripts' + for name, ep in dist.get_entry_map(group).items(): + script_text = gen_class.template % locals() + for res in gen_class._get_script_args(type_, name, header, + script_text): + yield res + + @classmethod + def get_writer(cls, force_windows): + if force_windows or sys.platform=='win32': + return WindowsScriptWriter.get_writer() + return cls + + @classmethod + def _get_script_args(cls, type_, name, header, script_text): + # Simply write the stub with no extension. 
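+        # i.e. an entry point "foo" becomes an executable file named just
+        # "foo": the #! header followed by the rendered stub from cls.template.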
+ yield (name, header+script_text) + + +class WindowsScriptWriter(ScriptWriter): + @classmethod + def get_writer(cls): + """ + Get a script writer suitable for Windows + """ + writer_lookup = dict( + executable=WindowsExecutableLauncherWriter, + natural=cls, + ) + # for compatibility, use the executable launcher by default + launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable') + return writer_lookup[launcher] + + @classmethod + def _get_script_args(cls, type_, name, header, script_text): + "For Windows, add a .py extension" + ext = dict(console='.pya', gui='.pyw')[type_] + if ext not in os.environ['PATHEXT'].lower().split(';'): + warnings.warn("%s not listed in PATHEXT; scripts will not be " + "recognized as executables." % ext, UserWarning) + old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe'] + old.remove(ext) + header = cls._adjust_header(type_, header) + blockers = [name+x for x in old] + yield name+ext, header+script_text, 't', blockers + + @staticmethod + def _adjust_header(type_, orig_header): + """ + Make sure 'pythonw' is used for gui and 'python' is used for + console (regardless of what sys.executable is). + """ + pattern = 'pythonw.exe' + repl = 'python.exe' + if type_ == 'gui': + pattern, repl = repl, pattern + pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE) + new_header = pattern_ob.sub(string=orig_header, repl=repl) + clean_header = new_header[2:-1].strip('"') + if sys.platform == 'win32' and not os.path.exists(clean_header): + # the adjusted version doesn't exist, so return the original + return orig_header + return new_header + + +class WindowsExecutableLauncherWriter(WindowsScriptWriter): + @classmethod + def _get_script_args(cls, type_, name, header, script_text): + """ + For Windows, add a .py extension and an .exe launcher + """ + if type_=='gui': + launcher_type = 'gui' + ext = '-script.pyw' + old = ['.pyw'] + else: + launcher_type = 'cli' + ext = '-script.py' + old = ['.py','.pyc','.pyo'] + hdr = cls._adjust_header(type_, header) + blockers = [name+x for x in old] + yield (name+ext, hdr+script_text, 't', blockers) + yield ( + name+'.exe', get_win_launcher(launcher_type), + 'b' # write in binary mode + ) + if not is_64bit(): + # install a manifest for the launcher to prevent Windows + # from detecting it as an installer (which it will for + # launchers like easy_install.exe). Consider only + # adding a manifest for launchers detected as installers. + # See Distribute #143 for details. + m_name = name + '.exe.manifest' + yield (m_name, load_launcher_manifest(name), 't') + +# for backward-compatibility +get_script_args = ScriptWriter.get_script_args + +def get_win_launcher(type): + """ + Load the Windows launcher (executable) suitable for launching a script. + + `type` should be either 'cli' or 'gui' + + Returns the executable as a byte string. + """ + launcher_fn = '%s.exe' % type + if platform.machine().lower()=='arm': + launcher_fn = launcher_fn.replace(".", "-arm.") + if is_64bit(): + launcher_fn = launcher_fn.replace(".", "-64.") + else: + launcher_fn = launcher_fn.replace(".", "-32.") + return resource_string('setuptools', launcher_fn) + +def load_launcher_manifest(name): + manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml') + if sys.version_info[0] < 3: + return manifest % vars() + else: + return manifest.decode('utf-8') % vars() + +def rmtree(path, ignore_errors=False, onerror=auto_chmod): + """Recursively delete a directory tree.
+ + This code is taken from the Python 2.4 version of 'shutil', because + the 2.3 version doesn't really work right. + """ + if ignore_errors: + def onerror(*args): + pass + elif onerror is None: + def onerror(*args): + raise + names = [] + try: + names = os.listdir(path) + except os.error: + onerror(os.listdir, path, sys.exc_info()) + for name in names: + fullname = os.path.join(path, name) + try: + mode = os.lstat(fullname).st_mode + except os.error: + mode = 0 + if stat.S_ISDIR(mode): + rmtree(fullname, ignore_errors, onerror) + else: + try: + os.remove(fullname) + except os.error: + onerror(os.remove, fullname, sys.exc_info()) + try: + os.rmdir(path) + except os.error: + onerror(os.rmdir, path, sys.exc_info()) + +def current_umask(): + tmp = os.umask(0x12) # 022 + os.umask(tmp) + return tmp + +def bootstrap(): + # This function is called when setuptools*.egg is run using /bin/sh + import setuptools + argv0 = os.path.dirname(setuptools.__path__[0]) + sys.argv[0] = argv0 + sys.argv.append(argv0) + main() + +def main(argv=None, **kw): + from setuptools import setup + from setuptools.dist import Distribution + import distutils.core + + USAGE = """\ +usage: %(script)s [options] requirement_or_url ... + or: %(script)s --help +""" + + def gen_usage(script_name): + return USAGE % dict( + script=os.path.basename(script_name), + ) + + def with_ei_usage(f): + old_gen_usage = distutils.core.gen_usage + try: + distutils.core.gen_usage = gen_usage + return f() + finally: + distutils.core.gen_usage = old_gen_usage + + class DistributionWithoutHelpCommands(Distribution): + common_usage = "" + + def _show_help(self,*args,**kw): + with_ei_usage(lambda: Distribution._show_help(self,*args,**kw)) + + if argv is None: + argv = sys.argv[1:] + + with_ei_usage(lambda: + setup( + script_args = ['-q','easy_install', '-v']+argv, + script_name = sys.argv[0] or 'easy_install', + distclass=DistributionWithoutHelpCommands, **kw + ) + ) diff --git a/awx/lib/site-packages/setuptools/command/egg_info.py b/awx/lib/site-packages/setuptools/command/egg_info.py new file mode 100644 index 0000000000..642687b23f --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/egg_info.py @@ -0,0 +1,489 @@ +"""setuptools.command.egg_info + +Create a distribution's .egg-info directory and contents""" + +# This module should be kept compatible with Python 2.3 +import os, re, sys +from setuptools import Command +from distutils.errors import * +from distutils import log +from setuptools.command.sdist import sdist +from setuptools.compat import basestring +from distutils.util import convert_path +from distutils.filelist import FileList as _FileList +from pkg_resources import parse_requirements, safe_name, parse_version, \ + safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename +from setuptools.command.sdist import walk_revctrl + +class egg_info(Command): + description = "create a distribution's .egg-info directory" + + user_options = [ + ('egg-base=', 'e', "directory containing .egg-info directories" + " (default: top of the source tree)"), + ('tag-svn-revision', 'r', + "Add subversion revision ID to version number"), + ('tag-date', 'd', "Add date stamp (e.g. 
20050528) to version number"), + ('tag-build=', 'b', "Specify explicit tag to add to version number"), + ('no-svn-revision', 'R', + "Don't add subversion revision ID [default]"), + ('no-date', 'D', "Don't include date stamp [default]"), + ] + + boolean_options = ['tag-date', 'tag-svn-revision'] + negative_opt = {'no-svn-revision': 'tag-svn-revision', + 'no-date': 'tag-date'} + + + + + + + + def initialize_options(self): + self.egg_name = None + self.egg_version = None + self.egg_base = None + self.egg_info = None + self.tag_build = None + self.tag_svn_revision = 0 + self.tag_date = 0 + self.broken_egg_info = False + self.vtags = None + + def save_version_info(self, filename): + from setuptools.command.setopt import edit_config + edit_config( + filename, + {'egg_info': + {'tag_svn_revision':0, 'tag_date': 0, 'tag_build': self.tags()} + } + ) + + + + + + + + + + + + + + + + + + + + + + + def finalize_options (self): + self.egg_name = safe_name(self.distribution.get_name()) + self.vtags = self.tags() + self.egg_version = self.tagged_version() + + try: + list( + parse_requirements('%s==%s' % (self.egg_name,self.egg_version)) + ) + except ValueError: + raise DistutilsOptionError( + "Invalid distribution name or version syntax: %s-%s" % + (self.egg_name,self.egg_version) + ) + + if self.egg_base is None: + dirs = self.distribution.package_dir + self.egg_base = (dirs or {}).get('',os.curdir) + + self.ensure_dirname('egg_base') + self.egg_info = to_filename(self.egg_name)+'.egg-info' + if self.egg_base != os.curdir: + self.egg_info = os.path.join(self.egg_base, self.egg_info) + if '-' in self.egg_name: self.check_broken_egg_info() + + # Set package version for the benefit of dumber commands + # (e.g. sdist, bdist_wininst, etc.) + # + self.distribution.metadata.version = self.egg_version + + # If we bootstrapped around the lack of a PKG-INFO, as might be the + # case in a fresh checkout, make sure that any special tags get added + # to the version info + # + pd = self.distribution._patched_dist + if pd is not None and pd.key==self.egg_name.lower(): + pd._version = self.egg_version + pd._parsed_version = parse_version(self.egg_version) + self.distribution._patched_dist = None + + + def write_or_delete_file(self, what, filename, data, force=False): + """Write `data` to `filename` or delete if empty + + If `data` is non-empty, this routine is the same as ``write_file()``. + If `data` is empty but not ``None``, this is the same as calling + ``delete_file(filename)`. If `data` is ``None``, then this is a no-op + unless `filename` exists, in which case a warning is issued about the + orphaned file (if `force` is false), or deleted (if `force` is true). + """ + if data: + self.write_file(what, filename, data) + elif os.path.exists(filename): + if data is None and not force: + log.warn( + "%s not set in setup(), but %s exists", what, filename + ) + return + else: + self.delete_file(filename) + + def write_file(self, what, filename, data): + """Write `data` to `filename` (if not a dry run) after announcing it + + `what` is used in a log message to identify what is being written + to the file. 
+ """ + log.info("writing %s to %s", what, filename) + if sys.version_info >= (3,): + data = data.encode("utf-8") + if not self.dry_run: + f = open(filename, 'wb') + f.write(data) + f.close() + + def delete_file(self, filename): + """Delete `filename` (if not a dry run) after announcing it""" + log.info("deleting %s", filename) + if not self.dry_run: + os.unlink(filename) + + def tagged_version(self): + version = self.distribution.get_version() + # egg_info may be called more than once for a distribution, + # in which case the version string already contains all tags. + if self.vtags and version.endswith(self.vtags): + return safe_version(version) + return safe_version(version + self.vtags) + + def run(self): + self.mkpath(self.egg_info) + installer = self.distribution.fetch_build_egg + for ep in iter_entry_points('egg_info.writers'): + writer = ep.load(installer=installer) + writer(self, ep.name, os.path.join(self.egg_info,ep.name)) + + # Get rid of native_libs.txt if it was put there by older bdist_egg + nl = os.path.join(self.egg_info, "native_libs.txt") + if os.path.exists(nl): + self.delete_file(nl) + + self.find_sources() + + def tags(self): + version = '' + if self.tag_build: + version+=self.tag_build + if self.tag_svn_revision and ( + os.path.exists('.svn') or os.path.exists('PKG-INFO') + ): version += '-r%s' % self.get_svn_revision() + if self.tag_date: + import time; version += time.strftime("-%Y%m%d") + return version + + + + + + + + + + + + + + + + + + @staticmethod + def get_svn_revision(): + revision = 0 + urlre = re.compile('url="([^"]+)"') + revre = re.compile('committed-rev="(\d+)"') + + for base,dirs,files in os.walk(os.curdir): + if '.svn' not in dirs: + dirs[:] = [] + continue # no sense walking uncontrolled subdirs + dirs.remove('.svn') + f = open(os.path.join(base,'.svn','entries')) + data = f.read() + f.close() + + if data.startswith('<?xml'): + dirurl = urlre.search(data).group(1) # get repository URL + localrev = max([int(m.group(1)) for m in revre.finditer(data)]+[0]) + else: + try: svnver = int(data.splitlines()[0]) + except: svnver=-1 + if svnver<8: + log.warn("unrecognized .svn/entries format; skipping %s", base) + dirs[:] = [] + continue + + data = list(map(str.splitlines,data.split('\n\x0c\n'))) + del data[0][0] # get rid of the '8' or '9' or '10' + dirurl = data[0][3] + localrev = max([int(d[9]) for d in data if len(d)>9 and d[9]]+[0]) + if base==os.curdir: + base_url = dirurl+'/' # save the root url + elif not dirurl.startswith(base_url): + dirs[:] = [] + continue # not part of the same svn tree, skip it + revision = max(revision, localrev) + + return str(revision or get_pkg_info_revision()) + + + + + def find_sources(self): + """Generate SOURCES.txt manifest file""" + manifest_filename = os.path.join(self.egg_info,"SOURCES.txt") + mm = manifest_maker(self.distribution) + mm.manifest = manifest_filename + mm.run() + self.filelist = mm.filelist + + def check_broken_egg_info(self): + bei = self.egg_name+'.egg-info' + if self.egg_base != os.curdir: + bei = os.path.join(self.egg_base, bei) + if os.path.exists(bei): + log.warn( + "-"*78+'\n' + "Note: Your current .egg-info directory has a '-' in its name;" + '\nthis will not work correctly with "setup.py develop".\n\n' + 'Please rename %s to %s to correct this problem.\n'+'-'*78, + bei, self.egg_info + ) + self.broken_egg_info = self.egg_info + self.egg_info = bei # make it work for now + +class FileList(_FileList): + """File list that accepts only existing, platform-independent paths""" + + def append(self, 
item): + if item.endswith('\r'): # Fix older sdists built on Windows + item = item[:-1] + path = convert_path(item) + + if sys.version_info >= (3,): + try: + if os.path.exists(path) or os.path.exists(path.encode('utf-8')): + self.files.append(path) + except UnicodeEncodeError: + # Accept UTF-8 filenames even if LANG=C + if os.path.exists(path.encode('utf-8')): + self.files.append(path) + else: + log.warn("'%s' not %s encodable -- skipping", path, + sys.getfilesystemencoding()) + else: + if os.path.exists(path): + self.files.append(path) + + + + + + + + +class manifest_maker(sdist): + + template = "MANIFEST.in" + + def initialize_options (self): + self.use_defaults = 1 + self.prune = 1 + self.manifest_only = 1 + self.force_manifest = 1 + + def finalize_options(self): + pass + + def run(self): + self.filelist = FileList() + if not os.path.exists(self.manifest): + self.write_manifest() # it must exist so it'll get in the list + self.filelist.findall() + self.add_defaults() + if os.path.exists(self.template): + self.read_template() + self.prune_file_list() + self.filelist.sort() + self.filelist.remove_duplicates() + self.write_manifest() + + def write_manifest (self): + """Write the file list in 'self.filelist' (presumably as filled in + by 'add_defaults()' and 'read_template()') to the manifest file + named by 'self.manifest'. + """ + # The manifest must be UTF-8 encodable. See #303. + if sys.version_info >= (3,): + files = [] + for file in self.filelist.files: + try: + file.encode("utf-8") + except UnicodeEncodeError: + log.warn("'%s' not UTF-8 encodable -- skipping" % file) + else: + files.append(file) + self.filelist.files = files + + files = self.filelist.files + if os.sep!='/': + files = [f.replace(os.sep,'/') for f in files] + self.execute(write_file, (self.manifest, files), + "writing manifest file '%s'" % self.manifest) + + def warn(self, msg): # suppress missing-file warnings from sdist + if not msg.startswith("standard file not found:"): + sdist.warn(self, msg) + + def add_defaults(self): + sdist.add_defaults(self) + self.filelist.append(self.template) + self.filelist.append(self.manifest) + rcfiles = list(walk_revctrl()) + if rcfiles: + self.filelist.extend(rcfiles) + elif os.path.exists(self.manifest): + self.read_manifest() + ei_cmd = self.get_finalized_command('egg_info') + self.filelist.include_pattern("*", prefix=ei_cmd.egg_info) + + def prune_file_list (self): + build = self.get_finalized_command('build') + base_dir = self.distribution.get_fullname() + self.filelist.exclude_pattern(None, prefix=build.build_base) + self.filelist.exclude_pattern(None, prefix=base_dir) + sep = re.escape(os.sep) + self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1) + + +def write_file (filename, contents): + """Create a file with the specified name and write 'contents' (a + sequence of strings without line terminators) to it. 
+ """ + contents = "\n".join(contents) + if sys.version_info >= (3,): + contents = contents.encode("utf-8") + f = open(filename, "wb") # always write POSIX-style manifest + f.write(contents) + f.close() + + + + + + + + + + + + + +def write_pkg_info(cmd, basename, filename): + log.info("writing %s", filename) + if not cmd.dry_run: + metadata = cmd.distribution.metadata + metadata.version, oldver = cmd.egg_version, metadata.version + metadata.name, oldname = cmd.egg_name, metadata.name + try: + # write unescaped data to PKG-INFO, so older pkg_resources + # can still parse it + metadata.write_pkg_info(cmd.egg_info) + finally: + metadata.name, metadata.version = oldname, oldver + + safe = getattr(cmd.distribution,'zip_safe',None) + from setuptools.command import bdist_egg + bdist_egg.write_safety_flag(cmd.egg_info, safe) + +def warn_depends_obsolete(cmd, basename, filename): + if os.path.exists(filename): + log.warn( + "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" + "Use the install_requires/extras_require setup() args instead." + ) + + +def write_requirements(cmd, basename, filename): + dist = cmd.distribution + data = ['\n'.join(yield_lines(dist.install_requires or ()))] + for extra,reqs in (dist.extras_require or {}).items(): + data.append('\n\n[%s]\n%s' % (extra, '\n'.join(yield_lines(reqs)))) + cmd.write_or_delete_file("requirements", filename, ''.join(data)) + +def write_toplevel_names(cmd, basename, filename): + pkgs = dict.fromkeys( + [k.split('.',1)[0] + for k in cmd.distribution.iter_distribution_names() + ] + ) + cmd.write_file("top-level names", filename, '\n'.join(pkgs)+'\n') + + + +def overwrite_arg(cmd, basename, filename): + write_arg(cmd, basename, filename, True) + +def write_arg(cmd, basename, filename, force=False): + argname = os.path.splitext(basename)[0] + value = getattr(cmd.distribution, argname, None) + if value is not None: + value = '\n'.join(value)+'\n' + cmd.write_or_delete_file(argname, filename, value, force) + +def write_entries(cmd, basename, filename): + ep = cmd.distribution.entry_points + + if isinstance(ep,basestring) or ep is None: + data = ep + elif ep is not None: + data = [] + for section, contents in ep.items(): + if not isinstance(contents,basestring): + contents = EntryPoint.parse_group(section, contents) + contents = '\n'.join(map(str,contents.values())) + data.append('[%s]\n%s\n\n' % (section,contents)) + data = ''.join(data) + + cmd.write_or_delete_file('entry points', filename, data, True) + +def get_pkg_info_revision(): + # See if we can get a -r### off of PKG-INFO, in case this is an sdist of + # a subversion revision + # + if os.path.exists('PKG-INFO'): + f = open('PKG-INFO','rU') + for line in f: + match = re.match(r"Version:.*-r(\d+)\s*$", line) + if match: + return int(match.group(1)) + f.close() + return 0 + + + +# diff --git a/awx/lib/site-packages/setuptools/command/install.py b/awx/lib/site-packages/setuptools/command/install.py new file mode 100644 index 0000000000..247c4f259c --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/install.py @@ -0,0 +1,124 @@ +import setuptools, sys, glob +from distutils.command.install import install as _install +from distutils.errors import DistutilsArgError + +class install(_install): + """Use easy_install to install the package, w/dependencies""" + + user_options = _install.user_options + [ + ('old-and-unmanageable', None, "Try not to use this!"), + ('single-version-externally-managed', None, + "used by system package builders to create 'flat' eggs"), + ] + boolean_options = 
_install.boolean_options + [ + 'old-and-unmanageable', 'single-version-externally-managed', + ] + new_commands = [ + ('install_egg_info', lambda self: True), + ('install_scripts', lambda self: True), + ] + _nc = dict(new_commands) + + def initialize_options(self): + _install.initialize_options(self) + self.old_and_unmanageable = None + self.single_version_externally_managed = None + self.no_compile = None # make DISTUTILS_DEBUG work right! + + def finalize_options(self): + _install.finalize_options(self) + if self.root: + self.single_version_externally_managed = True + elif self.single_version_externally_managed: + if not self.root and not self.record: + raise DistutilsArgError( + "You must specify --record or --root when building system" + " packages" + ) + + def handle_extra_path(self): + if self.root or self.single_version_externally_managed: + # explicit backward-compatibility mode, allow extra_path to work + return _install.handle_extra_path(self) + + # Ignore extra_path when installing an egg (or being run by another + # command without --root or --single-version-externally-managed) + self.path_file = None + self.extra_dirs = '' + + + def run(self): + # Explicit request for old-style install? Just do it + if self.old_and_unmanageable or self.single_version_externally_managed: + return _install.run(self) + + # Attempt to detect whether we were called from setup() or by another + # command. If we were called by setup(), our caller will be the + # 'run_command' method in 'distutils.dist', and *its* caller will be + # the 'run_commands' method. If we were called any other way, our + # immediate caller *might* be 'run_command', but it won't have been + # called by 'run_commands'. This is slightly kludgy, but seems to + # work. + # + caller = sys._getframe(2) + caller_module = caller.f_globals.get('__name__','') + caller_name = caller.f_code.co_name + + if caller_module != 'distutils.dist' or caller_name!='run_commands': + # We weren't called from the command line or setup(), so we + # should run in backward-compatibility mode to support bdist_* + # commands. + _install.run(self) + else: + self.do_egg_install() + + + + + + + def do_egg_install(self): + + easy_install = self.distribution.get_command_class('easy_install') + + cmd = easy_install( + self.distribution, args="x", root=self.root, record=self.record, + ) + cmd.ensure_finalized() # finalize before bdist_egg munges install cmd + cmd.always_copy_from = '.'
# make sure local-dir eggs get installed + + # pick up setup-dir .egg files only: no .egg-info + cmd.package_index.scan(glob.glob('*.egg')) + + self.run_command('bdist_egg') + args = [self.distribution.get_command_obj('bdist_egg').egg_output] + + if setuptools.bootstrap_install_from: + # Bootstrap self-installation of setuptools + args.insert(0, setuptools.bootstrap_install_from) + + cmd.args = args + cmd.run() + setuptools.bootstrap_install_from = None + +# XXX Python 3.1 doesn't see _nc if this is inside the class +install.sub_commands = [ + cmd for cmd in _install.sub_commands if cmd[0] not in install._nc + ] + install.new_commands + + + + + + + + + + + + + + + + +# diff --git a/awx/lib/site-packages/setuptools/command/install_egg_info.py b/awx/lib/site-packages/setuptools/command/install_egg_info.py new file mode 100644 index 0000000000..f44b34b555 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/install_egg_info.py @@ -0,0 +1,125 @@ +from setuptools import Command +from setuptools.archive_util import unpack_archive +from distutils import log, dir_util +import os, shutil, pkg_resources + +class install_egg_info(Command): + """Install an .egg-info directory for the package""" + + description = "Install an .egg-info directory for the package" + + user_options = [ + ('install-dir=', 'd', "directory to install to"), + ] + + def initialize_options(self): + self.install_dir = None + + def finalize_options(self): + self.set_undefined_options('install_lib',('install_dir','install_dir')) + ei_cmd = self.get_finalized_command("egg_info") + basename = pkg_resources.Distribution( + None, None, ei_cmd.egg_name, ei_cmd.egg_version + ).egg_name()+'.egg-info' + self.source = ei_cmd.egg_info + self.target = os.path.join(self.install_dir, basename) + self.outputs = [self.target] + + def run(self): + self.run_command('egg_info') + target = self.target + if os.path.isdir(self.target) and not os.path.islink(self.target): + dir_util.remove_tree(self.target, dry_run=self.dry_run) + elif os.path.exists(self.target): + self.execute(os.unlink,(self.target,),"Removing "+self.target) + if not self.dry_run: + pkg_resources.ensure_directory(self.target) + self.execute(self.copytree, (), + "Copying %s to %s" % (self.source, self.target) + ) + self.install_namespaces() + + def get_outputs(self): + return self.outputs + + def copytree(self): + # Copy the .egg-info tree to site-packages + def skimmer(src,dst): + # filter out source-control directories; note that 'src' is always + # a '/'-separated path, regardless of platform. 'dst' is a + # platform-specific path. + for skip in '.svn/','CVS/': + if src.startswith(skip) or '/'+skip in src: + return None + self.outputs.append(dst) + log.debug("Copying %s to %s", src, dst) + return dst + unpack_archive(self.source, self.target, skimmer) + + + + + + + + + + + + + + + + + + + + + + + + + + def install_namespaces(self): + nsp = self._get_all_ns_packages() + if not nsp: return + filename,ext = os.path.splitext(self.target) + filename += '-nspkg.pth'; self.outputs.append(filename) + log.info("Installing %s",filename) + if not self.dry_run: + f = open(filename,'wt') + for pkg in nsp: + # ensure pkg is not a unicode string under Python 2.7 + pkg = str(pkg) + pth = tuple(pkg.split('.')) + trailer = '\n' + if '.' 
in pkg: + trailer = ( + "; m and setattr(sys.modules[%r], %r, m)\n" + % ('.'.join(pth[:-1]), pth[-1]) + ) + f.write( + "import sys,types,os; " + "p = os.path.join(sys._getframe(1).f_locals['sitedir'], " + "*%(pth)r); " + "ie = os.path.exists(os.path.join(p,'__init__.py')); " + "m = not ie and " + "sys.modules.setdefault(%(pkg)r,types.ModuleType(%(pkg)r)); " + "mp = (m or []) and m.__dict__.setdefault('__path__',[]); " + "(p not in mp) and mp.append(p)%(trailer)s" + % locals() + ) + f.close() + + def _get_all_ns_packages(self): + nsp = {} + for pkg in self.distribution.namespace_packages or []: + pkg = pkg.split('.') + while pkg: + nsp['.'.join(pkg)] = 1 + pkg.pop() + nsp=list(nsp) + nsp.sort() # set up shorter names first + return nsp + + diff --git a/awx/lib/site-packages/setuptools/command/install_lib.py b/awx/lib/site-packages/setuptools/command/install_lib.py new file mode 100644 index 0000000000..82afa1421b --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/install_lib.py @@ -0,0 +1,82 @@ +from distutils.command.install_lib import install_lib as _install_lib +import os + +class install_lib(_install_lib): + """Don't add compiled flags to filenames of non-Python files""" + + def _bytecode_filenames (self, py_filenames): + bytecode_files = [] + for py_file in py_filenames: + if not py_file.endswith('.py'): + continue + if self.compile: + bytecode_files.append(py_file + "c") + if self.optimize > 0: + bytecode_files.append(py_file + "o") + + return bytecode_files + + def run(self): + self.build() + outfiles = self.install() + if outfiles is not None: + # always compile, in case we have any extension stubs to deal with + self.byte_compile(outfiles) + + def get_exclusions(self): + exclude = {} + nsp = self.distribution.namespace_packages + + if (nsp and self.get_finalized_command('install') + .single_version_externally_managed + ): + for pkg in nsp: + parts = pkg.split('.') + while parts: + pkgdir = os.path.join(self.install_dir, *parts) + for f in '__init__.py', '__init__.pyc', '__init__.pyo': + exclude[os.path.join(pkgdir,f)] = 1 + parts.pop() + return exclude + + def copy_tree( + self, infile, outfile, + preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1 + ): + assert preserve_mode and preserve_times and not preserve_symlinks + exclude = self.get_exclusions() + + if not exclude: + return _install_lib.copy_tree(self, infile, outfile) + + # Exclude namespace package __init__.py* files from the output + + from setuptools.archive_util import unpack_directory + from distutils import log + + outfiles = [] + + def pf(src, dst): + if dst in exclude: + log.warn("Skipping installation of %s (namespace package)",dst) + return False + + log.info("copying %s -> %s", src, os.path.dirname(dst)) + outfiles.append(dst) + return dst + + unpack_directory(infile, outfile, pf) + return outfiles + + def get_outputs(self): + outputs = _install_lib.get_outputs(self) + exclude = self.get_exclusions() + if exclude: + return [f for f in outputs if f not in exclude] + return outputs + + + + + + diff --git a/awx/lib/site-packages/setuptools/command/install_scripts.py b/awx/lib/site-packages/setuptools/command/install_scripts.py new file mode 100644 index 0000000000..105dabca6a --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/install_scripts.py @@ -0,0 +1,54 @@ +from distutils.command.install_scripts import install_scripts \ + as _install_scripts +from pkg_resources import Distribution, PathMetadata, ensure_directory +import os +from distutils import log + +class 
install_scripts(_install_scripts): + """Do normal script install, plus any egg_info wrapper scripts""" + + def initialize_options(self): + _install_scripts.initialize_options(self) + self.no_ep = False + + def run(self): + from setuptools.command.easy_install import get_script_args + from setuptools.command.easy_install import sys_executable + + self.run_command("egg_info") + if self.distribution.scripts: + _install_scripts.run(self) # run first to set up self.outfiles + else: + self.outfiles = [] + if self.no_ep: + # don't install entry point scripts into .egg file! + return + + ei_cmd = self.get_finalized_command("egg_info") + dist = Distribution( + ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), + ei_cmd.egg_name, ei_cmd.egg_version, + ) + bs_cmd = self.get_finalized_command('build_scripts') + executable = getattr(bs_cmd,'executable',sys_executable) + is_wininst = getattr( + self.get_finalized_command("bdist_wininst"), '_is_running', False + ) + for args in get_script_args(dist, executable, is_wininst): + self.write_script(*args) + + def write_script(self, script_name, contents, mode="t", *ignored): + """Write an executable file to the scripts directory""" + from setuptools.command.easy_install import chmod, current_umask + log.info("Installing %s script to %s", script_name, self.install_dir) + target = os.path.join(self.install_dir, script_name) + self.outfiles.append(target) + + mask = current_umask() + if not self.dry_run: + ensure_directory(target) + f = open(target,"w"+mode) + f.write(contents) + f.close() + chmod(target, 0x1FF-mask) # 0777 + diff --git a/awx/lib/site-packages/setuptools/command/register.py b/awx/lib/site-packages/setuptools/command/register.py new file mode 100644 index 0000000000..3b2e085907 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/register.py @@ -0,0 +1,10 @@ +from distutils.command.register import register as _register + +class register(_register): + __doc__ = _register.__doc__ + + def run(self): + # Make sure that we are using valid current name/version info + self.run_command('egg_info') + _register.run(self) + diff --git a/awx/lib/site-packages/setuptools/command/rotate.py b/awx/lib/site-packages/setuptools/command/rotate.py new file mode 100644 index 0000000000..b10acfb41f --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/rotate.py @@ -0,0 +1,83 @@ +import distutils, os +from setuptools import Command +from setuptools.compat import basestring +from distutils.util import convert_path +from distutils import log +from distutils.errors import * + +class rotate(Command): + """Delete older distributions""" + + description = "delete older distributions, keeping N newest files" + user_options = [ + ('match=', 'm', "patterns to match (required)"), + ('dist-dir=', 'd', "directory where the distributions are"), + ('keep=', 'k', "number of matching distributions to keep"), + ] + + boolean_options = [] + + def initialize_options(self): + self.match = None + self.dist_dir = None + self.keep = None + + def finalize_options(self): + if self.match is None: + raise DistutilsOptionError( + "Must specify one or more (comma-separated) match patterns " + "(e.g. 
'.zip' or '.egg')" + ) + if self.keep is None: + raise DistutilsOptionError("Must specify number of files to keep") + try: + self.keep = int(self.keep) + except ValueError: + raise DistutilsOptionError("--keep must be an integer") + if isinstance(self.match, basestring): + self.match = [ + convert_path(p.strip()) for p in self.match.split(',') + ] + self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) + + def run(self): + self.run_command("egg_info") + from glob import glob + for pattern in self.match: + pattern = self.distribution.get_name()+'*'+pattern + files = glob(os.path.join(self.dist_dir,pattern)) + files = [(os.path.getmtime(f),f) for f in files] + files.sort() + files.reverse() + + log.info("%d file(s) matching %s", len(files), pattern) + files = files[self.keep:] + for (t,f) in files: + log.info("Deleting %s", f) + if not self.dry_run: + os.unlink(f) + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/awx/lib/site-packages/setuptools/command/saveopts.py b/awx/lib/site-packages/setuptools/command/saveopts.py new file mode 100644 index 0000000000..7209be4cd9 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/saveopts.py @@ -0,0 +1,24 @@ +import distutils, os +from setuptools import Command +from setuptools.command.setopt import edit_config, option_base + +class saveopts(option_base): + """Save command-line options to a file""" + + description = "save supplied options to setup.cfg or other config file" + + def run(self): + dist = self.distribution + settings = {} + + for cmd in dist.command_options: + + if cmd=='saveopts': + continue # don't save our own options! + + for opt,(src,val) in dist.get_option_dict(cmd).items(): + if src=="command line": + settings.setdefault(cmd,{})[opt] = val + + edit_config(self.filename, settings, self.dry_run) + diff --git a/awx/lib/site-packages/setuptools/command/sdist.py b/awx/lib/site-packages/setuptools/command/sdist.py new file mode 100644 index 0000000000..6d63e819e7 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/sdist.py @@ -0,0 +1,282 @@ +import os +import re +import sys +from glob import glob + +import pkg_resources +from distutils.command.sdist import sdist as _sdist +from distutils.util import convert_path +from distutils import log + +READMES = ('README', 'README.rst', 'README.txt') + +entities = [ + ("<","<"), (">", ">"), (""", '"'), ("'", "'"), + ("&", "&") +] + +def unescape(data): + for old,new in entities: + data = data.replace(old,new) + return data + +def re_finder(pattern, postproc=None): + def find(dirname, filename): + f = open(filename,'rU') + data = f.read() + f.close() + for match in pattern.finditer(data): + path = match.group(1) + if postproc: + path = postproc(path) + yield joinpath(dirname,path) + return find + +def joinpath(prefix,suffix): + if not prefix: + return suffix + return os.path.join(prefix,suffix) + +def walk_revctrl(dirname=''): + """Find all files under revision control""" + for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): + for item in ep.load()(dirname): + yield item + +def _default_revctrl(dirname=''): + for path, finder in finders: + path = joinpath(dirname,path) + if os.path.isfile(path): + for path in finder(dirname,path): + if os.path.isfile(path): + yield path + elif os.path.isdir(path): + for item in _default_revctrl(path): + yield item + +def externals_finder(dirname, filename): + """Find any 'svn:externals' directories""" + found = False + f = open(filename,'rt') + for line in iter(f.readline, ''): # can't use direct iter! 
+ parts = line.split() + if len(parts)==2: + kind,length = parts + data = f.read(int(length)) + if kind=='K' and data=='svn:externals': + found = True + elif kind=='V' and found: + f.close() + break + else: + f.close() + return + + for line in data.splitlines(): + parts = line.split() + if parts: + yield joinpath(dirname, parts[0]) + + +entries_pattern = re.compile(r'name="([^"]+)"(?![^>]+deleted="true")', re.I) + +def entries_finder(dirname, filename): + f = open(filename,'rU') + data = f.read() + f.close() + if data.startswith('<?xml'): + for match in entries_pattern.finditer(data): + yield joinpath(dirname,unescape(match.group(1))) + else: + svnver=-1 + try: svnver = int(data.splitlines()[0]) + except: pass + if svnver<8: + log.warn("unrecognized .svn/entries format in %s", os.path.abspath(dirname)) + return + for record in map(str.splitlines, data.split('\n\x0c\n')[1:]): + # subversion 1.6/1.5/1.4 + if not record or len(record)>=6 and record[5]=="delete": + continue # skip deleted + yield joinpath(dirname, record[0]) + + +finders = [ + (convert_path('CVS/Entries'), + re_finder(re.compile(r"^\w?/([^/]+)/", re.M))), + (convert_path('.svn/entries'), entries_finder), + (convert_path('.svn/dir-props'), externals_finder), + (convert_path('.svn/dir-prop-base'), externals_finder), # svn 1.4 +] + + +class sdist(_sdist): + """Smart sdist that finds anything supported by revision control""" + + user_options = [ + ('formats=', None, + "formats for source distribution (comma-separated list)"), + ('keep-temp', 'k', + "keep the distribution tree around after creating " + + "archive file(s)"), + ('dist-dir=', 'd', + "directory to put the source distribution archive(s) in " + "[default: dist]"), + ] + + negative_opt = {} + + def run(self): + self.run_command('egg_info') + ei_cmd = self.get_finalized_command('egg_info') + self.filelist = ei_cmd.filelist + self.filelist.append(os.path.join(ei_cmd.egg_info,'SOURCES.txt')) + self.check_readme() + + # Run sub commands + for cmd_name in self.get_sub_commands(): + self.run_command(cmd_name) + + # Call check_metadata only if no 'check' command + # (distutils <= 2.6) + import distutils.command + if 'check' not in distutils.command.__all__: + self.check_metadata() + + self.make_distribution() + + dist_files = getattr(self.distribution,'dist_files',[]) + for file in self.archive_files: + data = ('sdist', '', file) + if data not in dist_files: + dist_files.append(data) + + def __read_template_hack(self): + # This grody hack closes the template file (MANIFEST.in) if an + # exception occurs during read_template. + # Doing so prevents an error when easy_install attempts to delete the + # file. + try: + _sdist.read_template(self) + except: + sys.exc_info()[2].tb_next.tb_frame.f_locals['template'].close() + raise + # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle + # has been fixed, so only override the method if we're using an earlier + # Python. 
+ has_leaky_handle = ( + sys.version_info < (2,7,2) + or (3,0) <= sys.version_info < (3,1,4) + or (3,2) <= sys.version_info < (3,2,1) + ) + if has_leaky_handle: + read_template = __read_template_hack + + def add_defaults(self): + standards = [READMES, + self.distribution.script_name] + for fn in standards: + if isinstance(fn, tuple): + alts = fn + got_it = 0 + for fn in alts: + if os.path.exists(fn): + got_it = 1 + self.filelist.append(fn) + break + + if not got_it: + self.warn("standard file not found: should have one of " + + ', '.join(alts)) + else: + if os.path.exists(fn): + self.filelist.append(fn) + else: + self.warn("standard file '%s' not found" % fn) + + optional = ['test/test*.py', 'setup.cfg'] + for pattern in optional: + files = list(filter(os.path.isfile, glob(pattern))) + if files: + self.filelist.extend(files) + + # getting python files + if self.distribution.has_pure_modules(): + build_py = self.get_finalized_command('build_py') + self.filelist.extend(build_py.get_source_files()) + # This functionality is incompatible with include_package_data, and + # will in fact create an infinite recursion if include_package_data + # is True. Use of include_package_data will imply that + # distutils-style automatic handling of package_data is disabled + if not self.distribution.include_package_data: + for _, src_dir, _, filenames in build_py.data_files: + self.filelist.extend([os.path.join(src_dir, filename) + for filename in filenames]) + + if self.distribution.has_ext_modules(): + build_ext = self.get_finalized_command('build_ext') + self.filelist.extend(build_ext.get_source_files()) + + if self.distribution.has_c_libraries(): + build_clib = self.get_finalized_command('build_clib') + self.filelist.extend(build_clib.get_source_files()) + + if self.distribution.has_scripts(): + build_scripts = self.get_finalized_command('build_scripts') + self.filelist.extend(build_scripts.get_source_files()) + + def check_readme(self): + for f in READMES: + if os.path.exists(f): + return + else: + self.warn( + "standard file not found: should have one of " +', '.join(READMES) + ) + + def make_release_tree(self, base_dir, files): + _sdist.make_release_tree(self, base_dir, files) + + # Save any egg_info command line options used to create this sdist + dest = os.path.join(base_dir, 'setup.cfg') + if hasattr(os,'link') and os.path.exists(dest): + # unlink and re-copy, since it might be hard-linked, and + # we don't want to change the source version + os.unlink(dest) + self.copy_file('setup.cfg', dest) + + self.get_finalized_command('egg_info').save_version_info(dest) + + def _manifest_is_not_generated(self): + # check for special comment used in 2.7.1 and higher + if not os.path.isfile(self.manifest): + return False + + fp = open(self.manifest, 'rbU') + try: + first_line = fp.readline() + finally: + fp.close() + return first_line != '# file GENERATED by distutils, do NOT edit\n'.encode() + + def read_manifest(self): + """Read the manifest file (named by 'self.manifest') and use it to + fill in 'self.filelist', the list of files to include in the source + distribution. + """ + log.info("reading manifest file '%s'", self.manifest) + manifest = open(self.manifest, 'rbU') + for line in manifest: + # The manifest must contain UTF-8. See #303. 
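+ # (each line is decoded on Python 3; non-UTF-8 lines are warned about and skipped)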
+ if sys.version_info >= (3,): + try: + line = line.decode('UTF-8') + except UnicodeDecodeError: + log.warn("%r not UTF-8 decodable -- skipping" % line) + continue + # ignore comments and blank lines + line = line.strip() + if line.startswith('#') or not line: + continue + self.filelist.append(line) + manifest.close() diff --git a/awx/lib/site-packages/setuptools/command/setopt.py b/awx/lib/site-packages/setuptools/command/setopt.py new file mode 100644 index 0000000000..aa468c88fe --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/setopt.py @@ -0,0 +1,164 @@ +import distutils, os +from setuptools import Command +from distutils.util import convert_path +from distutils import log +from distutils.errors import * + +__all__ = ['config_file', 'edit_config', 'option_base', 'setopt'] + + +def config_file(kind="local"): + """Get the filename of the distutils, local, global, or per-user config + + `kind` must be one of "local", "global", or "user" + """ + if kind=='local': + return 'setup.cfg' + if kind=='global': + return os.path.join( + os.path.dirname(distutils.__file__),'distutils.cfg' + ) + if kind=='user': + dot = os.name=='posix' and '.' or '' + return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot)) + raise ValueError( + "config_file() type must be 'local', 'global', or 'user'", kind + ) + + + + + + + + + + + + + + + +def edit_config(filename, settings, dry_run=False): + """Edit a configuration file to include `settings` + + `settings` is a dictionary of dictionaries or ``None`` values, keyed by + command/section name. A ``None`` value means to delete the entire section, + while a dictionary lists settings to be changed or deleted in that section. + A setting of ``None`` means to delete that setting. + """ + from setuptools.compat import ConfigParser + log.debug("Reading configuration from %s", filename) + opts = ConfigParser.RawConfigParser() + opts.read([filename]) + for section, options in settings.items(): + if options is None: + log.info("Deleting section [%s] from %s", section, filename) + opts.remove_section(section) + else: + if not opts.has_section(section): + log.debug("Adding new section [%s] to %s", section, filename) + opts.add_section(section) + for option,value in options.items(): + if value is None: + log.debug("Deleting %s.%s from %s", + section, option, filename + ) + opts.remove_option(section,option) + if not opts.options(section): + log.info("Deleting empty [%s] section from %s", + section, filename) + opts.remove_section(section) + else: + log.debug( + "Setting %s.%s to %r in %s", + section, option, value, filename + ) + opts.set(section,option,value) + + log.info("Writing %s", filename) + if not dry_run: + f = open(filename,'w'); opts.write(f); f.close() + +class option_base(Command): + """Abstract base class for commands that mess with config files""" + + user_options = [ + ('global-config', 'g', + "save options to the site-wide distutils.cfg file"), + ('user-config', 'u', + "save options to the current user's pydistutils.cfg file"), + ('filename=', 'f', + "configuration file to use (default=setup.cfg)"), + ] + + boolean_options = [ + 'global-config', 'user-config', + ] + + def initialize_options(self): + self.global_config = None + self.user_config = None + self.filename = None + + def finalize_options(self): + filenames = [] + if self.global_config: + filenames.append(config_file('global')) + if self.user_config: + filenames.append(config_file('user')) + if self.filename is not None: + filenames.append(self.filename) + if not filenames: + 
filenames.append(config_file('local')) + if len(filenames)>1: + raise DistutilsOptionError( + "Must specify only one configuration file option", + filenames + ) + self.filename, = filenames + + + + +class setopt(option_base): + """Save command-line options to a file""" + + description = "set an option in setup.cfg or another config file" + + user_options = [ + ('command=', 'c', 'command to set an option for'), + ('option=', 'o', 'option to set'), + ('set-value=', 's', 'value of the option'), + ('remove', 'r', 'remove (unset) the value'), + ] + option_base.user_options + + boolean_options = option_base.boolean_options + ['remove'] + + def initialize_options(self): + option_base.initialize_options(self) + self.command = None + self.option = None + self.set_value = None + self.remove = None + + def finalize_options(self): + option_base.finalize_options(self) + if self.command is None or self.option is None: + raise DistutilsOptionError("Must specify --command *and* --option") + if self.set_value is None and not self.remove: + raise DistutilsOptionError("Must specify --set-value or --remove") + + def run(self): + edit_config( + self.filename, { + self.command: {self.option.replace('-','_'):self.set_value} + }, + self.dry_run + ) + + + + + + diff --git a/awx/lib/site-packages/setuptools/command/test.py b/awx/lib/site-packages/setuptools/command/test.py new file mode 100644 index 0000000000..db2fc7b140 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/test.py @@ -0,0 +1,198 @@ +from setuptools import Command +from distutils.errors import DistutilsOptionError +import sys +from pkg_resources import * +from pkg_resources import _namespace_packages +from unittest import TestLoader, main + +class ScanningLoader(TestLoader): + + def loadTestsFromModule(self, module): + """Return a suite of all tests cases contained in the given module + + If the module is a package, load tests from all the modules in it. + If the module has an ``additional_tests`` function, call it and add + the return value to the tests. + """ + tests = [] + if module.__name__!='setuptools.tests.doctest': # ugh + tests.append(TestLoader.loadTestsFromModule(self,module)) + + if hasattr(module, "additional_tests"): + tests.append(module.additional_tests()) + + if hasattr(module, '__path__'): + for file in resource_listdir(module.__name__, ''): + if file.endswith('.py') and file!='__init__.py': + submodule = module.__name__+'.'+file[:-3] + else: + if resource_exists( + module.__name__, file+'/__init__.py' + ): + submodule = module.__name__+'.'+file + else: + continue + tests.append(self.loadTestsFromName(submodule)) + + if len(tests)!=1: + return self.suiteClass(tests) + else: + return tests[0] # don't create a nested suite for only one return + + +class test(Command): + + """Command to run unit tests after in-place build""" + + description = "run unit tests after in-place build" + + user_options = [ + ('test-module=','m', "Run 'test_suite' in specified module"), + ('test-suite=','s', + "Test suite to run (e.g. 
'some_module.test_suite')"), + ] + + def initialize_options(self): + self.test_suite = None + self.test_module = None + self.test_loader = None + + + def finalize_options(self): + + if self.test_suite is None: + if self.test_module is None: + self.test_suite = self.distribution.test_suite + else: + self.test_suite = self.test_module+".test_suite" + elif self.test_module: + raise DistutilsOptionError( + "You may specify a module or a suite, but not both" + ) + + self.test_args = [self.test_suite] + + if self.verbose: + self.test_args.insert(0,'--verbose') + if self.test_loader is None: + self.test_loader = getattr(self.distribution,'test_loader',None) + if self.test_loader is None: + self.test_loader = "setuptools.command.test:ScanningLoader" + + + + def with_project_on_sys_path(self, func): + if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False): + # If we run 2to3 we can not do this inplace: + + # Ensure metadata is up-to-date + self.reinitialize_command('build_py', inplace=0) + self.run_command('build_py') + bpy_cmd = self.get_finalized_command("build_py") + build_path = normalize_path(bpy_cmd.build_lib) + + # Build extensions + self.reinitialize_command('egg_info', egg_base=build_path) + self.run_command('egg_info') + + self.reinitialize_command('build_ext', inplace=0) + self.run_command('build_ext') + else: + # Without 2to3 inplace works fine: + self.run_command('egg_info') + + # Build extensions in-place + self.reinitialize_command('build_ext', inplace=1) + self.run_command('build_ext') + + ei_cmd = self.get_finalized_command("egg_info") + + old_path = sys.path[:] + old_modules = sys.modules.copy() + + try: + sys.path.insert(0, normalize_path(ei_cmd.egg_base)) + working_set.__init__() + add_activation_listener(lambda dist: dist.activate()) + require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version)) + func() + finally: + sys.path[:] = old_path + sys.modules.clear() + sys.modules.update(old_modules) + working_set.__init__() + + + def run(self): + if self.distribution.install_requires: + self.distribution.fetch_build_eggs(self.distribution.install_requires) + if self.distribution.tests_require: + self.distribution.fetch_build_eggs(self.distribution.tests_require) + + if self.test_suite: + cmd = ' '.join(self.test_args) + if self.dry_run: + self.announce('skipping "unittest %s" (dry run)' % cmd) + else: + self.announce('running "unittest %s"' % cmd) + self.with_project_on_sys_path(self.run_tests) + + + def run_tests(self): + import unittest + + # Purge modules under test from sys.modules. The test loader will + # re-import them from the build location. Required when 2to3 is used + # with namespace packages. + if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False): + module = self.test_args[-1].split('.')[0] + if module in _namespace_packages: + del_modules = [] + if module in sys.modules: + del_modules.append(module) + module += '.' 
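+ # also purge any already-imported submodules of the namespace package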
+ for name in sys.modules: + if name.startswith(module): + del_modules.append(name) + list(map(sys.modules.__delitem__, del_modules)) + + loader_ep = EntryPoint.parse("x="+self.test_loader) + loader_class = loader_ep.load(require=False) + cks = loader_class() + unittest.main( + None, None, [unittest.__file__]+self.test_args, + testLoader = cks + ) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/awx/lib/site-packages/setuptools/command/upload.py b/awx/lib/site-packages/setuptools/command/upload.py new file mode 100644 index 0000000000..a6eff3855b --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/upload.py @@ -0,0 +1,183 @@ +"""distutils.command.upload + +Implements the Distutils 'upload' subcommand (upload package to PyPI).""" + +from distutils import errors +from distutils import log +from distutils.core import Command +from distutils.spawn import spawn +try: + from hashlib import md5 +except ImportError: + from md5 import md5 +import os +import sys +import socket +import platform +import base64 + +from setuptools.compat import urlparse, StringIO, httplib, ConfigParser + +class upload(Command): + + description = "upload binary package to PyPI" + + DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi' + + user_options = [ + ('repository=', 'r', + "url of repository [default: %s]" % DEFAULT_REPOSITORY), + ('show-response', None, + 'display full response text from server'), + ('sign', 's', + 'sign files to upload using gpg'), + ('identity=', 'i', 'GPG identity used to sign files'), + ] + boolean_options = ['show-response', 'sign'] + + def initialize_options(self): + self.username = '' + self.password = '' + self.repository = '' + self.show_response = 0 + self.sign = False + self.identity = None + + def finalize_options(self): + if self.identity and not self.sign: + raise errors.DistutilsOptionError( + "Must use --sign for --identity to have meaning" + ) + if 'HOME' in os.environ: + rc = os.path.join(os.environ['HOME'], '.pypirc') + if os.path.exists(rc): + self.announce('Using PyPI login from %s' % rc) + config = ConfigParser.ConfigParser({ + 'username':'', + 'password':'', + 'repository':''}) + config.read(rc) + if not self.repository: + self.repository = config.get('server-login', 'repository') + if not self.username: + self.username = config.get('server-login', 'username') + if not self.password: + self.password = config.get('server-login', 'password') + if not self.repository: + self.repository = self.DEFAULT_REPOSITORY + + def run(self): + if not self.distribution.dist_files: + raise errors.DistutilsOptionError("No dist file created in earlier command") + for command, pyversion, filename in self.distribution.dist_files: + self.upload_file(command, pyversion, filename) + + def upload_file(self, command, pyversion, filename): + # Sign if requested + if self.sign: + gpg_args = ["gpg", "--detach-sign", "-a", filename] + if self.identity: + gpg_args[2:2] = ["--local-user", self.identity] + spawn(gpg_args, + dry_run=self.dry_run) + + # Fill in the data + f = open(filename,'rb') + content = f.read() + f.close() + basename = os.path.basename(filename) + comment = '' + if command=='bdist_egg' and self.distribution.has_ext_modules(): + comment = "built on %s" % platform.platform(terse=1) + data = { + ':action':'file_upload', + 'protocol_version':'1', + 'name':self.distribution.get_name(), + 'version':self.distribution.get_version(), + 'content':(basename,content), + 'filetype':command, + 'pyversion':pyversion, + 'md5_digest':md5(content).hexdigest(), + } + 
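# note the build platform in the comment field for binary distributions +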
if command == 'bdist_rpm': + dist, version, id = platform.dist() + if dist: + comment = 'built for %s %s' % (dist, version) + elif command == 'bdist_dumb': + comment = 'built for %s' % platform.platform(terse=1) + data['comment'] = comment + + if self.sign: + asc_file = open(filename + ".asc") + data['gpg_signature'] = (os.path.basename(filename) + ".asc", asc_file.read()) + asc_file.close() + + # set up the authentication + auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip() + + # Build up the MIME payload for the POST data + boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = '\n--' + boundary + end_boundary = sep_boundary + '--' + body = StringIO() + for key, value in data.items(): + # handle multiple entries for the same name + if not isinstance(value, list): + value = [value] + for value in value: + if type(value) is tuple: + fn = ';filename="%s"' % value[0] + value = value[1] + else: + fn = "" + value = str(value) + body.write(sep_boundary) + body.write('\nContent-Disposition: form-data; name="%s"'%key) + body.write(fn) + body.write("\n\n") + body.write(value) + if value and value[-1] == '\r': + body.write('\n') # write an extra newline (lurve Macs) + body.write(end_boundary) + body.write("\n") + body = body.getvalue() + + self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) + + # build the Request + # We can't use urllib2 since we need to send the Basic + # auth right with the first request + schema, netloc, url, params, query, fragments = \ + urlparse(self.repository) + assert not params and not query and not fragments + if schema == 'http': + http = httplib.HTTPConnection(netloc) + elif schema == 'https': + http = httplib.HTTPSConnection(netloc) + else: + raise AssertionError("unsupported schema " + schema) + + data = '' + try: + http.connect() + http.putrequest("POST", url) + http.putheader('Content-type', + 'multipart/form-data; boundary=%s'%boundary) + http.putheader('Content-length', str(len(body))) + http.putheader('Authorization', auth) + http.endheaders() + http.send(body) + except socket.error: + e = sys.exc_info()[1] + self.announce(str(e), log.ERROR) + return + + r = http.getresponse() + if r.status == 200: + self.announce('Server response (%s): %s' % (r.status, r.reason), + log.INFO) + else: + self.announce('Upload failed (%s): %s' % (r.status, r.reason), + log.ERROR) + if self.show_response: + print('-'*75, r.read(), '-'*75) diff --git a/awx/lib/site-packages/setuptools/command/upload_docs.py b/awx/lib/site-packages/setuptools/command/upload_docs.py new file mode 100644 index 0000000000..12bc916bb1 --- /dev/null +++ b/awx/lib/site-packages/setuptools/command/upload_docs.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +"""upload_docs + +Implements a Distutils 'upload_docs' subcommand (upload documentation to +PyPI's pythonhosted.org). 
+""" + +import os +import socket +import zipfile +import tempfile +import sys +import shutil + +from base64 import standard_b64encode +from pkg_resources import iter_entry_points + +from distutils import log +from distutils.errors import DistutilsOptionError + +try: + from distutils.command.upload import upload +except ImportError: + from setuptools.command.upload import upload + +from setuptools.compat import httplib, urlparse, unicode, iteritems + +_IS_PYTHON3 = sys.version > '3' + +if _IS_PYTHON3: + errors = 'surrogateescape' +else: + errors = 'strict' + + +# This is not just a replacement for byte literals +# but works as a general purpose encoder +def b(s, encoding='utf-8'): + if isinstance(s, unicode): + return s.encode(encoding, errors) + return s + + +class upload_docs(upload): + + description = 'Upload documentation to PyPI' + + user_options = [ + ('repository=', 'r', + "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY), + ('show-response', None, + 'display full response text from server'), + ('upload-dir=', None, 'directory to upload'), + ] + boolean_options = upload.boolean_options + + def has_sphinx(self): + if self.upload_dir is None: + for ep in iter_entry_points('distutils.commands', 'build_sphinx'): + return True + + sub_commands = [('build_sphinx', has_sphinx)] + + def initialize_options(self): + upload.initialize_options(self) + self.upload_dir = None + self.target_dir = None + + def finalize_options(self): + upload.finalize_options(self) + if self.upload_dir is None: + if self.has_sphinx(): + build_sphinx = self.get_finalized_command('build_sphinx') + self.target_dir = build_sphinx.builder_target_dir + else: + build = self.get_finalized_command('build') + self.target_dir = os.path.join(build.build_base, 'docs') + else: + self.ensure_dirname('upload_dir') + self.target_dir = self.upload_dir + self.announce('Using upload directory %s' % self.target_dir) + + def create_zipfile(self, filename): + zip_file = zipfile.ZipFile(filename, "w") + try: + self.mkpath(self.target_dir) # just in case + for root, dirs, files in os.walk(self.target_dir): + if root == self.target_dir and not files: + raise DistutilsOptionError( + "no files found in upload directory '%s'" + % self.target_dir) + for name in files: + full = os.path.join(root, name) + relative = root[len(self.target_dir):].lstrip(os.path.sep) + dest = os.path.join(relative, name) + zip_file.write(full, dest) + finally: + zip_file.close() + + def run(self): + # Run sub commands + for cmd_name in self.get_sub_commands(): + self.run_command(cmd_name) + + tmp_dir = tempfile.mkdtemp() + name = self.distribution.metadata.get_name() + zip_file = os.path.join(tmp_dir, "%s.zip" % name) + try: + self.create_zipfile(zip_file) + self.upload_file(zip_file) + finally: + shutil.rmtree(tmp_dir) + + def upload_file(self, filename): + f = open(filename, 'rb') + content = f.read() + f.close() + meta = self.distribution.metadata + data = { + ':action': 'doc_upload', + 'name': meta.get_name(), + 'content': (os.path.basename(filename), content), + } + # set up the authentication + credentials = b(self.username + ':' + self.password) + credentials = standard_b64encode(credentials) + if sys.version_info >= (3,): + credentials = credentials.decode('ascii') + auth = "Basic " + credentials + + # Build up the MIME payload for the POST data + boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = b('\n--') + b(boundary) + end_boundary = sep_boundary + b('--') + body = [] + for key, values in iteritems(data): + title 
= '\nContent-Disposition: form-data; name="%s"' % key + # handle multiple entries for the same name + if type(values) != type([]): + values = [values] + for value in values: + if type(value) is tuple: + title += '; filename="%s"' % value[0] + value = value[1] + else: + value = b(value) + body.append(sep_boundary) + body.append(b(title)) + body.append(b("\n\n")) + body.append(value) + if value and value[-1:] == b('\r'): + body.append(b('\n')) # write an extra newline (lurve Macs) + body.append(end_boundary) + body.append(b("\n")) + body = b('').join(body) + + self.announce("Submitting documentation to %s" % (self.repository), + log.INFO) + + # build the Request + # We can't use urllib2 since we need to send the Basic + # auth right with the first request + schema, netloc, url, params, query, fragments = \ + urlparse(self.repository) + assert not params and not query and not fragments + if schema == 'http': + conn = httplib.HTTPConnection(netloc) + elif schema == 'https': + conn = httplib.HTTPSConnection(netloc) + else: + raise AssertionError("unsupported schema "+schema) + + data = '' + loglevel = log.INFO + try: + conn.connect() + conn.putrequest("POST", url) + conn.putheader('Content-type', + 'multipart/form-data; boundary=%s'%boundary) + conn.putheader('Content-length', str(len(body))) + conn.putheader('Authorization', auth) + conn.endheaders() + conn.send(body) + except socket.error: + e = sys.exc_info()[1] + self.announce(str(e), log.ERROR) + return + + r = conn.getresponse() + if r.status == 200: + self.announce('Server response (%s): %s' % (r.status, r.reason), + log.INFO) + elif r.status == 301: + location = r.getheader('Location') + if location is None: + location = 'https://pythonhosted.org/%s/' % meta.get_name() + self.announce('Upload successful. 
Visit %s' % location, + log.INFO) + else: + self.announce('Upload failed (%s): %s' % (r.status, r.reason), + log.ERROR) + if self.show_response: + print('-'*75, r.read(), '-'*75) diff --git a/awx/lib/site-packages/setuptools/compat.py b/awx/lib/site-packages/setuptools/compat.py new file mode 100644 index 0000000000..529a5fbc4d --- /dev/null +++ b/awx/lib/site-packages/setuptools/compat.py @@ -0,0 +1,97 @@ +import sys +import itertools + +if sys.version_info[0] < 3: + PY3 = False + + basestring = basestring + import __builtin__ as builtins + import ConfigParser + from StringIO import StringIO + BytesIO = StringIO + execfile = execfile + func_code = lambda o: o.func_code + func_globals = lambda o: o.func_globals + im_func = lambda o: o.im_func + from htmlentitydefs import name2codepoint + import httplib + from BaseHTTPServer import HTTPServer + from SimpleHTTPServer import SimpleHTTPRequestHandler + from BaseHTTPServer import BaseHTTPRequestHandler + iteritems = lambda o: o.iteritems() + long_type = long + maxsize = sys.maxint + next = lambda o: o.next() + numeric_types = (int, long, float) + reduce = reduce + unichr = unichr + unicode = unicode + from urllib import url2pathname, splittag + import urllib2 + from urllib2 import urlopen, HTTPError, URLError, unquote, splituser + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit + xrange = xrange + filterfalse = itertools.ifilterfalse + + def exec_(code, globs=None, locs=None): + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb""") +else: + PY3 = True + + basestring = str + import builtins + import configparser as ConfigParser + exec_ = eval('exec') + from io import StringIO, BytesIO + func_code = lambda o: o.__code__ + func_globals = lambda o: o.__globals__ + im_func = lambda o: o.__func__ + from html.entities import name2codepoint + import http.client as httplib + from http.server import HTTPServer, SimpleHTTPRequestHandler + from http.server import BaseHTTPRequestHandler + iteritems = lambda o: o.items() + long_type = int + maxsize = sys.maxsize + next = next + numeric_types = (int, float) + from functools import reduce + unichr = chr + unicode = str + from urllib.error import HTTPError, URLError + import urllib.request as urllib2 + from urllib.request import urlopen, url2pathname + from urllib.parse import ( + urlparse, urlunparse, unquote, splituser, urljoin, urlsplit, + urlunsplit, splittag, + ) + xrange = range + filterfalse = itertools.filterfalse + + def execfile(fn, globs=None, locs=None): + if globs is None: + globs = globals() + if locs is None: + locs = globs + f = open(fn, 'rb') + try: + source = f.read() + finally: + f.close() + exec_(compile(source, fn, 'exec'), globs, locs) + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value diff --git a/awx/lib/site-packages/setuptools/depends.py b/awx/lib/site-packages/setuptools/depends.py new file mode 100644 index 0000000000..8b9d1217b1 --- /dev/null +++ b/awx/lib/site-packages/setuptools/depends.py @@ -0,0 +1,246 @@ +from __future__ import generators +import sys, imp, marshal +from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN +from distutils.version import StrictVersion, LooseVersion + +__all__ = [ + 'Require', 'find_module', 'get_module_constant', 'extract_constant' 
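+    # (Note: 'get_module_constant' and 'extract_constant' are removed from
+    # __all__ again at the bottom of this module on Jython/IronPython, where
+    # bytecode inspection is unavailable.)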
+] + +class Require: + """A prerequisite to building or installing a distribution""" + + def __init__(self,name,requested_version,module,homepage='', + attribute=None,format=None + ): + + if format is None and requested_version is not None: + format = StrictVersion + + if format is not None: + requested_version = format(requested_version) + if attribute is None: + attribute = '__version__' + + self.__dict__.update(locals()) + del self.self + + + def full_name(self): + """Return full package/distribution name, w/version""" + if self.requested_version is not None: + return '%s-%s' % (self.name,self.requested_version) + return self.name + + + def version_ok(self,version): + """Is 'version' sufficiently up-to-date?""" + return self.attribute is None or self.format is None or \ + str(version) != "unknown" and version >= self.requested_version + + + def get_version(self, paths=None, default="unknown"): + + """Get version number of installed module, 'None', or 'default' + + Search 'paths' for module. If not found, return 'None'. If found, + return the extracted version attribute, or 'default' if no version + attribute was specified, or the value cannot be determined without + importing the module. The version is formatted according to the + requirement's version format (if any), unless it is 'None' or the + supplied 'default'. + """ + + if self.attribute is None: + try: + f,p,i = find_module(self.module,paths) + if f: f.close() + return default + except ImportError: + return None + + v = get_module_constant(self.module,self.attribute,default,paths) + + if v is not None and v is not default and self.format is not None: + return self.format(v) + + return v + + + def is_present(self,paths=None): + """Return true if dependency is present on 'paths'""" + return self.get_version(paths) is not None + + + def is_current(self,paths=None): + """Return true if dependency is present and up-to-date on 'paths'""" + version = self.get_version(paths) + if version is None: + return False + return self.version_ok(version) + + +def _iter_code(code): + + """Yield '(op,arg)' pair for each operation in code object 'code'""" + + from array import array + from dis import HAVE_ARGUMENT, EXTENDED_ARG + + bytes = array('b',code.co_code) + eof = len(code.co_code) + + ptr = 0 + extended_arg = 0 + + while ptr<eof: + + op = bytes[ptr] + + if op>=HAVE_ARGUMENT: + + arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg + ptr += 3 + + if op==EXTENDED_ARG: + extended_arg = arg * long_type(65536) + continue + + else: + arg = None + ptr += 1 + + yield op,arg + + + + + + + + + + +def find_module(module, paths=None): + """Just like 'imp.find_module()', but with package support""" + + parts = module.split('.') + + while parts: + part = parts.pop(0) + f, path, (suffix,mode,kind) = info = imp.find_module(part, paths) + + if kind==PKG_DIRECTORY: + parts = parts or ['__init__'] + paths = [path] + + elif parts: + raise ImportError("Can't find %r in %s" % (parts,module)) + + return info + + + + + + + + + + + + + + + + + + + + + + + + +def get_module_constant(module, symbol, default=-1, paths=None): + + """Find 'module' by searching 'paths', and extract 'symbol' + + Return 'None' if 'module' does not exist on 'paths', or it does not define + 'symbol'. If the module defines 'symbol' as a constant, return the + constant. 
Otherwise, return 'default'.""" + + try: + f, path, (suffix,mode,kind) = find_module(module,paths) + except ImportError: + # Module doesn't exist + return None + + try: + if kind==PY_COMPILED: + f.read(8) # skip magic & date + code = marshal.load(f) + elif kind==PY_FROZEN: + code = imp.get_frozen_object(module) + elif kind==PY_SOURCE: + code = compile(f.read(), path, 'exec') + else: + # Not something we can parse; we'll have to import it. :( + if module not in sys.modules: + imp.load_module(module,f,path,(suffix,mode,kind)) + return getattr(sys.modules[module],symbol,None) + + finally: + if f: + f.close() + + return extract_constant(code,symbol,default) + + + + + + + + +def extract_constant(code,symbol,default=-1): + """Extract the constant value of 'symbol' from 'code' + + If the name 'symbol' is bound to a constant value by the Python code + object 'code', return that value. If 'symbol' is bound to an expression, + return 'default'. Otherwise, return 'None'. + + Return value is based on the first assignment to 'symbol'. 'symbol' must + be a global, or at least a non-"fast" local in the code block. That is, + only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' + must be present in 'code.co_names'. + """ + + if symbol not in code.co_names: + # name's not there, can't possibly be an assigment + return None + + name_idx = list(code.co_names).index(symbol) + + STORE_NAME = 90 + STORE_GLOBAL = 97 + LOAD_CONST = 100 + + const = default + + for op, arg in _iter_code(code): + + if op==LOAD_CONST: + const = code.co_consts[arg] + elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL): + return const + else: + const = default + +if sys.platform.startswith('java') or sys.platform == 'cli': + # XXX it'd be better to test assertions about bytecode instead... + del extract_constant, get_module_constant + __all__.remove('extract_constant') + __all__.remove('get_module_constant') + + diff --git a/awx/lib/site-packages/setuptools/dist.py b/awx/lib/site-packages/setuptools/dist.py new file mode 100644 index 0000000000..c5b02f99f4 --- /dev/null +++ b/awx/lib/site-packages/setuptools/dist.py @@ -0,0 +1,796 @@ +__all__ = ['Distribution'] + +import re +import os +import sys +import warnings +import distutils.log +import distutils.core +import distutils.cmd +from distutils.core import Distribution as _Distribution +from distutils.errors import (DistutilsOptionError, DistutilsPlatformError, + DistutilsSetupError) + +from setuptools.depends import Require +from setuptools.compat import numeric_types, basestring +import pkg_resources + +def _get_unpatched(cls): + """Protect against re-patching the distutils if reloaded + + Also ensures that no other distutils extension monkeypatched the distutils + first. 
+ """ + while cls.__module__.startswith('setuptools'): + cls, = cls.__bases__ + if not cls.__module__.startswith('distutils'): + raise AssertionError( + "distutils has already been patched by %r" % cls + ) + return cls + +_Distribution = _get_unpatched(_Distribution) + +sequence = tuple, list + +def check_importable(dist, attr, value): + try: + ep = pkg_resources.EntryPoint.parse('x='+value) + assert not ep.extras + except (TypeError,ValueError,AttributeError,AssertionError): + raise DistutilsSetupError( + "%r must be importable 'module:attrs' string (got %r)" + % (attr,value) + ) + + +def assert_string_list(dist, attr, value): + """Verify that value is a string list or None""" + try: + assert ''.join(value)!=value + except (TypeError,ValueError,AttributeError,AssertionError): + raise DistutilsSetupError( + "%r must be a list of strings (got %r)" % (attr,value) + ) +def check_nsp(dist, attr, value): + """Verify that namespace packages are valid""" + assert_string_list(dist,attr,value) + for nsp in value: + if not dist.has_contents_for(nsp): + raise DistutilsSetupError( + "Distribution contains no modules or packages for " + + "namespace package %r" % nsp + ) + if '.' in nsp: + parent = '.'.join(nsp.split('.')[:-1]) + if parent not in value: + distutils.log.warn( + "WARNING: %r is declared as a package namespace, but %r" + " is not: please correct this in setup.py", nsp, parent + ) + +def check_extras(dist, attr, value): + """Verify that extras_require mapping is valid""" + try: + for k,v in value.items(): + if ':' in k: + k,m = k.split(':',1) + if pkg_resources.invalid_marker(m): + raise DistutilsSetupError("Invalid environment marker: "+m) + list(pkg_resources.parse_requirements(v)) + except (TypeError,ValueError,AttributeError): + raise DistutilsSetupError( + "'extras_require' must be a dictionary whose values are " + "strings or lists of strings containing valid project/version " + "requirement specifiers." 
+ ) + +def assert_bool(dist, attr, value): + """Verify that value is True, False, 0, or 1""" + if bool(value) != value: + raise DistutilsSetupError( + "%r must be a boolean value (got %r)" % (attr,value) + ) +def check_requirements(dist, attr, value): + """Verify that install_requires is a valid requirements list""" + try: + list(pkg_resources.parse_requirements(value)) + except (TypeError,ValueError): + raise DistutilsSetupError( + "%r must be a string or list of strings " + "containing valid project/version requirement specifiers" % (attr,) + ) +def check_entry_points(dist, attr, value): + """Verify that entry_points map is parseable""" + try: + pkg_resources.EntryPoint.parse_map(value) + except ValueError: + e = sys.exc_info()[1] + raise DistutilsSetupError(e) + +def check_test_suite(dist, attr, value): + if not isinstance(value,basestring): + raise DistutilsSetupError("test_suite must be a string") + +def check_package_data(dist, attr, value): + """Verify that value is a dictionary of package names to glob lists""" + if isinstance(value,dict): + for k,v in value.items(): + if not isinstance(k,str): break + try: iter(v) + except TypeError: + break + else: + return + raise DistutilsSetupError( + attr+" must be a dictionary mapping package names to lists of " + "wildcard patterns" + ) + +def check_packages(dist, attr, value): + for pkgname in value: + if not re.match(r'\w+(\.\w+)*', pkgname): + distutils.log.warn( + "WARNING: %r not a valid package name; please use only" + ".-separated package names in setup.py", pkgname + ) + + +class Distribution(_Distribution): + """Distribution with support for features, tests, and package data + + This is an enhanced version of 'distutils.dist.Distribution' that + effectively adds the following new optional keyword arguments to 'setup()': + + 'install_requires' -- a string or sequence of strings specifying project + versions that the distribution requires when installed, in the format + used by 'pkg_resources.require()'. They will be installed + automatically when the package is installed. If you wish to use + packages that are not available in PyPI, or want to give your users an + alternate download location, you can add a 'find_links' option to the + '[easy_install]' section of your project's 'setup.cfg' file, and then + setuptools will scan the listed web pages for links that satisfy the + requirements. + + 'extras_require' -- a dictionary mapping names of optional "extras" to the + additional requirement(s) that using those extras incurs. For example, + this:: + + extras_require = dict(reST = ["docutils>=0.3", "reSTedit"]) + + indicates that the distribution can optionally provide an extra + capability called "reST", but it can only be used if docutils and + reSTedit are installed. If the user installs your package using + EasyInstall and requests one of your extras, the corresponding + additional requirements will be installed if needed. + + 'features' **deprecated** -- a dictionary mapping option names to + 'setuptools.Feature' + objects. Features are a portion of the distribution that can be + included or excluded based on user options, inter-feature dependencies, + and availability on the current system. Excluded features are omitted + from all setup commands, including source and binary distributions, so + you can create multiple distributions from the same source tree. + Feature names should be valid Python identifiers, except that they may + contain the '-' (minus) sign. 
Features can be included or excluded + via the command line options '--with-X' and '--without-X', where 'X' is + the name of the feature. Whether a feature is included by default, and + whether you are allowed to control this from the command line, is + determined by the Feature object. See the 'Feature' class for more + information. + + 'test_suite' -- the name of a test suite to run for the 'test' command. + If the user runs 'python setup.py test', the package will be installed, + and the named test suite will be run. The format is the same as + would be used on a 'unittest.py' command line. That is, it is the + dotted name of an object to import and call to generate a test suite. + + 'package_data' -- a dictionary mapping package names to lists of filenames + or globs to use to find data files contained in the named packages. + If the dictionary has filenames or globs listed under '""' (the empty + string), those names will be searched for in every package, in addition + to any names for the specific package. Data files found using these + names/globs will be installed along with the package, in the same + location as the package. Note that globs are allowed to reference + the contents of non-package subdirectories, as long as you use '/' as + a path separator. (Globs are automatically converted to + platform-specific paths at runtime.) + + In addition to these new keywords, this class also has several new methods + for manipulating the distribution's contents. For example, the 'include()' + and 'exclude()' methods can be thought of as in-place add and subtract + commands that add or remove packages, modules, extensions, and so on from + the distribution. They are used by the feature subsystem to configure the + distribution for the included and excluded features. + """ + + _patched_dist = None + + def patch_missing_pkg_info(self, attrs): + # Fake up a replacement for the data that would normally come from + # PKG-INFO, but which might not yet be built if this is a fresh + # checkout. 
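+        # (Illustrative example, not upstream text: running setup.py from a
+        # fresh checkout of 'foo' while a stale 'foo' dist without PKG-INFO
+        # sits on the working set replaces that dist's in-memory version
+        # with the version passed to setup().)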
+ # + if not attrs or 'name' not in attrs or 'version' not in attrs: + return + key = pkg_resources.safe_name(str(attrs['name'])).lower() + dist = pkg_resources.working_set.by_key.get(key) + if dist is not None and not dist.has_metadata('PKG-INFO'): + dist._version = pkg_resources.safe_version(str(attrs['version'])) + self._patched_dist = dist + + def __init__(self, attrs=None): + have_package_data = hasattr(self, "package_data") + if not have_package_data: + self.package_data = {} + _attrs_dict = attrs or {} + if 'features' in _attrs_dict or 'require_features' in _attrs_dict: + Feature.warn_deprecated() + self.require_features = [] + self.features = {} + self.dist_files = [] + self.src_root = attrs and attrs.pop("src_root", None) + self.patch_missing_pkg_info(attrs) + # Make sure we have any eggs needed to interpret 'attrs' + if attrs is not None: + self.dependency_links = attrs.pop('dependency_links', []) + assert_string_list(self,'dependency_links',self.dependency_links) + if attrs and 'setup_requires' in attrs: + self.fetch_build_eggs(attrs.pop('setup_requires')) + for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): + if not hasattr(self,ep.name): + setattr(self,ep.name,None) + _Distribution.__init__(self,attrs) + if isinstance(self.metadata.version, numeric_types): + # Some people apparently take "version number" too literally :) + self.metadata.version = str(self.metadata.version) + + def parse_command_line(self): + """Process features after parsing command line options""" + result = _Distribution.parse_command_line(self) + if self.features: + self._finalize_features() + return result + + def _feature_attrname(self,name): + """Convert feature name to corresponding option attribute name""" + return 'with_'+name.replace('-','_') + + def fetch_build_eggs(self, requires): + """Resolve pre-setup requirements""" + from pkg_resources import working_set, parse_requirements + for dist in working_set.resolve( + parse_requirements(requires), installer=self.fetch_build_egg + ): + working_set.add(dist) + + def finalize_options(self): + _Distribution.finalize_options(self) + if self.features: + self._set_global_opts_from_features() + + for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): + value = getattr(self,ep.name,None) + if value is not None: + ep.require(installer=self.fetch_build_egg) + ep.load()(self, ep.name, value) + if getattr(self, 'convert_2to3_doctests', None): + # XXX may convert to set here when we can rely on set being builtin + self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests] + else: + self.convert_2to3_doctests = [] + + def fetch_build_egg(self, req): + """Fetch an egg needed for building""" + + try: + cmd = self._egg_fetcher + cmd.package_index.to_scan = [] + except AttributeError: + from setuptools.command.easy_install import easy_install + dist = self.__class__({'script_args':['easy_install']}) + dist.parse_config_files() + opts = dist.get_option_dict('easy_install') + keep = ( + 'find_links', 'site_dirs', 'index_url', 'optimize', + 'site_dirs', 'allow_hosts' + ) + for key in opts.keys(): + if key not in keep: + del opts[key] # don't use any other settings + if self.dependency_links: + links = self.dependency_links[:] + if 'find_links' in opts: + links = opts['find_links'][1].split() + links + opts['find_links'] = ('setup', links) + cmd = easy_install( + dist, args=["x"], install_dir=os.curdir, exclude_scripts=True, + always_copy=False, build_directory=None, editable=False, + upgrade=False, 
multi_version=True, no_report=True, user=False + ) + cmd.ensure_finalized() + self._egg_fetcher = cmd + return cmd.easy_install(req) + + def _set_global_opts_from_features(self): + """Add --with-X/--without-X options based on optional features""" + + go = [] + no = self.negative_opt.copy() + + for name,feature in self.features.items(): + self._set_feature(name,None) + feature.validate(self) + + if feature.optional: + descr = feature.description + incdef = ' (default)' + excdef='' + if not feature.include_by_default(): + excdef, incdef = incdef, excdef + + go.append(('with-'+name, None, 'include '+descr+incdef)) + go.append(('without-'+name, None, 'exclude '+descr+excdef)) + no['without-'+name] = 'with-'+name + + self.global_options = self.feature_options = go + self.global_options + self.negative_opt = self.feature_negopt = no + + def _finalize_features(self): + """Add/remove features and resolve dependencies between them""" + + # First, flag all the enabled items (and thus their dependencies) + for name,feature in self.features.items(): + enabled = self.feature_is_included(name) + if enabled or (enabled is None and feature.include_by_default()): + feature.include_in(self) + self._set_feature(name,1) + + # Then disable the rest, so that off-by-default features don't + # get flagged as errors when they're required by an enabled feature + for name,feature in self.features.items(): + if not self.feature_is_included(name): + feature.exclude_from(self) + self._set_feature(name,0) + + def get_command_class(self, command): + """Pluggable version of get_command_class()""" + if command in self.cmdclass: + return self.cmdclass[command] + + for ep in pkg_resources.iter_entry_points('distutils.commands',command): + ep.require(installer=self.fetch_build_egg) + self.cmdclass[command] = cmdclass = ep.load() + return cmdclass + else: + return _Distribution.get_command_class(self, command) + + def print_commands(self): + for ep in pkg_resources.iter_entry_points('distutils.commands'): + if ep.name not in self.cmdclass: + cmdclass = ep.load(False) # don't require extras, we're not running + self.cmdclass[ep.name] = cmdclass + return _Distribution.print_commands(self) + + def _set_feature(self,name,status): + """Set feature's inclusion status""" + setattr(self,self._feature_attrname(name),status) + + def feature_is_included(self,name): + """Return 1 if feature is included, 0 if excluded, 'None' if unknown""" + return getattr(self,self._feature_attrname(name)) + + def include_feature(self,name): + """Request inclusion of feature named 'name'""" + + if self.feature_is_included(name)==0: + descr = self.features[name].description + raise DistutilsOptionError( + descr + " is required, but was excluded or is not available" + ) + self.features[name].include_in(self) + self._set_feature(name,1) + + def include(self,**attrs): + """Add items to distribution that are named in keyword arguments + + For example, 'dist.exclude(py_modules=["x"])' would add 'x' to + the distribution's 'py_modules' attribute, if it was not already + there. + + Currently, this method only supports inclusion for attributes that are + lists or tuples. If you need to add support for adding to other + attributes in this or a subclass, you can add an '_include_X' method, + where 'X' is the name of the attribute. The method will be called with + the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' + will try to call 'dist._include_foo({"bar":"baz"})', which can then + handle whatever special inclusion logic is needed. 
+ """ + for k,v in attrs.items(): + include = getattr(self, '_include_'+k, None) + if include: + include(v) + else: + self._include_misc(k,v) + + def exclude_package(self,package): + """Remove packages, modules, and extensions in named package""" + + pfx = package+'.' + if self.packages: + self.packages = [ + p for p in self.packages + if p != package and not p.startswith(pfx) + ] + + if self.py_modules: + self.py_modules = [ + p for p in self.py_modules + if p != package and not p.startswith(pfx) + ] + + if self.ext_modules: + self.ext_modules = [ + p for p in self.ext_modules + if p.name != package and not p.name.startswith(pfx) + ] + + def has_contents_for(self,package): + """Return true if 'exclude_package(package)' would do something""" + + pfx = package+'.' + + for p in self.iter_distribution_names(): + if p==package or p.startswith(pfx): + return True + + def _exclude_misc(self,name,value): + """Handle 'exclude()' for list/tuple attrs without a special handler""" + if not isinstance(value,sequence): + raise DistutilsSetupError( + "%s: setting must be a list or tuple (%r)" % (name, value) + ) + try: + old = getattr(self,name) + except AttributeError: + raise DistutilsSetupError( + "%s: No such distribution setting" % name + ) + if old is not None and not isinstance(old,sequence): + raise DistutilsSetupError( + name+": this setting cannot be changed via include/exclude" + ) + elif old: + setattr(self,name,[item for item in old if item not in value]) + + def _include_misc(self,name,value): + """Handle 'include()' for list/tuple attrs without a special handler""" + + if not isinstance(value,sequence): + raise DistutilsSetupError( + "%s: setting must be a list (%r)" % (name, value) + ) + try: + old = getattr(self,name) + except AttributeError: + raise DistutilsSetupError( + "%s: No such distribution setting" % name + ) + if old is None: + setattr(self,name,value) + elif not isinstance(old,sequence): + raise DistutilsSetupError( + name+": this setting cannot be changed via include/exclude" + ) + else: + setattr(self,name,old+[item for item in value if item not in old]) + + def exclude(self,**attrs): + """Remove items from distribution that are named in keyword arguments + + For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from + the distribution's 'py_modules' attribute. Excluding packages uses + the 'exclude_package()' method, so all of the package's contained + packages, modules, and extensions are also excluded. + + Currently, this method only supports exclusion from attributes that are + lists or tuples. If you need to add support for excluding from other + attributes in this or a subclass, you can add an '_exclude_X' method, + where 'X' is the name of the attribute. The method will be called with + the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})' + will try to call 'dist._exclude_foo({"bar":"baz"})', which can then + handle whatever special exclusion logic is needed. 
+ """ + for k,v in attrs.items(): + exclude = getattr(self, '_exclude_'+k, None) + if exclude: + exclude(v) + else: + self._exclude_misc(k,v) + + def _exclude_packages(self,packages): + if not isinstance(packages,sequence): + raise DistutilsSetupError( + "packages: setting must be a list or tuple (%r)" % (packages,) + ) + list(map(self.exclude_package, packages)) + + def _parse_command_opts(self, parser, args): + # Remove --with-X/--without-X options when processing command args + self.global_options = self.__class__.global_options + self.negative_opt = self.__class__.negative_opt + + # First, expand any aliases + command = args[0] + aliases = self.get_option_dict('aliases') + while command in aliases: + src,alias = aliases[command] + del aliases[command] # ensure each alias can expand only once! + import shlex + args[:1] = shlex.split(alias,True) + command = args[0] + + nargs = _Distribution._parse_command_opts(self, parser, args) + + # Handle commands that want to consume all remaining arguments + cmd_class = self.get_command_class(command) + if getattr(cmd_class,'command_consumes_arguments',None): + self.get_option_dict(command)['args'] = ("command line", nargs) + if nargs is not None: + return [] + + return nargs + + def get_cmdline_options(self): + """Return a '{cmd: {opt:val}}' map of all command-line options + + Option names are all long, but do not include the leading '--', and + contain dashes rather than underscores. If the option doesn't take + an argument (e.g. '--quiet'), the 'val' is 'None'. + + Note that options provided by config files are intentionally excluded. + """ + + d = {} + + for cmd,opts in self.command_options.items(): + + for opt,(src,val) in opts.items(): + + if src != "command line": + continue + + opt = opt.replace('_','-') + + if val==0: + cmdobj = self.get_command_obj(cmd) + neg_opt = self.negative_opt.copy() + neg_opt.update(getattr(cmdobj,'negative_opt',{})) + for neg,pos in neg_opt.items(): + if pos==opt: + opt=neg + val=None + break + else: + raise AssertionError("Shouldn't be able to get here") + + elif val==1: + val = None + + d.setdefault(cmd,{})[opt] = val + + return d + + def iter_distribution_names(self): + """Yield all packages, modules, and extension names in distribution""" + + for pkg in self.packages or (): + yield pkg + + for module in self.py_modules or (): + yield module + + for ext in self.ext_modules or (): + if isinstance(ext,tuple): + name, buildinfo = ext + else: + name = ext.name + if name.endswith('module'): + name = name[:-6] + yield name + + def handle_display_options(self, option_order): + """If there were any non-global "display-only" options + (--help-commands or the metadata display options) on the command + line, display the requested info and return true; else return + false. + """ + import sys + + if sys.version_info < (3,) or self.help_commands: + return _Distribution.handle_display_options(self, option_order) + + # Stdout may be StringIO (e.g. in tests) + import io + if not isinstance(sys.stdout, io.TextIOWrapper): + return _Distribution.handle_display_options(self, option_order) + + # Don't wrap stdout if utf-8 is already the encoding. Provides + # workaround for #334. 
+ if sys.stdout.encoding.lower() in ('utf-8', 'utf8'): + return _Distribution.handle_display_options(self, option_order) + + # Print metadata in UTF-8 no matter the platform + encoding = sys.stdout.encoding + errors = sys.stdout.errors + newline = sys.platform != 'win32' and '\n' or None + line_buffering = sys.stdout.line_buffering + + sys.stdout = io.TextIOWrapper( + sys.stdout.detach(), 'utf-8', errors, newline, line_buffering) + try: + return _Distribution.handle_display_options(self, option_order) + finally: + sys.stdout = io.TextIOWrapper( + sys.stdout.detach(), encoding, errors, newline, line_buffering) + + +# Install it throughout the distutils +for module in distutils.dist, distutils.core, distutils.cmd: + module.Distribution = Distribution + + +class Feature: + """ + **deprecated** -- The `Feature` facility was never completely implemented + or supported, `has reported issues + <https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in + a future version. + + A subset of the distribution that can be excluded if unneeded/wanted + + Features are created using these keyword arguments: + + 'description' -- a short, human readable description of the feature, to + be used in error messages, and option help messages. + + 'standard' -- if true, the feature is included by default if it is + available on the current system. Otherwise, the feature is only + included if requested via a command line '--with-X' option, or if + another included feature requires it. The default setting is 'False'. + + 'available' -- if true, the feature is available for installation on the + current system. The default setting is 'True'. + + 'optional' -- if true, the feature's inclusion can be controlled from the + command line, using the '--with-X' or '--without-X' options. If + false, the feature's inclusion status is determined automatically, + based on 'availabile', 'standard', and whether any other feature + requires it. The default setting is 'True'. + + 'require_features' -- a string or sequence of strings naming features + that should also be included if this feature is included. Defaults to + empty list. May also contain 'Require' objects that should be + added/removed from the distribution. + + 'remove' -- a string or list of strings naming packages to be removed + from the distribution if this feature is *not* included. If the + feature *is* included, this argument is ignored. This argument exists + to support removing features that "crosscut" a distribution, such as + defining a 'tests' feature that removes all the 'tests' subpackages + provided by other features. The default for this argument is an empty + list. (Note: the named package(s) or modules must exist in the base + distribution when the 'setup()' function is initially called.) + + other keywords -- any other keyword arguments are saved, and passed to + the distribution's 'include()' and 'exclude()' methods when the + feature is included or excluded, respectively. So, for example, you + could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be + added or removed from the distribution as appropriate. + + A feature must include at least one 'requires', 'remove', or other + keyword argument. Otherwise, it can't affect the distribution in any way. + Note also that you can subclass 'Feature' to create your own specialized + feature types that modify the distribution in other ways when included or + excluded. See the docstrings for the various methods here for more detail. 
+ Aside from the methods, the only feature attributes that distributions look + at are 'description' and 'optional'. + """ + + @staticmethod + def warn_deprecated(): + warnings.warn( + "Features are deprecated and will be removed in a future " + "version. See http://bitbucket.org/pypa/setuptools/65.", + DeprecationWarning, + stacklevel=3, + ) + + def __init__(self, description, standard=False, available=True, + optional=True, require_features=(), remove=(), **extras): + self.warn_deprecated() + + self.description = description + self.standard = standard + self.available = available + self.optional = optional + if isinstance(require_features,(str,Require)): + require_features = require_features, + + self.require_features = [ + r for r in require_features if isinstance(r,str) + ] + er = [r for r in require_features if not isinstance(r,str)] + if er: extras['require_features'] = er + + if isinstance(remove,str): + remove = remove, + self.remove = remove + self.extras = extras + + if not remove and not require_features and not extras: + raise DistutilsSetupError( + "Feature %s: must define 'require_features', 'remove', or at least one" + " of 'packages', 'py_modules', etc." + ) + + def include_by_default(self): + """Should this feature be included by default?""" + return self.available and self.standard + + def include_in(self,dist): + + """Ensure feature and its requirements are included in distribution + + You may override this in a subclass to perform additional operations on + the distribution. Note that this method may be called more than once + per feature, and so should be idempotent. + + """ + + if not self.available: + raise DistutilsPlatformError( + self.description+" is required," + "but is not available on this platform" + ) + + dist.include(**self.extras) + + for f in self.require_features: + dist.include_feature(f) + + def exclude_from(self,dist): + + """Ensure feature is excluded from distribution + + You may override this in a subclass to perform additional operations on + the distribution. This method will be called at most once per + feature, and only after all included features have been asked to + include themselves. + """ + + dist.exclude(**self.extras) + + if self.remove: + for item in self.remove: + dist.exclude_package(item) + + def validate(self,dist): + + """Verify that feature makes sense in context of distribution + + This method is called by the distribution just before it parses its + command line. It checks to ensure that the 'remove' attribute, if any, + contains only valid package/module names that are present in the base + distribution when 'setup()' is called. You may override it in a + subclass to perform any other required validation of the feature + against a target distribution. + """ + + for item in self.remove: + if not dist.has_contents_for(item): + raise DistutilsSetupError( + "%s wants to be able to remove %s, but the distribution" + " doesn't contain any packages or modules under %s" + % (self.description, item, item) + ) diff --git a/awx/lib/site-packages/setuptools/extension.py b/awx/lib/site-packages/setuptools/extension.py new file mode 100644 index 0000000000..d7892d3d9f --- /dev/null +++ b/awx/lib/site-packages/setuptools/extension.py @@ -0,0 +1,46 @@ +import sys +import distutils.core +import distutils.extension + +from setuptools.dist import _get_unpatched + +_Extension = _get_unpatched(distutils.core.Extension) + +def have_pyrex(): + """ + Return True if Cython or Pyrex can be imported. 
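+
+    (Note: the Extension class below depends on this check; when neither
+    import succeeds, declared '.pyx' sources are rewritten to their
+    pre-generated '.c' counterparts.)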
+ """ + pyrex_impls = 'Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext' + for pyrex_impl in pyrex_impls: + try: + # from (pyrex_impl) import build_ext + __import__(pyrex_impl, fromlist=['build_ext']).build_ext + return True + except Exception: + pass + return False + + +class Extension(_Extension): + """Extension that uses '.c' files in place of '.pyx' files""" + + def __init__(self, *args, **kw): + _Extension.__init__(self, *args, **kw) + if not have_pyrex(): + self._convert_pyx_sources_to_c() + + def _convert_pyx_sources_to_c(self): + "convert .pyx extensions to .c" + def pyx_to_c(source): + if source.endswith('.pyx'): + source = source[:-4] + '.c' + return source + self.sources = list(map(pyx_to_c, self.sources)) + +class Library(Extension): + """Just like a regular Extension, but built as a library instead""" + +distutils.core.Extension = Extension +distutils.extension.Extension = Extension +if 'distutils.command.build_ext' in sys.modules: + sys.modules['distutils.command.build_ext'].Extension = Extension diff --git a/awx/lib/site-packages/setuptools/package_index.py b/awx/lib/site-packages/setuptools/package_index.py new file mode 100644 index 0000000000..4c9e40a7b1 --- /dev/null +++ b/awx/lib/site-packages/setuptools/package_index.py @@ -0,0 +1,990 @@ +"""PyPI and direct package downloading""" +import sys +import os +import re +import shutil +import socket +import base64 + +from pkg_resources import ( + CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST, + require, Environment, find_distributions, safe_name, safe_version, + to_filename, Requirement, DEVELOP_DIST, +) +from setuptools import ssl_support +from distutils import log +from distutils.errors import DistutilsError +from setuptools.compat import (urllib2, httplib, StringIO, HTTPError, + urlparse, urlunparse, unquote, splituser, + url2pathname, name2codepoint, + unichr, urljoin, urlsplit, urlunsplit) +from setuptools.compat import filterfalse +from fnmatch import translate +from setuptools.py24compat import hashlib +from setuptools.py24compat import wraps +from setuptools.py26compat import strip_fragment +from setuptools.py27compat import get_all_headers + +EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$') +HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I) +# this is here to fix emacs' cruddy broken syntax highlighting +PYPI_MD5 = re.compile( + '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)' + 'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)' +) +URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match +EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() + +__all__ = [ + 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst', + 'interpret_distro_name', +] + +_SOCKET_TIMEOUT = 15 + +def parse_bdist_wininst(name): + """Return (base,pyversion) or (None,None) for possible .exe name""" + + lower = name.lower() + base, py_ver, plat = None, None, None + + if lower.endswith('.exe'): + if lower.endswith('.win32.exe'): + base = name[:-10] + plat = 'win32' + elif lower.startswith('.win32-py',-16): + py_ver = name[-7:-4] + base = name[:-16] + plat = 'win32' + elif lower.endswith('.win-amd64.exe'): + base = name[:-14] + plat = 'win-amd64' + elif lower.startswith('.win-amd64-py',-20): + py_ver = name[-7:-4] + base = name[:-20] + plat = 'win-amd64' + return base,py_ver,plat + + +def egg_info_for_url(url): + scheme, server, path, parameters, query, fragment = urlparse(url) + base = unquote(path.split('/')[-1]) + if server=='sourceforge.net' and base=='download': 
# XXX Yuck + base = unquote(path.split('/')[-2]) + if '#' in base: base, fragment = base.split('#',1) + return base,fragment + +def distros_for_url(url, metadata=None): + """Yield egg or source distribution objects that might be found at a URL""" + base, fragment = egg_info_for_url(url) + for dist in distros_for_location(url, base, metadata): yield dist + if fragment: + match = EGG_FRAGMENT.match(fragment) + if match: + for dist in interpret_distro_name( + url, match.group(1), metadata, precedence = CHECKOUT_DIST + ): + yield dist + +def distros_for_location(location, basename, metadata=None): + """Yield egg or source distribution objects based on basename""" + if basename.endswith('.egg.zip'): + basename = basename[:-4] # strip the .zip + if basename.endswith('.egg') and '-' in basename: + # only one, unambiguous interpretation + return [Distribution.from_location(location, basename, metadata)] + if basename.endswith('.exe'): + win_base, py_ver, platform = parse_bdist_wininst(basename) + if win_base is not None: + return interpret_distro_name( + location, win_base, metadata, py_ver, BINARY_DIST, platform + ) + # Try source distro extensions (.zip, .tgz, etc.) + # + for ext in EXTENSIONS: + if basename.endswith(ext): + basename = basename[:-len(ext)] + return interpret_distro_name(location, basename, metadata) + return [] # no extension matched + +def distros_for_filename(filename, metadata=None): + """Yield possible egg or source distribution objects based on a filename""" + return distros_for_location( + normalize_path(filename), os.path.basename(filename), metadata + ) + + +def interpret_distro_name( + location, basename, metadata, py_version=None, precedence=SOURCE_DIST, + platform=None + ): + """Generate alternative interpretations of a source distro name + + Note: if `location` is a filesystem filename, you should call + ``pkg_resources.normalize_path()`` on it before passing it to this + routine! + """ + # Generate alternative interpretations of a source distro name + # Because some packages are ambiguous as to name/versions split + # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc. + # So, we generate each possible interepretation (e.g. "adns, python-1.1.0" + # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice, + # the spurious interpretations should be ignored, because in the event + # there's also an "adns" package, the spurious "python-1.1.0" version will + # compare lower than any numeric version number, and is therefore unlikely + # to match a request for it. It's still a potential problem, though, and + # in the long run PyPI and the distutils should go for "safe" names and + # versions in distribution archive names (sdist and bdist). + + parts = basename.split('-') + if not py_version: + for i,p in enumerate(parts[2:]): + if len(p)==5 and p.startswith('py2.'): + return # It's a bdist_dumb, not an sdist -- bail out + + for p in range(1,len(parts)+1): + yield Distribution( + location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), + py_version=py_version, precedence = precedence, + platform = platform + ) + +# From Python 2.7 docs +def unique_everseen(iterable, key=None): + "List unique elements, preserving order. Remember all elements ever seen." 
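+    # (Note: wrapped by the @unique_values decorator below so that, e.g.,
+    # find_external_links() never yields the same URL twice.)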
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D + # unique_everseen('ABBCcAD', str.lower) --> A B C D + seen = set() + seen_add = seen.add + if key is None: + for element in filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element + else: + for element in iterable: + k = key(element) + if k not in seen: + seen_add(k) + yield element + +def unique_values(func): + """ + Wrap a function returning an iterable such that the resulting iterable + only ever yields unique items. + """ + @wraps(func) + def wrapper(*args, **kwargs): + return unique_everseen(func(*args, **kwargs)) + return wrapper + +REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) +# this line is here to fix emacs' cruddy broken syntax highlighting + +@unique_values +def find_external_links(url, page): + """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" + + for match in REL.finditer(page): + tag, rel = match.groups() + rels = set(map(str.strip, rel.lower().split(','))) + if 'homepage' in rels or 'download' in rels: + for match in HREF.finditer(tag): + yield urljoin(url, htmldecode(match.group(1))) + + for tag in ("<th>Home Page", "<th>Download URL"): + pos = page.find(tag) + if pos!=-1: + match = HREF.search(page,pos) + if match: + yield urljoin(url, htmldecode(match.group(1))) + +user_agent = "Python-urllib/%s setuptools/%s" % ( + sys.version[:3], require('setuptools')[0].version +) + +class ContentChecker(object): + """ + A null content checker that defines the interface for checking content + """ + def feed(self, block): + """ + Feed a block of data to the hash. + """ + return + + def is_valid(self): + """ + Check the hash. Return False if validation fails. + """ + return True + + def report(self, reporter, template): + """ + Call reporter with information about the checker (hash name) + substituted into the template. 
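+
+        (For this null checker the method is a no-op; the HashChecker
+        override below reports the active hash name, e.g. "md5".)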
+ """ + return + +class HashChecker(ContentChecker): + pattern = re.compile( + r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)=' + r'(?P<expected>[a-f0-9]+)' + ) + + def __init__(self, hash_name, expected): + self.hash_name = hash_name + self.hash = hashlib.new(hash_name) + self.expected = expected + + @classmethod + def from_url(cls, url): + "Construct a (possibly null) ContentChecker from a URL" + fragment = urlparse(url)[-1] + if not fragment: + return ContentChecker() + match = cls.pattern.search(fragment) + if not match: + return ContentChecker() + return cls(**match.groupdict()) + + def feed(self, block): + self.hash.update(block) + + def is_valid(self): + return self.hash.hexdigest() == self.expected + + def report(self, reporter, template): + msg = template % self.hash_name + return reporter(msg) + + +class PackageIndex(Environment): + """A distribution index that scans web pages for download URLs""" + + def __init__( + self, index_url="https://pypi.python.org/simple", hosts=('*',), + ca_bundle=None, verify_ssl=True, *args, **kw + ): + Environment.__init__(self,*args,**kw) + self.index_url = index_url + "/"[:not index_url.endswith('/')] + self.scanned_urls = {} + self.fetched_urls = {} + self.package_pages = {} + self.allows = re.compile('|'.join(map(translate,hosts))).match + self.to_scan = [] + if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()): + self.opener = ssl_support.opener_for(ca_bundle) + else: self.opener = urllib2.urlopen + + def process_url(self, url, retrieve=False): + """Evaluate a URL as a possible download, and maybe retrieve it""" + if url in self.scanned_urls and not retrieve: + return + self.scanned_urls[url] = True + if not URL_SCHEME(url): + self.process_filename(url) + return + else: + dists = list(distros_for_url(url)) + if dists: + if not self.url_ok(url): + return + self.debug("Found link: %s", url) + + if dists or not retrieve or url in self.fetched_urls: + list(map(self.add, dists)) + return # don't need the actual page + + if not self.url_ok(url): + self.fetched_urls[url] = True + return + + self.info("Reading %s", url) + self.fetched_urls[url] = True # prevent multiple fetch attempts + f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url) + if f is None: return + self.fetched_urls[f.url] = True + if 'html' not in f.headers.get('content-type', '').lower(): + f.close() # not html, we can't process it + return + + base = f.url # handle redirects + page = f.read() + if not isinstance(page, str): # We are in Python 3 and got bytes. We want str. 
+ if isinstance(f, HTTPError): + # Errors have no charset, assume latin1: + charset = 'latin-1' + else: + charset = f.headers.get_param('charset') or 'latin-1' + page = page.decode(charset, "ignore") + f.close() + for match in HREF.finditer(page): + link = urljoin(base, htmldecode(match.group(1))) + self.process_url(link) + if url.startswith(self.index_url) and getattr(f,'code',None)!=404: + page = self.process_index(url, page) + + def process_filename(self, fn, nested=False): + # process filenames or directories + if not os.path.exists(fn): + self.warn("Not found: %s", fn) + return + + if os.path.isdir(fn) and not nested: + path = os.path.realpath(fn) + for item in os.listdir(path): + self.process_filename(os.path.join(path,item), True) + + dists = distros_for_filename(fn) + if dists: + self.debug("Found: %s", fn) + list(map(self.add, dists)) + + def url_ok(self, url, fatal=False): + s = URL_SCHEME(url) + if (s and s.group(1).lower()=='file') or self.allows(urlparse(url)[1]): + return True + msg = ("\nNote: Bypassing %s (disallowed host; see " + "http://bit.ly/1dg9ijs for details).\n") + if fatal: + raise DistutilsError(msg % url) + else: + self.warn(msg, url) + + def scan_egg_links(self, search_path): + for item in search_path: + if os.path.isdir(item): + for entry in os.listdir(item): + if entry.endswith('.egg-link'): + self.scan_egg_link(item, entry) + + def scan_egg_link(self, path, entry): + lines = [_f for _f in map(str.strip, + open(os.path.join(path, entry))) if _f] + if len(lines)==2: + for dist in find_distributions(os.path.join(path, lines[0])): + dist.location = os.path.join(path, *lines) + dist.precedence = SOURCE_DIST + self.add(dist) + + def process_index(self,url,page): + """Process the contents of a PyPI page""" + def scan(link): + # Process a URL to see if it's for a package page + if link.startswith(self.index_url): + parts = list(map( + unquote, link[len(self.index_url):].split('/') + )) + if len(parts)==2 and '#' not in parts[1]: + # it's a package page, sanitize and index it + pkg = safe_name(parts[0]) + ver = safe_version(parts[1]) + self.package_pages.setdefault(pkg.lower(),{})[link] = True + return to_filename(pkg), to_filename(ver) + return None, None + + # process an index page into the package-page index + for match in HREF.finditer(page): + try: + scan(urljoin(url, htmldecode(match.group(1)))) + except ValueError: + pass + + pkg, ver = scan(url) # ensure this page is in the page index + if pkg: + # process individual package page + for new_url in find_external_links(url, page): + # Process the found URL + base, frag = egg_info_for_url(new_url) + if base.endswith('.py') and not frag: + if ver: + new_url+='#egg=%s-%s' % (pkg,ver) + else: + self.need_version_info(url) + self.scan_url(new_url) + + return PYPI_MD5.sub( + lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page + ) + else: + return "" # no sense double-scanning non-package pages + + def need_version_info(self, url): + self.scan_all( + "Page at %s links to .py file(s) without version info; an index " + "scan is required.", url + ) + + def scan_all(self, msg=None, *args): + if self.index_url not in self.fetched_urls: + if msg: self.warn(msg,*args) + self.info( + "Scanning index of all packages (this may take a while)" + ) + self.scan_url(self.index_url) + + def find_packages(self, requirement): + self.scan_url(self.index_url + requirement.unsafe_name+'/') + + if not self.package_pages.get(requirement.key): + # Fall back to safe version of the name + self.scan_url(self.index_url + 
requirement.project_name+'/') + + if not self.package_pages.get(requirement.key): + # We couldn't find the target package, so search the index page too + self.not_found_in_index(requirement) + + for url in list(self.package_pages.get(requirement.key,())): + # scan each page that might be related to the desired package + self.scan_url(url) + + def obtain(self, requirement, installer=None): + self.prescan() + self.find_packages(requirement) + for dist in self[requirement.key]: + if dist in requirement: + return dist + self.debug("%s does not match %s", requirement, dist) + return super(PackageIndex, self).obtain(requirement,installer) + + def check_hash(self, checker, filename, tfp): + """ + checker is a ContentChecker + """ + checker.report(self.debug, + "Validating %%s checksum for %s" % filename) + if not checker.is_valid(): + tfp.close() + os.unlink(filename) + raise DistutilsError( + "%s validation failed for %s; " + "possible download problem?" % ( + checker.hash.name, os.path.basename(filename)) + ) + + def add_find_links(self, urls): + """Add `urls` to the list that will be prescanned for searches""" + for url in urls: + if ( + self.to_scan is None # if we have already "gone online" + or not URL_SCHEME(url) # or it's a local file/directory + or url.startswith('file:') + or list(distros_for_url(url)) # or a direct package link + ): + # then go ahead and process it now + self.scan_url(url) + else: + # otherwise, defer retrieval till later + self.to_scan.append(url) + + def prescan(self): + """Scan urls scheduled for prescanning (e.g. --find-links)""" + if self.to_scan: + list(map(self.scan_url, self.to_scan)) + self.to_scan = None # from now on, go ahead and process immediately + + def not_found_in_index(self, requirement): + if self[requirement.key]: # we've seen at least one distro + meth, msg = self.info, "Couldn't retrieve index page for %r" + else: # no distros seen for this name, might be misspelled + meth, msg = (self.warn, + "Couldn't find index page for %r (maybe misspelled?)") + meth(msg, requirement.unsafe_name) + self.scan_all() + + def download(self, spec, tmpdir): + """Locate and/or download `spec` to `tmpdir`, returning a local path + + `spec` may be a ``Requirement`` object, or a string containing a URL, + an existing local filename, or a project/version requirement spec + (i.e. the string form of a ``Requirement`` object). If it is the URL + of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one + that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is + automatically created alongside the downloaded file. + + If `spec` is a ``Requirement`` object or a string containing a + project/version requirement spec, this method returns the location of + a matching distribution (possibly after downloading it to `tmpdir`). + If `spec` is a locally existing file or directory name, it is simply + returned unchanged. If `spec` is a URL, it is downloaded to a subpath + of `tmpdir`, and the local filename is returned. Various errors may be + raised if a problem occurs during downloading. 
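
# A minimal standalone sketch (not from the vendored code) of the
# fragment-hash validation that the HashChecker class earlier in this
# file performs during downloads: parse an '#md5=...' / '#sha256=...'
# style URL fragment, stream the file through the named hash, and
# compare digests.  The path and fragment used here are made up.
import hashlib
import re

_FRAGMENT = re.compile(
    r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
    r'(?P<expected>[a-f0-9]+)'
)

def fragment_hash_ok(path, fragment):
    match = _FRAGMENT.search(fragment)
    if not match:
        return True                     # no hash given: nothing to verify
    digest = hashlib.new(match.group('hash_name'))
    with open(path, 'rb') as fp:
        for block in iter(lambda: fp.read(8192), b''):
            digest.update(block)
    return digest.hexdigest() == match.group('expected')
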
+ """ + if not isinstance(spec,Requirement): + scheme = URL_SCHEME(spec) + if scheme: + # It's a url, download it to tmpdir + found = self._download_url(scheme.group(1), spec, tmpdir) + base, fragment = egg_info_for_url(spec) + if base.endswith('.py'): + found = self.gen_setup(found,fragment,tmpdir) + return found + elif os.path.exists(spec): + # Existing file or directory, just return it + return spec + else: + try: + spec = Requirement.parse(spec) + except ValueError: + raise DistutilsError( + "Not a URL, existing file, or requirement spec: %r" % + (spec,) + ) + return getattr(self.fetch_distribution(spec, tmpdir),'location',None) + + def fetch_distribution( + self, requirement, tmpdir, force_scan=False, source=False, + develop_ok=False, local_index=None + ): + """Obtain a distribution suitable for fulfilling `requirement` + + `requirement` must be a ``pkg_resources.Requirement`` instance. + If necessary, or if the `force_scan` flag is set, the requirement is + searched for in the (online) package index as well as the locally + installed packages. If a distribution matching `requirement` is found, + the returned distribution's ``location`` is the value you would have + gotten from calling the ``download()`` method with the matching + distribution's URL or filename. If no matching distribution is found, + ``None`` is returned. + + If the `source` flag is set, only source distributions and source + checkout links will be considered. Unless the `develop_ok` flag is + set, development and system eggs (i.e., those using the ``.egg-info`` + format) will be ignored. + """ + # process a Requirement + self.info("Searching for %s", requirement) + skipped = {} + dist = None + + def find(req, env=None): + if env is None: + env = self + # Find a matching distribution; may be called more than once + + for dist in env[req.key]: + + if dist.precedence==DEVELOP_DIST and not develop_ok: + if dist not in skipped: + self.warn("Skipping development or system egg: %s",dist) + skipped[dist] = 1 + continue + + if dist in req and (dist.precedence<=SOURCE_DIST or not source): + return dist + + if force_scan: + self.prescan() + self.find_packages(requirement) + dist = find(requirement) + + if local_index is not None: + dist = dist or find(requirement, local_index) + + if dist is None and self.to_scan is not None: + self.prescan() + dist = find(requirement) + + if dist is None and not force_scan: + self.find_packages(requirement) + dist = find(requirement) + + if dist is None: + self.warn( + "No local packages or download links found for %s%s", + (source and "a source distribution of " or ""), + requirement, + ) + else: + self.info("Best match: %s", dist) + return dist.clone(location=self.download(dist.location, tmpdir)) + + def fetch(self, requirement, tmpdir, force_scan=False, source=False): + """Obtain a file suitable for fulfilling `requirement` + + DEPRECATED; use the ``fetch_distribution()`` method now instead. For + backward compatibility, this routine is identical but returns the + ``location`` of the downloaded distribution instead of a distribution + object. 
+ """ + dist = self.fetch_distribution(requirement,tmpdir,force_scan,source) + if dist is not None: + return dist.location + return None + + def gen_setup(self, filename, fragment, tmpdir): + match = EGG_FRAGMENT.match(fragment) + dists = match and [ + d for d in + interpret_distro_name(filename, match.group(1), None) if d.version + ] or [] + + if len(dists)==1: # unambiguous ``#egg`` fragment + basename = os.path.basename(filename) + + # Make sure the file has been downloaded to the temp dir. + if os.path.dirname(filename) != tmpdir: + dst = os.path.join(tmpdir, basename) + from setuptools.command.easy_install import samefile + if not samefile(filename, dst): + shutil.copy2(filename, dst) + filename=dst + + file = open(os.path.join(tmpdir, 'setup.py'), 'w') + file.write( + "from setuptools import setup\n" + "setup(name=%r, version=%r, py_modules=[%r])\n" + % ( + dists[0].project_name, dists[0].version, + os.path.splitext(basename)[0] + ) + ) + file.close() + return filename + + elif match: + raise DistutilsError( + "Can't unambiguously interpret project/version identifier %r; " + "any dashes in the name or version should be escaped using " + "underscores. %r" % (fragment,dists) + ) + else: + raise DistutilsError( + "Can't process plain .py files without an '#egg=name-version'" + " suffix to enable automatic setup script generation." + ) + + dl_blocksize = 8192 + def _download_to(self, url, filename): + self.info("Downloading %s", url) + # Download the file + fp, tfp, info = None, None, None + try: + checker = HashChecker.from_url(url) + fp = self.open_url(strip_fragment(url)) + if isinstance(fp, HTTPError): + raise DistutilsError( + "Can't download %s: %s %s" % (url, fp.code,fp.msg) + ) + headers = fp.info() + blocknum = 0 + bs = self.dl_blocksize + size = -1 + if "content-length" in headers: + # Some servers return multiple Content-Length headers :( + sizes = get_all_headers(headers, 'Content-Length') + size = max(map(int, sizes)) + self.reporthook(url, filename, blocknum, bs, size) + tfp = open(filename,'wb') + while True: + block = fp.read(bs) + if block: + checker.feed(block) + tfp.write(block) + blocknum += 1 + self.reporthook(url, filename, blocknum, bs, size) + else: + break + self.check_hash(checker, filename, tfp) + return headers + finally: + if fp: fp.close() + if tfp: tfp.close() + + def reporthook(self, url, filename, blocknum, blksize, size): + pass # no-op + + def open_url(self, url, warning=None): + if url.startswith('file:'): + return local_open(url) + try: + return open_with_auth(url, self.opener) + except (ValueError, httplib.InvalidURL): + v = sys.exc_info()[1] + msg = ' '.join([str(arg) for arg in v.args]) + if warning: + self.warn(warning, msg) + else: + raise DistutilsError('%s %s' % (url, msg)) + except urllib2.HTTPError: + v = sys.exc_info()[1] + return v + except urllib2.URLError: + v = sys.exc_info()[1] + if warning: + self.warn(warning, v.reason) + else: + raise DistutilsError("Download error for %s: %s" + % (url, v.reason)) + except httplib.BadStatusLine: + v = sys.exc_info()[1] + if warning: + self.warn(warning, v.line) + else: + raise DistutilsError( + '%s returned a bad status line. The server might be ' + 'down, %s' % + (url, v.line) + ) + except httplib.HTTPException: + v = sys.exc_info()[1] + if warning: + self.warn(warning, v) + else: + raise DistutilsError("Download error for %s: %s" + % (url, v)) + + def _download_url(self, scheme, url, tmpdir): + # Determine download filename + # + name, fragment = egg_info_for_url(url) + if name: + while '..' 
in name: + name = name.replace('..','.').replace('\\','_') + else: + name = "__downloaded__" # default if URL has no path contents + + if name.endswith('.egg.zip'): + name = name[:-4] # strip the extra .zip before download + + filename = os.path.join(tmpdir,name) + + # Download the file + # + if scheme=='svn' or scheme.startswith('svn+'): + return self._download_svn(url, filename) + elif scheme=='git' or scheme.startswith('git+'): + return self._download_git(url, filename) + elif scheme.startswith('hg+'): + return self._download_hg(url, filename) + elif scheme=='file': + return url2pathname(urlparse(url)[2]) + else: + self.url_ok(url, True) # raises error if not allowed + return self._attempt_download(url, filename) + + def scan_url(self, url): + self.process_url(url, True) + + def _attempt_download(self, url, filename): + headers = self._download_to(url, filename) + if 'html' in headers.get('content-type','').lower(): + return self._download_html(url, headers, filename) + else: + return filename + + def _download_html(self, url, headers, filename): + file = open(filename) + for line in file: + if line.strip(): + # Check for a subversion index page + if re.search(r'<title>([^- ]+ - )?Revision \d+:', line): + # it's a subversion index page: + file.close() + os.unlink(filename) + return self._download_svn(url, filename) + break # not an index page + file.close() + os.unlink(filename) + raise DistutilsError("Unexpected HTML page found at "+url) + + def _download_svn(self, url, filename): + url = url.split('#',1)[0] # remove any fragment for svn's sake + creds = '' + if url.lower().startswith('svn:') and '@' in url: + scheme, netloc, path, p, q, f = urlparse(url) + if not netloc and path.startswith('//') and '/' in path[2:]: + netloc, path = path[2:].split('/',1) + auth, host = splituser(netloc) + if auth: + if ':' in auth: + user, pw = auth.split(':',1) + creds = " --username=%s --password=%s" % (user, pw) + else: + creds = " --username="+auth + netloc = host + url = urlunparse((scheme, netloc, url, p, q, f)) + self.info("Doing subversion checkout from %s to %s", url, filename) + os.system("svn checkout%s -q %s %s" % (creds, url, filename)) + return filename + + @staticmethod + def _vcs_split_rev_from_url(url, pop_prefix=False): + scheme, netloc, path, query, frag = urlsplit(url) + + scheme = scheme.split('+', 1)[-1] + + # Some fragment identification fails + path = path.split('#',1)[0] + + rev = None + if '@' in path: + path, rev = path.rsplit('@', 1) + + # Also, discard fragment + url = urlunsplit((scheme, netloc, path, query, '')) + + return url, rev + + def _download_git(self, url, filename): + filename = filename.split('#',1)[0] + url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) + + self.info("Doing git clone from %s to %s", url, filename) + os.system("git clone --quiet %s %s" % (url, filename)) + + if rev is not None: + self.info("Checking out %s", rev) + os.system("(cd %s && git checkout --quiet %s)" % ( + filename, + rev, + )) + + return filename + + def _download_hg(self, url, filename): + filename = filename.split('#',1)[0] + url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) + + self.info("Doing hg clone from %s to %s", url, filename) + os.system("hg clone --quiet %s %s" % (url, filename)) + + if rev is not None: + self.info("Updating to %s", rev) + os.system("(cd %s && hg up -C -r %s >&-)" % ( + filename, + rev, + )) + + return filename + + def debug(self, msg, *args): + log.debug(msg, *args) + + def info(self, msg, *args): + log.info(msg, *args) + + def 
warn(self, msg, *args): + log.warn(msg, *args) + +# This pattern matches a character entity reference (a decimal numeric +# references, a hexadecimal numeric reference, or a named reference). +entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub + +def uchr(c): + if not isinstance(c, int): + return c + if c>255: return unichr(c) + return chr(c) + +def decode_entity(match): + what = match.group(1) + if what.startswith('#x'): + what = int(what[2:], 16) + elif what.startswith('#'): + what = int(what[1:]) + else: + what = name2codepoint.get(what, match.group(0)) + return uchr(what) + +def htmldecode(text): + """Decode HTML entities in the given text.""" + return entity_sub(decode_entity, text) + +def socket_timeout(timeout=15): + def _socket_timeout(func): + def _socket_timeout(*args, **kwargs): + old_timeout = socket.getdefaulttimeout() + socket.setdefaulttimeout(timeout) + try: + return func(*args, **kwargs) + finally: + socket.setdefaulttimeout(old_timeout) + return _socket_timeout + return _socket_timeout + +def _encode_auth(auth): + """ + A function compatible with Python 2.3-3.3 that will encode + auth from a URL suitable for an HTTP header. + >>> _encode_auth('username%3Apassword') + u'dXNlcm5hbWU6cGFzc3dvcmQ=' + """ + auth_s = unquote(auth) + # convert to bytes + auth_bytes = auth_s.encode() + # use the legacy interface for Python 2.3 support + encoded_bytes = base64.encodestring(auth_bytes) + # convert back to a string + encoded = encoded_bytes.decode() + # strip the trailing carriage return + return encoded.rstrip() + +def open_with_auth(url, opener=urllib2.urlopen): + """Open a urllib2 request, handling HTTP authentication""" + + scheme, netloc, path, params, query, frag = urlparse(url) + + # Double scheme does not raise on Mac OS X as revealed by a + # failing test. We would expect "nonnumeric port". Refs #20. 
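
# A standalone sketch of what _encode_auth() above computes, using the
# non-deprecated base64 API (the vendored code keeps base64.encodestring
# for Python 2.3 compatibility):
import base64
from urllib.parse import unquote   # Python 3 spelling of the compat import

def encode_auth(auth):
    """Return the base64 payload for a Basic Authorization header."""
    return base64.b64encode(unquote(auth).encode()).decode()

assert encode_auth('username%3Apassword') == 'dXNlcm5hbWU6cGFzc3dvcmQ='
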
+    if netloc.endswith(':'):
+        raise httplib.InvalidURL("nonnumeric port: ''")
+
+    if scheme in ('http', 'https'):
+        auth, host = splituser(netloc)
+    else:
+        auth = None
+
+    if auth:
+        auth = "Basic " + _encode_auth(auth)
+        new_url = urlunparse((scheme,host,path,params,query,frag))
+        request = urllib2.Request(new_url)
+        request.add_header("Authorization", auth)
+    else:
+        request = urllib2.Request(url)
+
+    request.add_header('User-Agent', user_agent)
+    fp = opener(request)
+
+    if auth:
+        # Put authentication info back into request URL if same host,
+        # so that links found on the page will work
+        s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
+        if s2==scheme and h2==host:
+            fp.url = urlunparse((s2,netloc,path2,param2,query2,frag2))
+
+    return fp
+
+# adding a timeout to avoid freezing package_index
+open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
+
+
+def fix_sf_url(url):
+    return url      # backward compatibility
+
+def local_open(url):
+    """Read a local path, with special support for directories"""
+    scheme, server, path, param, query, frag = urlparse(url)
+    filename = url2pathname(path)
+    if os.path.isfile(filename):
+        return urllib2.urlopen(url)
+    elif path.endswith('/') and os.path.isdir(filename):
+        files = []
+        for f in os.listdir(filename):
+            if f=='index.html':
+                fp = open(os.path.join(filename,f),'rb')
+                body = fp.read()
+                fp.close()
+                break
+            elif os.path.isdir(os.path.join(filename,f)):
+                f+='/'
+            files.append("<a href=%r>%s</a>" % (f,f))
+        else:
+            body = ("<html><head><title>%s</title>" % url) + \
+                "</head><body>%s</body></html>" % '\n'.join(files)
+        status, message = 200, "OK"
+    else:
+        status, message, body = 404, "Path not found", "Not found"
+
+    return HTTPError(url, status, message,
+            {'content-type':'text/html'}, StringIO(body))
diff --git a/awx/lib/site-packages/setuptools/py24compat.py b/awx/lib/site-packages/setuptools/py24compat.py
new file mode 100644
index 0000000000..40e9ae0f74
--- /dev/null
+++ b/awx/lib/site-packages/setuptools/py24compat.py
@@ -0,0 +1,17 @@
+"""
+Forward-compatibility support for Python 2.4 and earlier
+"""
+
+# from jaraco.compat 1.2
+try:
+    from functools import wraps
+except ImportError:
+    def wraps(func):
+        "Just return the function unwrapped"
+        return lambda x: x
+
+
+try:
+    import hashlib
+except ImportError:
+    from setuptools._backport import hashlib
diff --git a/awx/lib/site-packages/setuptools/py26compat.py b/awx/lib/site-packages/setuptools/py26compat.py
new file mode 100644
index 0000000000..738b0cc40b
--- /dev/null
+++ b/awx/lib/site-packages/setuptools/py26compat.py
@@ -0,0 +1,19 @@
+"""
+Compatibility Support for Python 2.6 and earlier
+"""
+
+import sys
+
+from setuptools.compat import splittag
+
+def strip_fragment(url):
+    """
+    In `Python issue 8280 <http://bugs.python.org/issue8280>`_, Python 2.7 and
+    later was patched to disregard the fragment when making URL requests.
+    Do the same for Python 2.6 and earlier.
+    """
+    url, fragment = splittag(url)
+    return url
+
+if sys.version_info >= (2,7):
+    strip_fragment = lambda x: x
diff --git a/awx/lib/site-packages/setuptools/py27compat.py b/awx/lib/site-packages/setuptools/py27compat.py
new file mode 100644
index 0000000000..9d2886db99
--- /dev/null
+++ b/awx/lib/site-packages/setuptools/py27compat.py
@@ -0,0 +1,15 @@
+"""
+Compatibility Support for Python 2.7 and earlier
+"""
+
+import sys
+
+def get_all_headers(message, key):
+    """
+    Given an HTTPMessage, return all headers matching a given key.
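
# Quick illustration of why this helper exists: some servers send the
# Content-Length header more than once, and _download_to() in
# package_index.py above takes the largest value it can find.
from email.message import Message

msg = Message()
msg['Content-Length'] = '1024'
msg['Content-Length'] = '2048'        # duplicate header is preserved
assert max(map(int, msg.get_all('Content-Length'))) == 2048
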
+ """ + return message.get_all(key) + +if sys.version_info < (3,): + def get_all_headers(message, key): + return message.getheaders(key) diff --git a/awx/lib/site-packages/setuptools/sandbox.py b/awx/lib/site-packages/setuptools/sandbox.py new file mode 100644 index 0000000000..29fc07b8d9 --- /dev/null +++ b/awx/lib/site-packages/setuptools/sandbox.py @@ -0,0 +1,324 @@ +import os, sys, tempfile, operator, pkg_resources +if os.name == "java": + import org.python.modules.posix.PosixModule as _os +else: + _os = sys.modules[os.name] +try: + _file = file +except NameError: + _file = None +_open = open +from distutils.errors import DistutilsError +from pkg_resources import working_set + +from setuptools.compat import builtins, execfile, reduce + +__all__ = [ + "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup", +] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +def run_setup(setup_script, args): + """Run a distutils setup script, sandboxed in its directory""" + old_dir = os.getcwd() + save_argv = sys.argv[:] + save_path = sys.path[:] + setup_dir = os.path.abspath(os.path.dirname(setup_script)) + temp_dir = os.path.join(setup_dir,'temp') + if not os.path.isdir(temp_dir): os.makedirs(temp_dir) + save_tmp = tempfile.tempdir + save_modules = sys.modules.copy() + pr_state = pkg_resources.__getstate__() + try: + tempfile.tempdir = temp_dir + os.chdir(setup_dir) + try: + sys.argv[:] = [setup_script]+list(args) + sys.path.insert(0, setup_dir) + # reset to include setup dir, w/clean callback list + working_set.__init__() + working_set.callbacks.append(lambda dist:dist.activate()) + DirectorySandbox(setup_dir).run( + lambda: execfile( + "setup.py", + {'__file__':setup_script, '__name__':'__main__'} + ) + ) + except SystemExit: + v = sys.exc_info()[1] + if v.args and v.args[0]: + raise + # Normal exit, just return + finally: + pkg_resources.__setstate__(pr_state) + sys.modules.update(save_modules) + # remove any modules imported within the sandbox + del_modules = [ + mod_name for mod_name in sys.modules + if mod_name not in save_modules + # exclude any encodings modules. 
See #285 + and not mod_name.startswith('encodings.') + ] + list(map(sys.modules.__delitem__, del_modules)) + os.chdir(old_dir) + sys.path[:] = save_path + sys.argv[:] = save_argv + tempfile.tempdir = save_tmp + + + +class AbstractSandbox: + """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts""" + + _active = False + + def __init__(self): + self._attrs = [ + name for name in dir(_os) + if not name.startswith('_') and hasattr(self,name) + ] + + def _copy(self, source): + for name in self._attrs: + setattr(os, name, getattr(source,name)) + + def run(self, func): + """Run 'func' under os sandboxing""" + try: + self._copy(self) + if _file: + builtins.file = self._file + builtins.open = self._open + self._active = True + return func() + finally: + self._active = False + if _file: + builtins.file = _file + builtins.open = _open + self._copy(_os) + + def _mk_dual_path_wrapper(name): + original = getattr(_os,name) + def wrap(self,src,dst,*args,**kw): + if self._active: + src,dst = self._remap_pair(name,src,dst,*args,**kw) + return original(src,dst,*args,**kw) + return wrap + + for name in ["rename", "link", "symlink"]: + if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name) + + + def _mk_single_path_wrapper(name, original=None): + original = original or getattr(_os,name) + def wrap(self,path,*args,**kw): + if self._active: + path = self._remap_input(name,path,*args,**kw) + return original(path,*args,**kw) + return wrap + + if _file: + _file = _mk_single_path_wrapper('file', _file) + _open = _mk_single_path_wrapper('open', _open) + for name in [ + "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir", + "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat", + "startfile", "mkfifo", "mknod", "pathconf", "access" + ]: + if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name) + + def _mk_single_with_return(name): + original = getattr(_os,name) + def wrap(self,path,*args,**kw): + if self._active: + path = self._remap_input(name,path,*args,**kw) + return self._remap_output(name, original(path,*args,**kw)) + return original(path,*args,**kw) + return wrap + + for name in ['readlink', 'tempnam']: + if hasattr(_os,name): locals()[name] = _mk_single_with_return(name) + + def _mk_query(name): + original = getattr(_os,name) + def wrap(self,*args,**kw): + retval = original(*args,**kw) + if self._active: + return self._remap_output(name, retval) + return retval + return wrap + + for name in ['getcwd', 'tmpnam']: + if hasattr(_os,name): locals()[name] = _mk_query(name) + + def _validate_path(self,path): + """Called to remap or validate any path, whether input or output""" + return path + + def _remap_input(self,operation,path,*args,**kw): + """Called for path inputs""" + return self._validate_path(path) + + def _remap_output(self,operation,path): + """Called for path outputs""" + return self._validate_path(path) + + def _remap_pair(self,operation,src,dst,*args,**kw): + """Called for path pairs like rename, link, and symlink operations""" + return ( + self._remap_input(operation+'-from',src,*args,**kw), + self._remap_input(operation+'-to',dst,*args,**kw) + ) + + +if hasattr(os, 'devnull'): + _EXCEPTIONS = [os.devnull,] +else: + _EXCEPTIONS = [] + +try: + from win32com.client.gencache import GetGeneratePath + _EXCEPTIONS.append(GetGeneratePath()) + del GetGeneratePath +except ImportError: + # it appears pywin32 is not installed, so no need to exclude. 
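
# A standalone sketch of the pseudo-chroot test that
# DirectorySandbox._ok() implements below: resolve symlinks, normalize
# case, then require the result to sit under the sandbox root unless it
# matches an explicit exception such as os.devnull.
import os

def inside_sandbox(path, sandbox, exceptions=()):
    sandbox = os.path.normcase(os.path.realpath(sandbox))
    prefix = os.path.join(sandbox, '')
    real = os.path.normcase(os.path.realpath(path))
    if any(real.startswith(os.path.normcase(os.path.realpath(e)))
           for e in exceptions):
        return True
    return real == sandbox or real.startswith(prefix)

assert inside_sandbox('/tmp/build/pkg/setup.py', '/tmp/build/pkg')
assert not inside_sandbox('/etc/passwd', '/tmp/build/pkg')
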
+ pass + +class DirectorySandbox(AbstractSandbox): + """Restrict operations to a single subdirectory - pseudo-chroot""" + + write_ops = dict.fromkeys([ + "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir", + "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam", + ]) + + def __init__(self, sandbox, exceptions=_EXCEPTIONS): + self._sandbox = os.path.normcase(os.path.realpath(sandbox)) + self._prefix = os.path.join(self._sandbox,'') + self._exceptions = [os.path.normcase(os.path.realpath(path)) for path in exceptions] + AbstractSandbox.__init__(self) + + def _violation(self, operation, *args, **kw): + raise SandboxViolation(operation, args, kw) + + if _file: + def _file(self, path, mode='r', *args, **kw): + if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): + self._violation("file", path, mode, *args, **kw) + return _file(path,mode,*args,**kw) + + def _open(self, path, mode='r', *args, **kw): + if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): + self._violation("open", path, mode, *args, **kw) + return _open(path,mode,*args,**kw) + + def tmpnam(self): + self._violation("tmpnam") + + def _ok(self,path): + active = self._active + try: + self._active = False + realpath = os.path.normcase(os.path.realpath(path)) + if (self._exempted(realpath) or realpath == self._sandbox + or realpath.startswith(self._prefix)): + return True + finally: + self._active = active + + def _exempted(self, filepath): + exception_matches = map(filepath.startswith, self._exceptions) + return True in exception_matches + + def _remap_input(self,operation,path,*args,**kw): + """Called for path inputs""" + if operation in self.write_ops and not self._ok(path): + self._violation(operation, os.path.realpath(path), *args, **kw) + return path + + def _remap_pair(self,operation,src,dst,*args,**kw): + """Called for path pairs like rename, link, and symlink operations""" + if not self._ok(src) or not self._ok(dst): + self._violation(operation, src, dst, *args, **kw) + return (src,dst) + + def open(self, file, flags, mode=0x1FF, *args, **kw): # 0777 + """Called for low-level os.open()""" + if flags & WRITE_FLAGS and not self._ok(file): + self._violation("os.open", file, flags, mode, *args, **kw) + return _os.open(file,flags,mode, *args, **kw) + +WRITE_FLAGS = reduce( + operator.or_, [getattr(_os, a, 0) for a in + "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()] +) + +class SandboxViolation(DistutilsError): + """A setup script attempted to modify the filesystem outside the sandbox""" + + def __str__(self): + return """SandboxViolation: %s%r %s + +The package setup script has attempted to modify files on your system +that are not within the EasyInstall build area, and has been aborted. + +This package cannot be safely installed by EasyInstall, and may not +support alternate installation locations even if you run its setup +script by hand. 
Please inform the package's author and the EasyInstall +maintainers to find out if a fix or workaround is available.""" % self.args + + + + + + + + + + + + + + + + + + + + + + + + + + + +# diff --git a/awx/lib/site-packages/setuptools/script template (dev).py b/awx/lib/site-packages/setuptools/script template (dev).py new file mode 100644 index 0000000000..901790e7bd --- /dev/null +++ b/awx/lib/site-packages/setuptools/script template (dev).py @@ -0,0 +1,9 @@ +# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r +__requires__ = """%(spec)r""" +from pkg_resources import require; require("""%(spec)r""") +del require +__file__ = """%(dev_path)r""" +try: + execfile(__file__) +except NameError: + exec(compile(open(__file__).read(), __file__, 'exec')) diff --git a/awx/lib/site-packages/setuptools/script template.py b/awx/lib/site-packages/setuptools/script template.py new file mode 100644 index 0000000000..8dd5d51001 --- /dev/null +++ b/awx/lib/site-packages/setuptools/script template.py @@ -0,0 +1,4 @@ +# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r +__requires__ = """%(spec)r""" +import pkg_resources +pkg_resources.run_script("""%(spec)r""", """%(script_name)r""") diff --git a/awx/lib/site-packages/setuptools/site-patch.py b/awx/lib/site-packages/setuptools/site-patch.py new file mode 100644 index 0000000000..a7166f1407 --- /dev/null +++ b/awx/lib/site-packages/setuptools/site-patch.py @@ -0,0 +1,83 @@ +def __boot(): + import sys, os, os.path + PYTHONPATH = os.environ.get('PYTHONPATH') + if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH): + PYTHONPATH = [] + else: + PYTHONPATH = PYTHONPATH.split(os.pathsep) + + pic = getattr(sys,'path_importer_cache',{}) + stdpath = sys.path[len(PYTHONPATH):] + mydir = os.path.dirname(__file__) + #print "searching",stdpath,sys.path + + for item in stdpath: + if item==mydir or not item: + continue # skip if current dir. 
on Windows, or my own directory + importer = pic.get(item) + if importer is not None: + loader = importer.find_module('site') + if loader is not None: + # This should actually reload the current module + loader.load_module('site') + break + else: + try: + import imp # Avoid import loop in Python >= 3.3 + stream, path, descr = imp.find_module('site',[item]) + except ImportError: + continue + if stream is None: + continue + try: + # This should actually reload the current module + imp.load_module('site',stream,path,descr) + finally: + stream.close() + break + else: + raise ImportError("Couldn't find the real 'site' module") + + #print "loaded", __file__ + + known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp + + oldpos = getattr(sys,'__egginsert',0) # save old insertion position + sys.__egginsert = 0 # and reset the current one + + for item in PYTHONPATH: + addsitedir(item) + + sys.__egginsert += oldpos # restore effective old position + + d,nd = makepath(stdpath[0]) + insert_at = None + new_path = [] + + for item in sys.path: + p,np = makepath(item) + + if np==nd and insert_at is None: + # We've hit the first 'system' path entry, so added entries go here + insert_at = len(new_path) + + if np in known_paths or insert_at is None: + new_path.append(item) + else: + # new path after the insert point, back-insert it + new_path.insert(insert_at, item) + insert_at += 1 + + sys.path[:] = new_path + +if __name__=='site': + __boot() + del __boot + + + + + + + + diff --git a/awx/lib/site-packages/setuptools/ssl_support.py b/awx/lib/site-packages/setuptools/ssl_support.py new file mode 100644 index 0000000000..90359b2c51 --- /dev/null +++ b/awx/lib/site-packages/setuptools/ssl_support.py @@ -0,0 +1,261 @@ +import sys, os, socket, atexit, re +import pkg_resources +from pkg_resources import ResolutionError, ExtractionError +from setuptools.compat import urllib2 + +try: + import ssl +except ImportError: + ssl = None + +__all__ = [ + 'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths', + 'opener_for' +] + +cert_paths = """ +/etc/pki/tls/certs/ca-bundle.crt +/etc/ssl/certs/ca-certificates.crt +/usr/share/ssl/certs/ca-bundle.crt +/usr/local/share/certs/ca-root.crt +/etc/ssl/cert.pem +/System/Library/OpenSSL/certs/cert.pem +""".strip().split() + + +HTTPSHandler = HTTPSConnection = object + +for what, where in ( + ('HTTPSHandler', ['urllib2','urllib.request']), + ('HTTPSConnection', ['httplib', 'http.client']), +): + for module in where: + try: + exec("from %s import %s" % (module, what)) + except ImportError: + pass + +is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection) + + + + + +try: + from socket import create_connection +except ImportError: + _GLOBAL_DEFAULT_TIMEOUT = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', object()) + def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + An host of '' or port 0 tells the OS to use the default. 
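
# A minimal sketch of the CA-bundle discovery that find_ca_bundle()
# performs later in this file: probe well-known locations such as the
# cert_paths listed above, then fall back to the certifi package if it
# is installed.  certifi.where() is used here as a modern stand-in for
# the vendored resource_filename('certifi', 'cacert.pem') lookup.
import os

def first_ca_bundle(candidates):
    for cert_path in candidates:
        if os.path.isfile(cert_path):
            return cert_path
    try:
        import certifi
        return certifi.where()
    except ImportError:
        return None
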
+ """ + host, port = address + err = None + for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket.socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except error: + err = True + if sock is not None: + sock.close() + if err: + raise + else: + raise error("getaddrinfo returns an empty list") + + +try: + from ssl import CertificateError, match_hostname +except ImportError: + class CertificateError(ValueError): + pass + + def _dnsname_to_pat(dn, max_wildcards=1): + pats = [] + for frag in dn.split(r'.'): + if frag.count('*') > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survery of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + if frag == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + else: + # Otherwise, '*' matches any dotless fragment. + frag = re.escape(frag) + pats.append(frag.replace(r'\*', '[^.]*')) + return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules + are mostly followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_to_pat(value).match(hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. 
+ if key == 'commonName': + if _dnsname_to_pat(value).match(hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + + + + + + + + + + + + + + + + + + + + + + + +class VerifyingHTTPSHandler(HTTPSHandler): + """Simple verifying handler: no auth, subclasses, timeouts, etc.""" + + def __init__(self, ca_bundle): + self.ca_bundle = ca_bundle + HTTPSHandler.__init__(self) + + def https_open(self, req): + return self.do_open( + lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req + ) + + +class VerifyingHTTPSConn(HTTPSConnection): + """Simple verifying connection: no auth, subclasses, timeouts, etc.""" + def __init__(self, host, ca_bundle, **kw): + HTTPSConnection.__init__(self, host, **kw) + self.ca_bundle = ca_bundle + + def connect(self): + sock = create_connection( + (self.host, self.port), getattr(self,'source_address',None) + ) + + # Handle the socket if a (proxy) tunnel is present + if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None): + self.sock = sock + self._tunnel() + + self.sock = ssl.wrap_socket( + sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle + ) + try: + match_hostname(self.sock.getpeercert(), self.host) + except CertificateError: + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + +def opener_for(ca_bundle=None): + """Get a urlopen() replacement that uses ca_bundle for verification""" + return urllib2.build_opener( + VerifyingHTTPSHandler(ca_bundle or find_ca_bundle()) + ).open + + + +_wincerts = None + +def get_win_certfile(): + global _wincerts + if _wincerts is not None: + return _wincerts.name + + try: + from wincertstore import CertFile + except ImportError: + return None + + class MyCertFile(CertFile): + def __init__(self, stores=(), certs=()): + CertFile.__init__(self) + for store in stores: + self.addstore(store) + self.addcerts(certs) + atexit.register(self.close) + + _wincerts = MyCertFile(stores=['CA', 'ROOT']) + return _wincerts.name + + +def find_ca_bundle(): + """Return an existing CA bundle path, or None""" + if os.name=='nt': + return get_win_certfile() + else: + for cert_path in cert_paths: + if os.path.isfile(cert_path): + return cert_path + try: + return pkg_resources.resource_filename('certifi', 'cacert.pem') + except (ImportError, ResolutionError, ExtractionError): + return None + + + + + diff --git a/awx/lib/site-packages/setuptools/tests/__init__.py b/awx/lib/site-packages/setuptools/tests/__init__.py new file mode 100644 index 0000000000..b5328ce67a --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/__init__.py @@ -0,0 +1,352 @@ +"""Tests for the 'setuptools' package""" +import sys +import os +import unittest +from setuptools.tests import doctest +import distutils.core +import distutils.cmd +from distutils.errors import DistutilsOptionError, DistutilsPlatformError +from distutils.errors import DistutilsSetupError +from distutils.core import Extension +from distutils.version import LooseVersion +from setuptools.compat import func_code + +from setuptools.compat import func_code +import setuptools.dist +import setuptools.depends as dep +from setuptools import Feature +from setuptools.depends import Require + +def additional_tests(): + import 
doctest, unittest + suite = unittest.TestSuite(( + doctest.DocFileSuite( + os.path.join('tests', 'api_tests.txt'), + optionflags=doctest.ELLIPSIS, package='pkg_resources', + ), + )) + if sys.platform == 'win32': + suite.addTest(doctest.DocFileSuite('win_script_wrapper.txt')) + return suite + +def makeSetup(**args): + """Return distribution from 'setup(**args)', without executing commands""" + + distutils.core._setup_stop_after = "commandline" + + # Don't let system command line leak into tests! + args.setdefault('script_args',['install']) + + try: + return setuptools.setup(**args) + finally: + distutils.core._setup_stop_after = None + + +class DependsTests(unittest.TestCase): + + def testExtractConst(self): + if not hasattr(dep, 'extract_constant'): + # skip on non-bytecode platforms + return + + def f1(): + global x, y, z + x = "test" + y = z + + fc = func_code(f1) + # unrecognized name + self.assertEqual(dep.extract_constant(fc,'q', -1), None) + + # constant assigned + self.assertEqual(dep.extract_constant(fc,'x', -1), "test") + + # expression assigned + self.assertEqual(dep.extract_constant(fc,'y', -1), -1) + + # recognized name, not assigned + self.assertEqual(dep.extract_constant(fc,'z', -1), None) + + def testFindModule(self): + self.assertRaises(ImportError, dep.find_module, 'no-such.-thing') + self.assertRaises(ImportError, dep.find_module, 'setuptools.non-existent') + f,p,i = dep.find_module('setuptools.tests') + f.close() + + def testModuleExtract(self): + if not hasattr(dep, 'get_module_constant'): + # skip on non-bytecode platforms + return + + from email import __version__ + self.assertEqual( + dep.get_module_constant('email','__version__'), __version__ + ) + self.assertEqual( + dep.get_module_constant('sys','version'), sys.version + ) + self.assertEqual( + dep.get_module_constant('setuptools.tests','__doc__'),__doc__ + ) + + def testRequire(self): + if not hasattr(dep, 'extract_constant'): + # skip on non-bytecode platformsh + return + + req = Require('Email','1.0.3','email') + + self.assertEqual(req.name, 'Email') + self.assertEqual(req.module, 'email') + self.assertEqual(req.requested_version, '1.0.3') + self.assertEqual(req.attribute, '__version__') + self.assertEqual(req.full_name(), 'Email-1.0.3') + + from email import __version__ + self.assertEqual(req.get_version(), __version__) + self.assertTrue(req.version_ok('1.0.9')) + self.assertTrue(not req.version_ok('0.9.1')) + self.assertTrue(not req.version_ok('unknown')) + + self.assertTrue(req.is_present()) + self.assertTrue(req.is_current()) + + req = Require('Email 3000','03000','email',format=LooseVersion) + self.assertTrue(req.is_present()) + self.assertTrue(not req.is_current()) + self.assertTrue(not req.version_ok('unknown')) + + req = Require('Do-what-I-mean','1.0','d-w-i-m') + self.assertTrue(not req.is_present()) + self.assertTrue(not req.is_current()) + + req = Require('Tests', None, 'tests', homepage="http://example.com") + self.assertEqual(req.format, None) + self.assertEqual(req.attribute, None) + self.assertEqual(req.requested_version, None) + self.assertEqual(req.full_name(), 'Tests') + self.assertEqual(req.homepage, 'http://example.com') + + paths = [os.path.dirname(p) for p in __path__] + self.assertTrue(req.is_present(paths)) + self.assertTrue(req.is_current(paths)) + + +class DistroTests(unittest.TestCase): + + def setUp(self): + self.e1 = Extension('bar.ext',['bar.c']) + self.e2 = Extension('c.y', ['y.c']) + + self.dist = makeSetup( + packages=['a', 'a.b', 'a.b.c', 'b', 'c'], + py_modules=['b.d','x'], + 
ext_modules = (self.e1, self.e2), + package_dir = {}, + ) + + def testDistroType(self): + self.assertTrue(isinstance(self.dist,setuptools.dist.Distribution)) + + def testExcludePackage(self): + self.dist.exclude_package('a') + self.assertEqual(self.dist.packages, ['b','c']) + + self.dist.exclude_package('b') + self.assertEqual(self.dist.packages, ['c']) + self.assertEqual(self.dist.py_modules, ['x']) + self.assertEqual(self.dist.ext_modules, [self.e1, self.e2]) + + self.dist.exclude_package('c') + self.assertEqual(self.dist.packages, []) + self.assertEqual(self.dist.py_modules, ['x']) + self.assertEqual(self.dist.ext_modules, [self.e1]) + + # test removals from unspecified options + makeSetup().exclude_package('x') + + def testIncludeExclude(self): + # remove an extension + self.dist.exclude(ext_modules=[self.e1]) + self.assertEqual(self.dist.ext_modules, [self.e2]) + + # add it back in + self.dist.include(ext_modules=[self.e1]) + self.assertEqual(self.dist.ext_modules, [self.e2, self.e1]) + + # should not add duplicate + self.dist.include(ext_modules=[self.e1]) + self.assertEqual(self.dist.ext_modules, [self.e2, self.e1]) + + def testExcludePackages(self): + self.dist.exclude(packages=['c','b','a']) + self.assertEqual(self.dist.packages, []) + self.assertEqual(self.dist.py_modules, ['x']) + self.assertEqual(self.dist.ext_modules, [self.e1]) + + def testEmpty(self): + dist = makeSetup() + dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2]) + dist = makeSetup() + dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2]) + + def testContents(self): + self.assertTrue(self.dist.has_contents_for('a')) + self.dist.exclude_package('a') + self.assertTrue(not self.dist.has_contents_for('a')) + + self.assertTrue(self.dist.has_contents_for('b')) + self.dist.exclude_package('b') + self.assertTrue(not self.dist.has_contents_for('b')) + + self.assertTrue(self.dist.has_contents_for('c')) + self.dist.exclude_package('c') + self.assertTrue(not self.dist.has_contents_for('c')) + + def testInvalidIncludeExclude(self): + self.assertRaises(DistutilsSetupError, + self.dist.include, nonexistent_option='x' + ) + self.assertRaises(DistutilsSetupError, + self.dist.exclude, nonexistent_option='x' + ) + self.assertRaises(DistutilsSetupError, + self.dist.include, packages={'x':'y'} + ) + self.assertRaises(DistutilsSetupError, + self.dist.exclude, packages={'x':'y'} + ) + self.assertRaises(DistutilsSetupError, + self.dist.include, ext_modules={'x':'y'} + ) + self.assertRaises(DistutilsSetupError, + self.dist.exclude, ext_modules={'x':'y'} + ) + + self.assertRaises(DistutilsSetupError, + self.dist.include, package_dir=['q'] + ) + self.assertRaises(DistutilsSetupError, + self.dist.exclude, package_dir=['q'] + ) + + +class FeatureTests(unittest.TestCase): + + def setUp(self): + self.req = Require('Distutils','1.0.3','distutils') + self.dist = makeSetup( + features={ + 'foo': Feature("foo",standard=True,require_features=['baz',self.req]), + 'bar': Feature("bar", standard=True, packages=['pkg.bar'], + py_modules=['bar_et'], remove=['bar.ext'], + ), + 'baz': Feature( + "baz", optional=False, packages=['pkg.baz'], + scripts = ['scripts/baz_it'], + libraries=[('libfoo','foo/foofoo.c')] + ), + 'dwim': Feature("DWIM", available=False, remove='bazish'), + }, + script_args=['--without-bar', 'install'], + packages = ['pkg.bar', 'pkg.foo'], + py_modules = ['bar_et', 'bazish'], + ext_modules = [Extension('bar.ext',['bar.c'])] + ) + + def testDefaults(self): + self.assertTrue(not + Feature( + 
"test",standard=True,remove='x',available=False + ).include_by_default() + ) + self.assertTrue( + Feature("test",standard=True,remove='x').include_by_default() + ) + # Feature must have either kwargs, removes, or require_features + self.assertRaises(DistutilsSetupError, Feature, "test") + + def testAvailability(self): + self.assertRaises( + DistutilsPlatformError, + self.dist.features['dwim'].include_in, self.dist + ) + + def testFeatureOptions(self): + dist = self.dist + self.assertTrue( + ('with-dwim',None,'include DWIM') in dist.feature_options + ) + self.assertTrue( + ('without-dwim',None,'exclude DWIM (default)') in dist.feature_options + ) + self.assertTrue( + ('with-bar',None,'include bar (default)') in dist.feature_options + ) + self.assertTrue( + ('without-bar',None,'exclude bar') in dist.feature_options + ) + self.assertEqual(dist.feature_negopt['without-foo'],'with-foo') + self.assertEqual(dist.feature_negopt['without-bar'],'with-bar') + self.assertEqual(dist.feature_negopt['without-dwim'],'with-dwim') + self.assertTrue(not 'without-baz' in dist.feature_negopt) + + def testUseFeatures(self): + dist = self.dist + self.assertEqual(dist.with_foo,1) + self.assertEqual(dist.with_bar,0) + self.assertEqual(dist.with_baz,1) + self.assertTrue(not 'bar_et' in dist.py_modules) + self.assertTrue(not 'pkg.bar' in dist.packages) + self.assertTrue('pkg.baz' in dist.packages) + self.assertTrue('scripts/baz_it' in dist.scripts) + self.assertTrue(('libfoo','foo/foofoo.c') in dist.libraries) + self.assertEqual(dist.ext_modules,[]) + self.assertEqual(dist.require_features, [self.req]) + + # If we ask for bar, it should fail because we explicitly disabled + # it on the command line + self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar') + + def testFeatureWithInvalidRemove(self): + self.assertRaises( + SystemExit, makeSetup, features = {'x':Feature('x', remove='y')} + ) + +class TestCommandTests(unittest.TestCase): + + def testTestIsCommand(self): + test_cmd = makeSetup().get_command_obj('test') + self.assertTrue(isinstance(test_cmd, distutils.cmd.Command)) + + def testLongOptSuiteWNoDefault(self): + ts1 = makeSetup(script_args=['test','--test-suite=foo.tests.suite']) + ts1 = ts1.get_command_obj('test') + ts1.ensure_finalized() + self.assertEqual(ts1.test_suite, 'foo.tests.suite') + + def testDefaultSuite(self): + ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test') + ts2.ensure_finalized() + self.assertEqual(ts2.test_suite, 'bar.tests.suite') + + def testDefaultWModuleOnCmdLine(self): + ts3 = makeSetup( + test_suite='bar.tests', + script_args=['test','-m','foo.tests'] + ).get_command_obj('test') + ts3.ensure_finalized() + self.assertEqual(ts3.test_module, 'foo.tests') + self.assertEqual(ts3.test_suite, 'foo.tests.test_suite') + + def testConflictingOptions(self): + ts4 = makeSetup( + script_args=['test','-m','bar.tests', '-s','foo.tests.suite'] + ).get_command_obj('test') + self.assertRaises(DistutilsOptionError, ts4.ensure_finalized) + + def testNoSuite(self): + ts5 = makeSetup().get_command_obj('test') + ts5.ensure_finalized() + self.assertEqual(ts5.test_suite, None) diff --git a/awx/lib/site-packages/setuptools/tests/doctest.py b/awx/lib/site-packages/setuptools/tests/doctest.py new file mode 100644 index 0000000000..35d588d074 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/doctest.py @@ -0,0 +1,2683 @@ +# Module doctest. +# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). 
+# Major enhancements and refactoring by: +# Jim Fulton +# Edward Loper + +# Provided as-is; use at your own risk; no warranty; no promises; enjoy! + +try: + basestring +except NameError: + basestring = str + +try: + enumerate +except NameError: + def enumerate(seq): + return zip(range(len(seq)),seq) + +r"""Module doctest -- a framework for running examples in docstrings. + +In simplest use, end each module M to be tested with: + +def _test(): + import doctest + doctest.testmod() + +if __name__ == "__main__": + _test() + +Then running the module as a script will cause the examples in the +docstrings to get executed and verified: + +python M.py + +This won't display anything unless an example fails, in which case the +failing example(s) and the cause(s) of the failure(s) are printed to stdout +(why not stderr? because stderr is a lame hack <0.2 wink>), and the final +line of output is "Test failed.". + +Run it with the -v switch instead: + +python M.py -v + +and a detailed report of all examples tried is printed to stdout, along +with assorted summaries at the end. + +You can force verbose mode by passing "verbose=True" to testmod, or prohibit +it by passing "verbose=False". In either of those cases, sys.argv is not +examined by testmod. + +There are a variety of other ways to run doctests, including integration +with the unittest framework, and support for running non-Python text +files containing doctests. There are also many ways to override parts +of doctest's default behaviors. See the Library Reference Manual for +details. +""" + +__docformat__ = 'reStructuredText en' + +__all__ = [ + # 0, Option Flags + 'register_optionflag', + 'DONT_ACCEPT_TRUE_FOR_1', + 'DONT_ACCEPT_BLANKLINE', + 'NORMALIZE_WHITESPACE', + 'ELLIPSIS', + 'IGNORE_EXCEPTION_DETAIL', + 'COMPARISON_FLAGS', + 'REPORT_UDIFF', + 'REPORT_CDIFF', + 'REPORT_NDIFF', + 'REPORT_ONLY_FIRST_FAILURE', + 'REPORTING_FLAGS', + # 1. Utility Functions + 'is_private', + # 2. Example & DocTest + 'Example', + 'DocTest', + # 3. Doctest Parser + 'DocTestParser', + # 4. Doctest Finder + 'DocTestFinder', + # 5. Doctest Runner + 'DocTestRunner', + 'OutputChecker', + 'DocTestFailure', + 'UnexpectedException', + 'DebugRunner', + # 6. Test Functions + 'testmod', + 'testfile', + 'run_docstring_examples', + # 7. Tester + 'Tester', + # 8. Unittest Support + 'DocTestSuite', + 'DocFileSuite', + 'set_unittest_reportflags', + # 9. Debugging Support + 'script_from_examples', + 'testsource', + 'debug_src', + 'debug', +] + +import __future__ + +import sys, traceback, inspect, linecache, os, re, types +import unittest, difflib, pdb, tempfile +import warnings +from setuptools.compat import StringIO, execfile, exec_, func_code, im_func + +# Don't whine about the deprecated is_private function in this +# module's tests. +warnings.filterwarnings("ignore", "is_private", DeprecationWarning, + __name__, 0) + +# There are 4 basic classes: +# - Example: a pair, plus an intra-docstring line number. +# - DocTest: a collection of examples, parsed from a docstring, plus +# info about where the docstring came from (name, filename, lineno). +# - DocTestFinder: extracts DocTests from a given object's docstring and +# its contained objects' docstrings. +# - DocTestRunner: runs DocTest cases, and accumulates statistics. +# +# So the basic picture is: +# +# list of: +# +------+ +---------+ +-------+ +# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| +# +------+ +---------+ +-------+ +# | Example | +# | ... 
|
+#                            | Example |
+#                            +---------+
+
+# Option constants.
+
+OPTIONFLAGS_BY_NAME = {}
+def register_optionflag(name):
+    flag = 1 << len(OPTIONFLAGS_BY_NAME)
+    OPTIONFLAGS_BY_NAME[name] = flag
+    return flag
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+                    DONT_ACCEPT_BLANKLINE |
+                    NORMALIZE_WHITESPACE |
+                    ELLIPSIS |
+                    IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+                   REPORT_CDIFF |
+                   REPORT_NDIFF |
+                   REPORT_ONLY_FIRST_FAILURE)
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
+
+######################################################################
+## Table of Contents
+######################################################################
+#  1. Utility Functions
+#  2. Example & DocTest -- store test cases
+#  3. DocTest Parser -- extracts examples from strings
+#  4. DocTest Finder -- extracts test cases from objects
+#  5. DocTest Runner -- runs test cases
+#  6. Test Functions -- convenient wrappers for testing
+#  7. Tester Class -- for backwards compatibility
+#  8. Unittest Support
+#  9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def is_private(prefix, base):
+    """prefix, base -> true iff name prefix + "." + base is "private".
+
+    Prefix may be an empty string, and base does not contain a period.
+    Prefix is ignored (although functions you write conforming to this
+    protocol may make use of it).
+    Return true iff base begins with an (at least one) underscore, but
+    does not both begin and end with (at least) two underscores.
+
+    >>> is_private("a.b", "my_func")
+    False
+    >>> is_private("____", "_my_func")
+    True
+    >>> is_private("someclass", "__init__")
+    False
+    >>> is_private("sometypo", "__init_")
+    True
+    >>> is_private("x.y.z", "_")
+    True
+    >>> is_private("_x.y.z", "__")
+    False
+    >>> is_private("", "")  # senseless but consistent
+    False
+    """
+    warnings.warn("is_private is deprecated; it wasn't useful; "
+                  "examine DocTestFinder.find() lists instead",
+                  DeprecationWarning, stacklevel=2)
+    return base[:1] == "_" and not base[:2] == "__" == base[-2:]
+
+def _extract_future_flags(globs):
+    """
+    Return the compiler-flags associated with the future features that
+    have been imported into the given namespace (globs).
+    """
+    flags = 0
+    for fname in __future__.all_feature_names:
+        feature = globs.get(fname, None)
+        if feature is getattr(__future__, fname):
+            flags |= feature.compiler_flag
+    return flags
+
+def _normalize_module(module, depth=2):
+    """
+    Return the module specified by `module`.  In particular:
+      - If `module` is a module, then return module.
+      - If `module` is a string, then import and return the
+        module with that name.
+      - If `module` is None, then return the calling module.
+        The calling module is assumed to be the module of
+        the stack frame at the given depth in the call stack.
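
# Usage sketch for the option-flag registry defined above, reproduced
# standalone so the bit arithmetic is visible: each registered name
# takes the next free bit, so flags combine and test with bitwise
# operators.
OPTION_BY_NAME = {}

def register(name):
    flag = 1 << len(OPTION_BY_NAME)
    OPTION_BY_NAME[name] = flag
    return flag

NORM_WS = register('NORMALIZE_WHITESPACE')
ELL = register('ELLIPSIS')
assert (NORM_WS, ELL) == (1, 2)
assert (NORM_WS | ELL) & ELL          # combined flags test with "&"
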
+ """ + if inspect.ismodule(module): + return module + elif isinstance(module, basestring): + return __import__(module, globals(), locals(), ["*"]) + elif module is None: + return sys.modules[sys._getframe(depth).f_globals['__name__']] + else: + raise TypeError("Expected a module, string, or None") + +def _indent(s, indent=4): + """ + Add the given number of space characters to the beginning every + non-blank line in `s`, and return the result. + """ + # This regexp matches the start of non-blank lines: + return re.sub('(?m)^(?!$)', indent*' ', s) + +def _exception_traceback(exc_info): + """ + Return a string containing a traceback message for the given + exc_info tuple (as returned by sys.exc_info()). + """ + # Get a traceback message. + excout = StringIO() + exc_type, exc_val, exc_tb = exc_info + traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) + return excout.getvalue() + +# Override some StringIO methods. +class _SpoofOut(StringIO): + def getvalue(self): + result = StringIO.getvalue(self) + # If anything at all was written, make sure there's a trailing + # newline. There's no way for the expected output to indicate + # that a trailing newline is missing. + if result and not result.endswith("\n"): + result += "\n" + # Prevent softspace from screwing up the next test case, in + # case they used print with a trailing comma in an example. + if hasattr(self, "softspace"): + del self.softspace + return result + + def truncate(self, size=None): + StringIO.truncate(self, size) + if hasattr(self, "softspace"): + del self.softspace + +# Worst-case linear-time ellipsis matching. +def _ellipsis_match(want, got): + """ + Essentially the only subtle case: + >>> _ellipsis_match('aa...aa', 'aaa') + False + """ + if want.find(ELLIPSIS_MARKER)==-1: + return want == got + + # Find "the real" strings. + ws = want.split(ELLIPSIS_MARKER) + assert len(ws) >= 2 + + # Deal with exact matches possibly needed at one or both ends. + startpos, endpos = 0, len(got) + w = ws[0] + if w: # starts with exact match + if got.startswith(w): + startpos = len(w) + del ws[0] + else: + return False + w = ws[-1] + if w: # ends with exact match + if got.endswith(w): + endpos -= len(w) + del ws[-1] + else: + return False + + if startpos > endpos: + # Exact end matches required more characters than we have, as in + # _ellipsis_match('aa...aa', 'aaa') + return False + + # For the rest, we only need to find the leftmost non-overlapping + # match for each piece. If there's no overall match that way alone, + # there's no overall match period. + for w in ws: + # w may be '' at times, if there are consecutive ellipses, or + # due to an ellipsis at the start or end of `want`. That's OK. + # Search for an empty string succeeds, and doesn't change startpos. + startpos = got.find(w, startpos, endpos) + if startpos < 0: + return False + startpos += len(w) + + return True + +def _comment_line(line): + "Return a commented form of the given line" + line = line.rstrip() + if line: + return '# '+line + else: + return '#' + +class _OutputRedirectingPdb(pdb.Pdb): + """ + A specialized version of the python debugger that redirects stdout + to a given stream when interacting with the user. Stdout is *not* + redirected when traced code is executed. + """ + def __init__(self, out): + self.__out = out + pdb.Pdb.__init__(self) + + def trace_dispatch(self, *args): + # Redirect stdout to the given stream. + save_stdout = sys.stdout + sys.stdout = self.__out + # Call Pdb's trace dispatch method. 
+        try:
+            return pdb.Pdb.trace_dispatch(self, *args)
+        finally:
+            sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+    if not inspect.ismodule(module):
+        raise TypeError('Expected a module: %r' % module)
+    if path.startswith('/'):
+        raise ValueError('Module-relative files may not have absolute paths')
+
+    # Find the base directory for the path.
+    if hasattr(module, '__file__'):
+        # A normal module/package
+        basedir = os.path.split(module.__file__)[0]
+    elif module.__name__ == '__main__':
+        # An interactive session.
+        if len(sys.argv) > 0 and sys.argv[0] != '':
+            basedir = os.path.split(sys.argv[0])[0]
+        else:
+            basedir = os.curdir
+    else:
+        # A module w/o __file__ (this includes builtins)
+        raise ValueError("Can't resolve paths relative to the module " +
+                         module.__name__ + " (it has no __file__)")
+
+    # Combine the base directory and the path.
+    return os.path.join(basedir, *(path.split('/')))
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+##   fragment of source code, and "want" is the expected output for
+##   "source."  The Example class also includes information about
+##   where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+##   a string (such as an object's docstring).  The DocTest class also
+##   includes information about where the string was extracted from.
+
+class Example:
+    """
+    A single doctest example, consisting of source code and expected
+    output.  `Example` defines the following attributes:
+
+      - source: A single Python statement, always ending with a newline.
+        The constructor adds a newline if needed.
+
+      - want: The expected output from running the source code (either
+        from stdout, or a traceback in case of exception).  `want` ends
+        with a newline unless it's empty, in which case it's an empty
+        string.  The constructor adds a newline if needed.
+
+      - exc_msg: The exception message generated by the example, if
+        the example is expected to generate an exception; or `None` if
+        it is not expected to generate an exception.  This exception
+        message is compared against the return value of
+        `traceback.format_exception_only()`.  `exc_msg` ends with a
+        newline unless it's `None`.  The constructor adds a newline
+        if needed.
+
+      - lineno: The line number within the DocTest string containing
+        this Example where the Example begins.  This line number is
+        zero-based, with respect to the beginning of the DocTest.
+
+      - indent: The example's indentation in the DocTest string.
+        I.e., the number of space characters that precede the
+        example's first prompt.
+
+      - options: A dictionary mapping from option flags to True or
+        False, which is used to override default options for this
+        example.  Any option flags not contained in this dictionary
+        are left at their default value (as specified by the
+        DocTestRunner's optionflags).  By default, no options are set.
+    """
+    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+                 options=None):
+        # Normalize inputs.
+        if not source.endswith('\n'):
+            source += '\n'
+        if want and not want.endswith('\n'):
+            want += '\n'
+        if exc_msg is not None and not exc_msg.endswith('\n'):
+            exc_msg += '\n'
+        # Store properties.
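+        # (Illustration, not from the original source: Example('x = 1', '')
+        #  stores source == 'x = 1\n' and want == '' -- the normalization
+        #  above adds the newline to source, while empty wants stay empty.)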
+        self.source = source
+        self.want = want
+        self.lineno = lineno
+        self.indent = indent
+        if options is None: options = {}
+        self.options = options
+        self.exc_msg = exc_msg
+
+class DocTest:
+    """
+    A collection of doctest examples that should be run in a single
+    namespace.  Each `DocTest` defines the following attributes:
+
+      - examples: the list of examples.
+
+      - globs: The namespace (aka globals) that the examples should
+        be run in.
+
+      - name: A name identifying the DocTest (typically, the name of
+        the object whose docstring this DocTest was extracted from).
+
+      - filename: The name of the file that this DocTest was extracted
+        from, or `None` if the filename is unknown.
+
+      - lineno: The line number within filename where this DocTest
+        begins, or `None` if the line number is unavailable.  This
+        line number is zero-based, with respect to the beginning of
+        the file.
+
+      - docstring: The string that the examples were extracted from,
+        or `None` if the string is unavailable.
+    """
+    def __init__(self, examples, globs, name, filename, lineno, docstring):
+        """
+        Create a new DocTest containing the given examples.  The
+        DocTest's globals are initialized with a copy of `globs`.
+        """
+        assert not isinstance(examples, basestring), \
+               "DocTest no longer accepts str; use DocTestParser instead"
+        self.examples = examples
+        self.docstring = docstring
+        self.globs = globs.copy()
+        self.name = name
+        self.filename = filename
+        self.lineno = lineno
+
+    def __repr__(self):
+        if len(self.examples) == 0:
+            examples = 'no examples'
+        elif len(self.examples) == 1:
+            examples = '1 example'
+        else:
+            examples = '%d examples' % len(self.examples)
+        return ('<DocTest %s from %s:%s (%s)>' %
+                (self.name, self.filename, self.lineno, examples))
+
+
+    # This lets us sort tests by name:
+    def __cmp__(self, other):
+        if not isinstance(other, DocTest):
+            return -1
+        return cmp((self.name, self.filename, self.lineno, id(self)),
+                   (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+    """
+    A class used to parse strings containing doctest examples.
+    """
+    # This regular expression is used to find doctest examples in a
+    # string.  It defines three groups: `source` is the source code
+    # (including leading indentation and prompts); `indent` is the
+    # indentation of the first (PS1) line of the source code; and
+    # `want` is the expected output (including leading indentation).
+    _EXAMPLE_RE = re.compile(r'''
+        # Source consists of a PS1 line followed by zero or more PS2 lines.
+        (?P<source>
+            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
+            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
+        \n?
+        # Want consists of any non-blank lines that do not start with PS1.
+        (?P<want> (?:(?![ ]*$)    # Not a blank line
+                     (?![ ]*>>>)  # Not a line starting with PS1
+                     .*$\n?       # But any other line
+                  )*)
+        ''', re.MULTILINE | re.VERBOSE)
+
+    # A regular expression for handling `want` strings that contain
+    # expected exceptions.  It divides `want` into three pieces:
+    #    - the traceback header line (`hdr`)
+    #    - the traceback stack (`stack`)
+    #    - the exception message (`msg`), as generated by
+    #      traceback.format_exception_only()
+    # `msg` may have multiple lines.  We assume/require that the
+    # exception message is the first non-indented line starting with a word
+    # character following the traceback header line.
+    _EXCEPTION_RE = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                # toss trailing whitespace on the header.
+        (?P<stack> .*?)      # don't blink: absorb stuff until...
+        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+    # A callable returning a true value iff its argument is a blank line
+    # or contains a single comment.
+    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+    def parse(self, string, name='<string>'):
+        """
+        Divide the given string into examples and intervening text,
+        and return them as a list of alternating Examples and strings.
+        Line numbers for the Examples are 0-based.  The optional
+        argument `name` is a name identifying this string, and is only
+        used for error messages.
+        """
+        string = string.expandtabs()
+        # If all lines begin with the same indentation, then strip it.
+        min_indent = self._min_indent(string)
+        if min_indent > 0:
+            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+        output = []
+        charno, lineno = 0, 0
+        # Find all doctest examples in the string:
+        for m in self._EXAMPLE_RE.finditer(string):
+            # Add the pre-example text to `output`.
+            output.append(string[charno:m.start()])
+            # Update lineno (lines before this example)
+            lineno += string.count('\n', charno, m.start())
+            # Extract info from the regexp match.
+            (source, options, want, exc_msg) = \
+                     self._parse_example(m, name, lineno)
+            # Create an Example, and add it to the list.
+            if not self._IS_BLANK_OR_COMMENT(source):
+                output.append( Example(source, want, exc_msg,
+                                       lineno=lineno,
+                                       indent=min_indent+len(m.group('indent')),
+                                       options=options) )
+            # Update lineno (lines inside this example)
+            lineno += string.count('\n', m.start(), m.end())
+            # Update charno.
+            charno = m.end()
+        # Add any remaining post-example text to `output`.
+        output.append(string[charno:])
+        return output
+
+    def get_doctest(self, string, globs, name, filename, lineno):
+        """
+        Extract all doctest examples from the given string, and
+        collect them into a `DocTest` object.
+
+        `globs`, `name`, `filename`, and `lineno` are attributes for
+        the new `DocTest` object.  See the documentation for `DocTest`
+        for more information.
+        """
+        return DocTest(self.get_examples(string, name), globs,
+                       name, filename, lineno, string)
+
+    def get_examples(self, string, name='<string>'):
+        """
+        Extract all doctest examples from the given string, and return
+        them as a list of `Example` objects.  Line numbers are
+        0-based, because it's most common in doctests that nothing
+        interesting appears on the same line as opening triple-quote,
+        and so the first interesting line is called \"line 1\" then.
+
+        The optional argument `name` is a name identifying this
+        string, and is only used for error messages.
+        """
+        return [x for x in self.parse(string, name)
+                if isinstance(x, Example)]
+
+    def _parse_example(self, m, name, lineno):
+        """
+        Given a regular expression match from `_EXAMPLE_RE` (`m`),
+        return a tuple `(source, options, want, exc_msg)`, where
+        `source` is the matched example's source code (with prompts
+        and indentation stripped); and `want` is the example's expected
+        output (with indentation stripped).
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        # Get the example's indentation level.
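+        # (For illustration, not from the original source: given the
+        #  doctest line "    >>> print 6*7", m.group('indent') is the four
+        #  leading spaces, so the indentation level computed below is 4.)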
+        indent = len(m.group('indent'))
+
+        # Divide source into lines; check that they're properly
+        # indented; and then strip their indentation & prompts.
+        source_lines = m.group('source').split('\n')
+        self._check_prompt_blank(source_lines, indent, name, lineno)
+        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+        source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+        # Divide want into lines; check that it's properly indented; and
+        # then strip the indentation.  Spaces before the last newline should
+        # be preserved, so plain rstrip() isn't good enough.
+        want = m.group('want')
+        want_lines = want.split('\n')
+        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+            del want_lines[-1]  # forget final newline & spaces after it
+        self._check_prefix(want_lines, ' '*indent, name,
+                           lineno + len(source_lines))
+        want = '\n'.join([wl[indent:] for wl in want_lines])
+
+        # If `want` contains a traceback message, then extract it.
+        m = self._EXCEPTION_RE.match(want)
+        if m:
+            exc_msg = m.group('msg')
+        else:
+            exc_msg = None
+
+        # Extract options from the source.
+        options = self._find_options(source, name, lineno)
+
+        return source, options, want, exc_msg
+
+    # This regular expression looks for option directives in the
+    # source code of an example.  Option directives are comments
+    # starting with "doctest:".  Warning: this may give false
+    # positives for string-literals that contain the string
+    # "#doctest:".  Eliminating these false positives would require
+    # actually parsing the string; but we limit them by ignoring any
+    # line containing "#doctest:" that is *followed* by a quote mark.
+    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+                                      re.MULTILINE)
+
+    def _find_options(self, source, name, lineno):
+        """
+        Return a dictionary containing option overrides extracted from
+        option directives in the given source string.
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        options = {}
+        # (note: with the current regexp, this will match at most once:)
+        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+            option_strings = m.group(1).replace(',', ' ').split()
+            for option in option_strings:
+                if (option[0] not in '+-' or
+                    option[1:] not in OPTIONFLAGS_BY_NAME):
+                    raise ValueError('line %r of the doctest for %s '
+                                     'has an invalid option: %r' %
+                                     (lineno+1, name, option))
+                flag = OPTIONFLAGS_BY_NAME[option[1:]]
+                options[flag] = (option[0] == '+')
+        if options and self._IS_BLANK_OR_COMMENT(source):
+            raise ValueError('line %r of the doctest for %s has an option '
+                             'directive on a line with no example: %r' %
+                             (lineno, name, source))
+        return options
+
+    # This regular expression finds the indentation of every non-blank
+    # line in a string.
+    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
+
+    def _min_indent(self, s):
+        "Return the minimum indentation of any non-blank line in `s`"
+        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+        if len(indents) > 0:
+            return min(indents)
+        else:
+            return 0
+
+    def _check_prompt_blank(self, lines, indent, name, lineno):
+        """
+        Given the lines of a source string (including prompts and
+        leading indentation), check to make sure that every prompt is
+        followed by a space character.  If any line is not followed by
+        a space character, then raise ValueError.
+ """ + for i, line in enumerate(lines): + if len(line) >= indent+4 and line[indent+3] != ' ': + raise ValueError('line %r of the docstring for %s ' + 'lacks blank after %s: %r' % + (lineno+i+1, name, + line[indent:indent+3], line)) + + def _check_prefix(self, lines, prefix, name, lineno): + """ + Check that every line in the given list starts with the given + prefix; if any line does not, then raise a ValueError. + """ + for i, line in enumerate(lines): + if line and not line.startswith(prefix): + raise ValueError('line %r of the docstring for %s has ' + 'inconsistent leading whitespace: %r' % + (lineno+i+1, name, line)) + + +###################################################################### +## 4. DocTest Finder +###################################################################### + +class DocTestFinder: + """ + A class used to extract the DocTests that are relevant to a given + object, from its docstring and the docstrings of its contained + objects. Doctests can currently be extracted from the following + object types: modules, functions, classes, methods, staticmethods, + classmethods, and properties. + """ + + def __init__(self, verbose=False, parser=DocTestParser(), + recurse=True, _namefilter=None, exclude_empty=True): + """ + Create a new doctest finder. + + The optional argument `parser` specifies a class or + function that should be used to create new DocTest objects (or + objects that implement the same interface as DocTest). The + signature for this factory function should match the signature + of the DocTest constructor. + + If the optional argument `recurse` is false, then `find` will + only examine the given object, and not any contained objects. + + If the optional argument `exclude_empty` is false, then `find` + will include tests for objects with empty docstrings. + """ + self._parser = parser + self._verbose = verbose + self._recurse = recurse + self._exclude_empty = exclude_empty + # _namefilter is undocumented, and exists only for temporary backward- + # compatibility support of testmod's deprecated isprivate mess. + self._namefilter = _namefilter + + def find(self, obj, name=None, module=None, globs=None, + extraglobs=None): + """ + Return a list of the DocTests that are defined by the given + object's docstring, or by any of its contained objects' + docstrings. + + The optional parameter `module` is the module that contains + the given object. If the module is not specified or is None, then + the test finder will attempt to automatically determine the + correct module. The object's module is used: + + - As a default namespace, if `globs` is not specified. + - To prevent the DocTestFinder from extracting DocTests + from objects that are imported from other modules. + - To find the name of the file containing the object. + - To help find the line number of the object within its + file. + + Contained objects whose module does not match `module` are ignored. + + If `module` is False, no attempt to find the module will be made. + This is obscure, of use mostly in tests: if `module` is False, or + is None but cannot be found automatically, then all objects are + considered to belong to the (non-existent) module, so all contained + objects will (recursively) be searched for doctests. + + The globals for each DocTest is formed by combining `globs` + and `extraglobs` (bindings in `extraglobs` override bindings + in `globs`). A new copy of the globals dictionary is created + for each DocTest. 
+        If `globs` is not specified, then it defaults to the module's
+        `__dict__`, if specified, or {} otherwise.  If `extraglobs` is
+        not specified, then it defaults to {}.
+
+        """
+        # If name was not specified, then extract it from the object.
+        if name is None:
+            name = getattr(obj, '__name__', None)
+            if name is None:
+                raise ValueError("DocTestFinder.find: name must be given "
+                                 "when obj.__name__ doesn't exist: %r" %
+                                 (type(obj),))
+
+        # Find the module that contains the given object (if obj is
+        # a module, then module=obj.).  Note: this may fail, in which
+        # case module will be None.
+        if module is False:
+            module = None
+        elif module is None:
+            module = inspect.getmodule(obj)
+
+        # Read the module's source code.  This is used by
+        # DocTestFinder._find_lineno to find the line number for a
+        # given object's docstring.
+        try:
+            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
+            source_lines = linecache.getlines(file)
+            if not source_lines:
+                source_lines = None
+        except TypeError:
+            source_lines = None
+
+        # Initialize globals, and merge in extraglobs.
+        if globs is None:
+            if module is None:
+                globs = {}
+            else:
+                globs = module.__dict__.copy()
+        else:
+            globs = globs.copy()
+        if extraglobs is not None:
+            globs.update(extraglobs)
+
+        # Recursively explore `obj`, extracting DocTests.
+        tests = []
+        self._find(tests, obj, name, module, source_lines, globs, {})
+        return tests
+
+    def _filter(self, obj, prefix, base):
+        """
+        Return true if the given object should not be examined.
+        """
+        return (self._namefilter is not None and
+                self._namefilter(prefix, base))
+
+    def _from_module(self, module, object):
+        """
+        Return true if the given object is defined in the given
+        module.
+        """
+        if module is None:
+            return True
+        elif inspect.isfunction(object):
+            return module.__dict__ is func_globals(object)
+        elif inspect.isclass(object):
+            return module.__name__ == object.__module__
+        elif inspect.getmodule(object) is not None:
+            return module is inspect.getmodule(object)
+        elif hasattr(object, '__module__'):
+            return module.__name__ == object.__module__
+        elif isinstance(object, property):
+            return True  # [XX] no way to be sure.
+        else:
+            raise ValueError("object must be a class or function")
+
+    def _find(self, tests, obj, name, module, source_lines, globs, seen):
+        """
+        Find tests for the given object and any contained objects, and
+        add them to `tests`.
+        """
+        if self._verbose:
+            print('Finding tests in %s' % name)
+
+        # If we've already processed this object, then ignore it.
+        if id(obj) in seen:
+            return
+        seen[id(obj)] = 1
+
+        # Find a test for this object, and add it to the list of tests.
+        test = self._get_test(obj, name, module, globs, source_lines)
+        if test is not None:
+            tests.append(test)
+
+        # Look for tests in a module's contained objects.
+        if inspect.ismodule(obj) and self._recurse:
+            for valname, val in obj.__dict__.items():
+                # Check if this contained object should be ignored.
+                if self._filter(val, name, valname):
+                    continue
+                valname = '%s.%s' % (name, valname)
+                # Recurse to functions & classes.
+                if ((inspect.isfunction(val) or inspect.isclass(val)) and
+                    self._from_module(module, val)):
+                    self._find(tests, val, valname, module, source_lines,
+                               globs, seen)
+
+        # Look for tests in a module's __test__ dictionary.
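+        # (For illustration, not from the original source: a module may
+        #  define
+        #      __test__ = {'arithmetic': '>>> 2 + 2\n4\n'}
+        #  and the string value is then searched for doctests under the
+        #  name '<module name>.__test__.arithmetic'.)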
+        if inspect.ismodule(obj) and self._recurse:
+            for valname, val in getattr(obj, '__test__', {}).items():
+                if not isinstance(valname, basestring):
+                    raise ValueError("DocTestFinder.find: __test__ keys "
+                                     "must be strings: %r" %
+                                     (type(valname),))
+                if not (inspect.isfunction(val) or inspect.isclass(val) or
+                        inspect.ismethod(val) or inspect.ismodule(val) or
+                        isinstance(val, basestring)):
+                    raise ValueError("DocTestFinder.find: __test__ values "
+                                     "must be strings, functions, methods, "
+                                     "classes, or modules: %r" %
+                                     (type(val),))
+                valname = '%s.__test__.%s' % (name, valname)
+                self._find(tests, val, valname, module, source_lines,
+                           globs, seen)
+
+        # Look for tests in a class's contained objects.
+        if inspect.isclass(obj) and self._recurse:
+            for valname, val in obj.__dict__.items():
+                # Check if this contained object should be ignored.
+                if self._filter(val, name, valname):
+                    continue
+                # Special handling for staticmethod/classmethod.
+                if isinstance(val, staticmethod):
+                    val = getattr(obj, valname)
+                if isinstance(val, classmethod):
+                    val = im_func(getattr(obj, valname))
+
+                # Recurse to methods, properties, and nested classes.
+                if ((inspect.isfunction(val) or inspect.isclass(val) or
+                     isinstance(val, property)) and
+                    self._from_module(module, val)):
+                    valname = '%s.%s' % (name, valname)
+                    self._find(tests, val, valname, module, source_lines,
+                               globs, seen)
+
+    def _get_test(self, obj, name, module, globs, source_lines):
+        """
+        Return a DocTest for the given object, if it defines a docstring;
+        otherwise, return None.
+        """
+        # Extract the object's docstring.  If it doesn't have one,
+        # then return None (no test for this object).
+        if isinstance(obj, basestring):
+            docstring = obj
+        else:
+            try:
+                if obj.__doc__ is None:
+                    docstring = ''
+                else:
+                    docstring = obj.__doc__
+                    if not isinstance(docstring, basestring):
+                        docstring = str(docstring)
+            except (TypeError, AttributeError):
+                docstring = ''
+
+        # Find the docstring's location in the file.
+        lineno = self._find_lineno(obj, source_lines)
+
+        # Don't bother if the docstring is empty.
+        if self._exclude_empty and not docstring:
+            return None
+
+        # Return a DocTest for this object.
+        if module is None:
+            filename = None
+        else:
+            filename = getattr(module, '__file__', module.__name__)
+            if filename[-4:] in (".pyc", ".pyo"):
+                filename = filename[:-1]
+        return self._parser.get_doctest(docstring, globs, name,
+                                        filename, lineno)
+
+    def _find_lineno(self, obj, source_lines):
+        """
+        Return a line number of the given object's docstring.  Note:
+        this method assumes that the object has a docstring.
+        """
+        lineno = None
+
+        # Find the line number for modules.
+        if inspect.ismodule(obj):
+            lineno = 0
+
+        # Find the line number for classes.
+        # Note: this could be fooled if a class is defined multiple
+        # times in a single file.
+        if inspect.isclass(obj):
+            if source_lines is None:
+                return None
+            pat = re.compile(r'^\s*class\s*%s\b' %
+                             getattr(obj, '__name__', '-'))
+            for i, line in enumerate(source_lines):
+                if pat.match(line):
+                    lineno = i
+                    break
+
+        # Find the line number for functions & methods.
+        if inspect.ismethod(obj): obj = im_func(obj)
+        if inspect.isfunction(obj): obj = func_code(obj)
+        if inspect.istraceback(obj): obj = obj.tb_frame
+        if inspect.isframe(obj): obj = obj.f_code
+        if inspect.iscode(obj):
+            lineno = getattr(obj, 'co_firstlineno', None)-1
+
+        # Find the line number where the docstring starts.  Assume
+        # that it's the first line that begins with a quote mark.
+        # Note: this could be fooled by a multiline function
+        # signature, where a continuation line begins with a quote
+        # mark.
+        if lineno is not None:
+            if source_lines is None:
+                return lineno+1
+            pat = re.compile('(^|.*:)\s*\w*("|\')')
+            for lineno in range(lineno, len(source_lines)):
+                if pat.match(source_lines[lineno]):
+                    return lineno
+
+        # We couldn't find the line number.
+        return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+    """
+    A class used to run DocTest test cases, and accumulate statistics.
+    The `run` method is used to process a single DocTest case.  It
+    returns a tuple `(f, t)`, where `t` is the number of test cases
+    tried, and `f` is the number of test cases that failed.
+
+        >>> tests = DocTestFinder().find(_TestClass)
+        >>> runner = DocTestRunner(verbose=False)
+        >>> for test in tests:
+        ...     print runner.run(test)
+        (0, 2)
+        (0, 1)
+        (0, 2)
+        (0, 2)
+
+    The `summarize` method prints a summary of all the test cases that
+    have been run by the runner, and returns an aggregated `(f, t)`
+    tuple:
+
+        >>> runner.summarize(verbose=1)
+        4 items passed all tests:
+           2 tests in _TestClass
+           2 tests in _TestClass.__init__
+           2 tests in _TestClass.get
+           1 tests in _TestClass.square
+        7 tests in 4 items.
+        7 passed and 0 failed.
+        Test passed.
+        (0, 7)
+
+    The aggregated number of tried examples and failed examples is
+    also available via the `tries` and `failures` attributes:
+
+        >>> runner.tries
+        7
+        >>> runner.failures
+        0
+
+    The comparison between expected outputs and actual outputs is done
+    by an `OutputChecker`.  This comparison may be customized with a
+    number of option flags; see the documentation for `testmod` for
+    more information.  If the option flags are insufficient, then the
+    comparison may also be customized by passing a subclass of
+    `OutputChecker` to the constructor.
+
+    The test runner's display output can be controlled in two ways.
+    First, an output function (`out`) can be passed to
+    `DocTestRunner.run`; this function will be called with strings that
+    should be displayed.  It defaults to `sys.stdout.write`.  If
+    capturing the output is not sufficient, then the display output
+    can also be customized by subclassing DocTestRunner, and
+    overriding the methods `report_start`, `report_success`,
+    `report_unexpected_exception`, and `report_failure`.
+    """
+    # This divider string is used to separate failure messages, and to
+    # separate sections of the summary.
+    DIVIDER = "*" * 70
+
+    def __init__(self, checker=None, verbose=None, optionflags=0):
+        """
+        Create a new test runner.
+
+        Optional keyword arg `checker` is the `OutputChecker` that
+        should be used to compare the expected outputs and actual
+        outputs of doctest examples.
+
+        Optional keyword arg 'verbose' prints lots of stuff if true,
+        only failures if false; by default, it's true iff '-v' is in
+        sys.argv.
+
+        Optional argument `optionflags` can be used to control how the
+        test runner compares expected output to actual output, and how
+        it displays failures.  See the documentation for `testmod` for
+        more information.
+        """
+        self._checker = checker or OutputChecker()
+        if verbose is None:
+            verbose = '-v' in sys.argv
+        self._verbose = verbose
+        self.optionflags = optionflags
+        self.original_optionflags = optionflags
+
+        # Keep track of the examples we've run.
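+        # (tries/failures accumulate across run() calls; _name2ft maps each
+        #  test name to an aggregated (failures, tries) pair, for example
+        #  {'_TestClass': (0, 2)} -- illustrative values only.)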
+        self.tries = 0
+        self.failures = 0
+        self._name2ft = {}
+
+        # Create a fake output target for capturing doctest output.
+        self._fakeout = _SpoofOut()
+
+    #/////////////////////////////////////////////////////////////////
+    # Reporting methods
+    #/////////////////////////////////////////////////////////////////
+
+    def report_start(self, out, test, example):
+        """
+        Report that the test runner is about to process the given
+        example.  (Only displays a message if verbose=True)
+        """
+        if self._verbose:
+            if example.want:
+                out('Trying:\n' + _indent(example.source) +
+                    'Expecting:\n' + _indent(example.want))
+            else:
+                out('Trying:\n' + _indent(example.source) +
+                    'Expecting nothing\n')
+
+    def report_success(self, out, test, example, got):
+        """
+        Report that the given example ran successfully.  (Only
+        displays a message if verbose=True)
+        """
+        if self._verbose:
+            out("ok\n")
+
+    def report_failure(self, out, test, example, got):
+        """
+        Report that the given example failed.
+        """
+        out(self._failure_header(test, example) +
+            self._checker.output_difference(example, got, self.optionflags))
+
+    def report_unexpected_exception(self, out, test, example, exc_info):
+        """
+        Report that the given example raised an unexpected exception.
+        """
+        out(self._failure_header(test, example) +
+            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+    def _failure_header(self, test, example):
+        out = [self.DIVIDER]
+        if test.filename:
+            if test.lineno is not None and example.lineno is not None:
+                lineno = test.lineno + example.lineno + 1
+            else:
+                lineno = '?'
+            out.append('File "%s", line %s, in %s' %
+                       (test.filename, lineno, test.name))
+        else:
+            out.append('Line %s, in %s' % (example.lineno+1, test.name))
+        out.append('Failed example:')
+        source = example.source
+        out.append(_indent(source))
+        return '\n'.join(out)
+
+    #/////////////////////////////////////////////////////////////////
+    # DocTest Running
+    #/////////////////////////////////////////////////////////////////
+
+    def __run(self, test, compileflags, out):
+        """
+        Run the examples in `test`.  Write the outcome of each example
+        with one of the `DocTestRunner.report_*` methods, using the
+        writer function `out`.  `compileflags` is the set of compiler
+        flags that should be used to execute examples.  Return a tuple
+        `(f, t)`, where `t` is the number of examples tried, and `f`
+        is the number of examples that failed.  The examples are run
+        in the namespace `test.globs`.
+        """
+        # Keep track of the number of failures and tries.
+        failures = tries = 0
+
+        # Save the option flags (since option directives can be used
+        # to modify them).
+        original_optionflags = self.optionflags
+
+        SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state
+
+        check = self._checker.check_output
+
+        # Process each example.
+        for examplenum, example in enumerate(test.examples):
+
+            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+            # reporting after the first failure.
+            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+                     failures > 0)
+
+            # Merge in the example's options.
+            self.optionflags = original_optionflags
+            if example.options:
+                for (optionflag, val) in example.options.items():
+                    if val:
+                        self.optionflags |= optionflag
+                    else:
+                        self.optionflags &= ~optionflag
+
+            # Record that we started this example.
+            tries += 1
+            if not quiet:
+                self.report_start(out, test, example)
+
+            # Use a special filename for compile(), so we can retrieve
+            # the source code during interactive debugging (see
+            # __patched_linecache_getlines).
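+            # (For illustration: the third example of a test named 'x.y'
+            #  is compiled under the pseudo-filename '<doctest x.y[2]>',
+            #  which __patched_linecache_getlines later parses to recover
+            #  the example's source.)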
+            filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+            # Run the example in the given context (globs), and record
+            # any exception that gets raised.  (But don't intercept
+            # keyboard interrupts.)
+            try:
+                # Don't blink!  This is where the user's code gets run.
+                exec_(compile(example.source, filename, "single",
+                              compileflags, 1), test.globs)
+                self.debugger.set_continue()  # ==== Example Finished ====
+                exception = None
+            except KeyboardInterrupt:
+                raise
+            except:
+                exception = sys.exc_info()
+                self.debugger.set_continue()  # ==== Example Finished ====
+
+            got = self._fakeout.getvalue()  # the actual output
+            self._fakeout.truncate(0)
+            outcome = FAILURE   # guilty until proved innocent or insane
+
+            # If the example executed without raising any exceptions,
+            # verify its output.
+            if exception is None:
+                if check(example.want, got, self.optionflags):
+                    outcome = SUCCESS
+
+            # The example raised an exception: check if it was expected.
+            else:
+                exc_info = sys.exc_info()
+                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+                if not quiet:
+                    got += _exception_traceback(exc_info)
+
+                # If `example.exc_msg` is None, then we weren't expecting
+                # an exception.
+                if example.exc_msg is None:
+                    outcome = BOOM
+
+                # We expected an exception: see whether it matches.
+                elif check(example.exc_msg, exc_msg, self.optionflags):
+                    outcome = SUCCESS
+
+                # Another chance if they didn't care about the detail.
+                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+                    m1 = re.match(r'[^:]*:', example.exc_msg)
+                    m2 = re.match(r'[^:]*:', exc_msg)
+                    if m1 and m2 and check(m1.group(0), m2.group(0),
+                                           self.optionflags):
+                        outcome = SUCCESS
+
+            # Report the outcome.
+            if outcome is SUCCESS:
+                if not quiet:
+                    self.report_success(out, test, example, got)
+            elif outcome is FAILURE:
+                if not quiet:
+                    self.report_failure(out, test, example, got)
+                failures += 1
+            elif outcome is BOOM:
+                if not quiet:
+                    self.report_unexpected_exception(out, test, example,
+                                                     exc_info)
+                failures += 1
+            else:
+                assert False, ("unknown outcome", outcome)
+
+        # Restore the option flags (in case they were modified)
+        self.optionflags = original_optionflags
+
+        # Record and return the number of failures and tries.
+        self.__record_outcome(test, failures, tries)
+        return failures, tries
+
+    def __record_outcome(self, test, f, t):
+        """
+        Record the fact that the given DocTest (`test`) generated `f`
+        failures out of `t` tried examples.
+        """
+        f2, t2 = self._name2ft.get(test.name, (0,0))
+        self._name2ft[test.name] = (f+f2, t+t2)
+        self.failures += f
+        self.tries += t
+
+    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+                                         r'(?P<name>[\w\.]+)'
+                                         r'\[(?P<examplenum>\d+)\]>$')
+    def __patched_linecache_getlines(self, filename, module_globals=None):
+        m = self.__LINECACHE_FILENAME_RE.match(filename)
+        if m and m.group('name') == self.test.name:
+            example = self.test.examples[int(m.group('examplenum'))]
+            return example.source.splitlines(True)
+        elif func_code(self.save_linecache_getlines).co_argcount > 1:
+            return self.save_linecache_getlines(filename, module_globals)
+        else:
+            return self.save_linecache_getlines(filename)
+
+    def run(self, test, compileflags=None, out=None, clear_globs=True):
+        """
+        Run the examples in `test`, and display the results using the
+        writer function `out`.
+
+        The examples are run in the namespace `test.globs`.  If
+        `clear_globs` is true (the default), then this namespace will
+        be cleared after the test runs, to help with garbage
+        collection.  If you would like to examine the namespace after
+        the test completes, then use `clear_globs=False`.
+
+        `compileflags` gives the set of flags that should be used by
+        the Python compiler when running the examples.  If not
+        specified, then it will default to the set of future-import
+        flags that apply to `globs`.
+
+        The output of each example is checked using
+        `OutputChecker.check_output`, and the results are formatted by
+        the `DocTestRunner.report_*` methods.
+        """
+        self.test = test
+
+        if compileflags is None:
+            compileflags = _extract_future_flags(test.globs)
+
+        save_stdout = sys.stdout
+        if out is None:
+            out = save_stdout.write
+        sys.stdout = self._fakeout
+
+        # Patch pdb.set_trace to restore sys.stdout during interactive
+        # debugging (so it's not still redirected to self._fakeout).
+        # Note that the interactive output will go to *our*
+        # save_stdout, even if that's not the real sys.stdout; this
+        # allows us to write test cases for the set_trace behavior.
+        save_set_trace = pdb.set_trace
+        self.debugger = _OutputRedirectingPdb(save_stdout)
+        self.debugger.reset()
+        pdb.set_trace = self.debugger.set_trace
+
+        # Patch linecache.getlines, so we can see the example's source
+        # when we're inside the debugger.
+        self.save_linecache_getlines = linecache.getlines
+        linecache.getlines = self.__patched_linecache_getlines
+
+        try:
+            return self.__run(test, compileflags, out)
+        finally:
+            sys.stdout = save_stdout
+            pdb.set_trace = save_set_trace
+            linecache.getlines = self.save_linecache_getlines
+            if clear_globs:
+                test.globs.clear()
+
+    #/////////////////////////////////////////////////////////////////
+    # Summarization
+    #/////////////////////////////////////////////////////////////////
+    def summarize(self, verbose=None):
+        """
+        Print a summary of all the test cases that have been run by
+        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+        the total number of failed examples, and `t` is the total
+        number of tried examples.
+
+        The optional `verbose` argument controls how detailed the
+        summary is.  If the verbosity is not specified, then the
+        DocTestRunner's verbosity is used.
+        """
+        if verbose is None:
+            verbose = self._verbose
+        notests = []
+        passed = []
+        failed = []
+        totalt = totalf = 0
+        for x in self._name2ft.items():
+            name, (f, t) = x
+            assert f <= t
+            totalt += t
+            totalf += f
+            if t == 0:
+                notests.append(name)
+            elif f == 0:
+                passed.append( (name, t) )
+            else:
+                failed.append(x)
+        if verbose:
+            if notests:
+                print(len(notests), "items had no tests:")
+                notests.sort()
+                for thing in notests:
+                    print("   ", thing)
+            if passed:
+                print(len(passed), "items passed all tests:")
+                passed.sort()
+                for thing, count in passed:
+                    print(" %3d tests in %s" % (count, thing))
+        if failed:
+            print(self.DIVIDER)
+            print(len(failed), "items had failures:")
+            failed.sort()
+            for thing, (f, t) in failed:
+                print(" %3d of %3d in %s" % (f, t, thing))
+        if verbose:
+            print(totalt, "tests in", len(self._name2ft), "items.")
+            print(totalt - totalf, "passed and", totalf, "failed.")
+        if totalf:
+            print("***Test Failed***", totalf, "failures.")
+        elif verbose:
+            print("Test passed.")
+        return totalf, totalt
+
+    #/////////////////////////////////////////////////////////////////
+    # Backward compatibility cruft to maintain doctest.master.
+    #/////////////////////////////////////////////////////////////////
+    def merge(self, other):
+        d = self._name2ft
+        for name, (f, t) in other._name2ft.items():
+            if name in d:
+                print("*** DocTestRunner.merge: '" + name + "' in both"
+                      " testers; summing outcomes.")
+                f2, t2 = d[name]
+                f = f + f2
+                t = t + t2
+            d[name] = f, t
+
+class OutputChecker:
+    """
+    A class used to check whether the actual output from a doctest
+    example matches the expected output.  `OutputChecker` defines two
+    methods: `check_output`, which compares a given pair of outputs,
+    and returns true if they match; and `output_difference`, which
+    returns a string describing the differences between two outputs.
+    """
+    def check_output(self, want, got, optionflags):
+        """
+        Return True iff the actual output from an example (`got`)
+        matches the expected output (`want`).  These strings are
+        always considered to match if they are identical; but
+        depending on what option flags the test runner is using,
+        several non-exact match types are also possible.  See the
+        documentation for `DocTestRunner` for more information about
+        option flags.
+        """
+        # Handle the common case first, for efficiency:
+        # if they're string-identical, always return true.
+        if got == want:
+            return True
+
+        # The values True and False replaced 1 and 0 as the return
+        # value for boolean comparisons in Python 2.3.
+        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
+            if (got, want) == ("True\n", "1\n"):
+                return True
+            if (got, want) == ("False\n", "0\n"):
+                return True
+
+        # <BLANKLINE> can be used as a special sequence to signify a
+        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+        if not (optionflags & DONT_ACCEPT_BLANKLINE):
+            # Replace <BLANKLINE> in want with a blank line.
+            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+                          '', want)
+            # If a line in got contains only spaces, then remove the
+            # spaces.
+            got = re.sub('(?m)^\s*?$', '', got)
+            if got == want:
+                return True
+
+        # This flag causes doctest to ignore any differences in the
+        # contents of whitespace strings.  Note that this can be used
+        # in conjunction with the ELLIPSIS flag.
+        if optionflags & NORMALIZE_WHITESPACE:
+            got = ' '.join(got.split())
+            want = ' '.join(want.split())
+            if got == want:
+                return True
+
+        # The ELLIPSIS flag says to let the sequence "..." in `want`
+        # match any substring in `got`.
+        if optionflags & ELLIPSIS:
+            if _ellipsis_match(want, got):
+                return True
+
+        # We didn't find any match; return false.
+        return False
+
+    # Should we do a fancy diff?
+    def _do_a_fancy_diff(self, want, got, optionflags):
+        # Not unless they asked for a fancy diff.
+        if not optionflags & (REPORT_UDIFF |
+                              REPORT_CDIFF |
+                              REPORT_NDIFF):
+            return False
+
+        # If expected output uses ellipsis, a meaningful fancy diff is
+        # too hard ... or maybe not.  In two real-life failures Tim saw,
+        # a diff was a major help anyway, so this is commented out.
+        # [todo] _ellipsis_match() knows which pieces do and don't match,
+        # and could be the basis for a kick-ass diff in this case.
+        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+        ##    return False
+
+        # ndiff does intraline difference marking, so can be useful even
+        # for 1-line differences.
+        if optionflags & REPORT_NDIFF:
+            return True
+
+        # The other diff types need at least a few lines to be helpful.
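+        # (For illustration: a want/got pair of at least three
+        #  newline-terminated lines each qualifies, since both then
+        #  satisfy count('\n') > 2 in the check below.)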
+        return want.count('\n') > 2 and got.count('\n') > 2
+
+    def output_difference(self, example, got, optionflags):
+        """
+        Return a string describing the differences between the
+        expected output for a given example (`example`) and the actual
+        output (`got`).  `optionflags` is the set of option flags used
+        to compare `want` and `got`.
+        """
+        want = example.want
+        # If <BLANKLINE>s are being used, then replace blank lines
+        # with <BLANKLINE> in the actual output string.
+        if not (optionflags & DONT_ACCEPT_BLANKLINE):
+            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+        # Check if we should use diff.
+        if self._do_a_fancy_diff(want, got, optionflags):
+            # Split want & got into lines.
+            want_lines = want.splitlines(True)  # True == keep line ends
+            got_lines = got.splitlines(True)
+            # Use difflib to find their differences.
+            if optionflags & REPORT_UDIFF:
+                diff = difflib.unified_diff(want_lines, got_lines, n=2)
+                diff = list(diff)[2:]  # strip the diff header
+                kind = 'unified diff with -expected +actual'
+            elif optionflags & REPORT_CDIFF:
+                diff = difflib.context_diff(want_lines, got_lines, n=2)
+                diff = list(diff)[2:]  # strip the diff header
+                kind = 'context diff with expected followed by actual'
+            elif optionflags & REPORT_NDIFF:
+                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+                diff = list(engine.compare(want_lines, got_lines))
+                kind = 'ndiff with -expected +actual'
+            else:
+                assert 0, 'Bad diff option'
+            # Remove trailing whitespace on diff output.
+            diff = [line.rstrip() + '\n' for line in diff]
+            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+        # If we're not using diff, then simply list the expected
+        # output followed by the actual output.
+        if want and got:
+            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
+        elif want:
+            return 'Expected:\n%sGot nothing\n' % _indent(want)
+        elif got:
+            return 'Expected nothing\nGot:\n%s' % _indent(got)
+        else:
+            return 'Expected nothing\nGot nothing\n'
+
+class DocTestFailure(Exception):
+    """A DocTest example has failed in debugging mode.
+
+    The exception instance has variables:
+
+    - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+    - got: the actual output
+    """
+    def __init__(self, test, example, got):
+        self.test = test
+        self.example = example
+        self.got = got
+
+    def __str__(self):
+        return str(self.test)
+
+class UnexpectedException(Exception):
+    """A DocTest example has encountered an unexpected exception
+
+    The exception instance has variables:
+
+    - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+    - exc_info: the exception info
+    """
+    def __init__(self, test, example, exc_info):
+        self.test = test
+        self.example = example
+        self.exc_info = exc_info
+
+    def __str__(self):
+        return str(self.test)
+
+class DebugRunner(DocTestRunner):
+    r"""Run doc tests but raise an exception as soon as there is a failure.
+
+       If an unexpected exception occurs, an UnexpectedException is raised.
+       It contains the test, the example, and the original exception:
+
+         >>> runner = DebugRunner(verbose=False)
+         >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
+         ...                                    {}, 'foo', 'foo.py', 0)
+         >>> try:
+         ...     runner.run(test)
+         ... except UnexpectedException, failure:
+         ...     pass
+
+         >>> failure.test is test
+         True
+
+         >>> failure.example.want
+         '42\n'
+
+         >>> exc_info = failure.exc_info
+         >>> raise exc_info[0], exc_info[1], exc_info[2]
+         Traceback (most recent call last):
+         ...
+         KeyError
+
+       We wrap the original exception to give the calling application
+       access to the test and example information.
+
+       If the output doesn't match, then a DocTestFailure is raised:
+
+         >>> test = DocTestParser().get_doctest('''
+         ...      >>> x = 1
+         ...      >>> x
+         ...      2
+         ...      ''', {}, 'foo', 'foo.py', 0)
+
+         >>> try:
+         ...     runner.run(test)
+         ... except DocTestFailure, failure:
+         ...     pass
+
+       DocTestFailure objects provide access to the test:
+
+         >>> failure.test is test
+         True
+
+       As well as to the example:
+
+         >>> failure.example.want
+         '2\n'
+
+       and the actual output:
+
+         >>> failure.got
+         '1\n'
+
+       If a failure or error occurs, the globals are left intact:
+
+         >>> del test.globs['__builtins__']
+         >>> test.globs
+         {'x': 1}
+
+         >>> test = DocTestParser().get_doctest('''
+         ...      >>> x = 2
+         ...      >>> raise KeyError
+         ...      ''', {}, 'foo', 'foo.py', 0)
+
+         >>> runner.run(test)
+         Traceback (most recent call last):
+         ...
+         UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
+
+         >>> del test.globs['__builtins__']
+         >>> test.globs
+         {'x': 2}
+
+       But the globals are cleared if there is no error:
+
+         >>> test = DocTestParser().get_doctest('''
+         ...      >>> x = 2
+         ...      ''', {}, 'foo', 'foo.py', 0)
+
+         >>> runner.run(test)
+         (0, 1)
+
+         >>> test.globs
+         {}
+
+       """
+
+    def run(self, test, compileflags=None, out=None, clear_globs=True):
+        r = DocTestRunner.run(self, test, compileflags, out, False)
+        if clear_globs:
+            test.globs.clear()
+        return r
+
+    def report_unexpected_exception(self, out, test, example, exc_info):
+        raise UnexpectedException(test, example, exc_info)
+
+    def report_failure(self, out, test, example, got):
+        raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod.
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+            report=True, optionflags=0, extraglobs=None,
+            raise_on_error=False, exclude_empty=False):
+    """m=None, name=None, globs=None, verbose=None, isprivate=None,
+       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+       exclude_empty=False
+
+    Test examples in docstrings in functions and classes reachable
+    from module m (or the current module if m is not supplied), starting
+    with m.__doc__.  Unless isprivate is specified, private names
+    are not skipped.
+
+    Also test examples reachable from dict m.__test__ if it exists and is
+    not None.  m.__test__ maps names to functions, classes and strings;
+    function and class docstrings are tested even if the name is private;
+    strings are tested directly, as if they were docstrings.
+
+    Return (#failures, #tests).
+
+    See doctest.__doc__ for an overview.
+
+    Optional keyword arg "name" gives the name of the module; by default
+    use m.__name__.
+
+    Optional keyword arg "globs" gives a dict to be used as the globals
+    when executing examples; by default, use m.__dict__.  A copy of this
+    dict is actually used for each docstring, so that each docstring's
+    examples start with a clean slate.
+
+    Optional keyword arg "extraglobs" gives a dictionary that should be
+    merged into the globals that are used to execute examples.  By
+    default, no extra globals are used.  This is new in 2.4.
+
+    Optional keyword arg "verbose" prints lots of stuff if true, prints
+    only failures if false; by default, it's true iff "-v" is in sys.argv.
+ + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. This is new in 2.3. Possible values (see the + docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Deprecated in Python 2.4: + Optional keyword arg "isprivate" specifies a function used to + determine whether a name is private. The default function is + treat all functions as public. Optionally, "isprivate" can be + set to doctest.is_private to skip over functions marked as private + using the underscore naming convention; see its docs for details. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + if isprivate is not None: + warnings.warn("the isprivate argument is deprecated; " + "examine DocTestFinder.find() lists instead", + DeprecationWarning) + + # If no module was given, then use __main__. + if m is None: + # DWA - m will still be None if this wasn't invoked from the command + # line, in which case the following TypeError is about as good an error + # as we should expect + m = sys.modules.get('__main__') + + # Check that we were actually given a module. + if not inspect.ismodule(m): + raise TypeError("testmod: module required; %r" % (m,)) + + # If no name was given, then use the module's name. + if name is None: + name = m.__name__ + + # Find, parse, and run all tests in the given module. + finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty) + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return runner.failures, runner.tries + +def testfile(filename, module_relative=True, name=None, package=None, + globs=None, verbose=None, report=True, optionflags=0, + extraglobs=None, raise_on_error=False, parser=DocTestParser()): + """ + Test examples in the given file. Return (#failures, #tests). + + Optional keyword arg "module_relative" specifies how filenames + should be interpreted: + + - If "module_relative" is True (the default), then "filename" + specifies a module-relative path. By default, this path is + relative to the calling module's directory; but if the + "package" argument is specified, then it is relative to that + package. To ensure os-independence, "filename" should use + "/" characters to separate path segments, and should not + be an absolute path (i.e., it may not begin with "/"). 
+ + - If "module_relative" is False, then "filename" specifies an + os-specific path. The path may be absolute or relative (to + the current working directory). + + Optional keyword arg "name" gives the name of the test; by default + use the file's basename. + + Optional keyword argument "package" is a Python package or the + name of a Python package whose directory should be used as the + base directory for a module relative filename. If no package is + specified, then the calling module's directory is used as the base + directory for module relative filenames. It is an error to + specify "package" if "module_relative" is False. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use {}. A copy of this dict + is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. Possible values (see the docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Optional keyword arg "parser" specifies a DocTestParser (or + subclass) that should be used to extract tests from the files. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path + if module_relative: + package = _normalize_module(package) + filename = _module_relative_path(package, filename) + + # If no name was given, then use the file's name. + if name is None: + name = os.path.basename(filename) + + # Assemble the globals. + if globs is None: + globs = {} + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + # Read the file, convert it to a test, and run it. 
+    f = open(filename)
+    s = f.read()
+    f.close()
+    test = parser.get_doctest(s, globs, name, filename, 0)
+    runner.run(test)
+
+    if report:
+        runner.summarize()
+
+    if master is None:
+        master = runner
+    else:
+        master.merge(runner)
+
+    return runner.failures, runner.tries
+
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+                           compileflags=None, optionflags=0):
+    """
+    Test examples in the given object's docstring (`f`), using `globs`
+    as globals.  Optional argument `name` is used in failure messages.
+    If the optional argument `verbose` is true, then generate output
+    even if there are no failures.
+
+    `compileflags` gives the set of flags that should be used by the
+    Python compiler when running the examples.  If not specified, then
+    it will default to the set of future-import flags that apply to
+    `globs`.
+
+    Optional keyword arg `optionflags` specifies options for the
+    testing and output.  See the documentation for `testmod` for more
+    information.
+    """
+    # Find, parse, and run all tests in the given module.
+    finder = DocTestFinder(verbose=verbose, recurse=False)
+    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+    for test in finder.find(f, name, globs=globs):
+        runner.run(test, compileflags=compileflags)
+
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility.  It's not
+# actually used in any way.
+
+class Tester:
+    def __init__(self, mod=None, globs=None, verbose=None,
+                 isprivate=None, optionflags=0):
+
+        warnings.warn("class Tester is deprecated; "
+                      "use class doctest.DocTestRunner instead",
+                      DeprecationWarning, stacklevel=2)
+        if mod is None and globs is None:
+            raise TypeError("Tester.__init__: must specify mod or globs")
+        if mod is not None and not inspect.ismodule(mod):
+            raise TypeError("Tester.__init__: mod must be a module; %r" %
+                            (mod,))
+        if globs is None:
+            globs = mod.__dict__
+        self.globs = globs
+
+        self.verbose = verbose
+        self.isprivate = isprivate
+        self.optionflags = optionflags
+        self.testfinder = DocTestFinder(_namefilter=isprivate)
+        self.testrunner = DocTestRunner(verbose=verbose,
+                                        optionflags=optionflags)
+
+    def runstring(self, s, name):
+        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
+        if self.verbose:
+            print("Running string", name)
+        (f,t) = self.testrunner.run(test)
+        if self.verbose:
+            print(f, "of", t, "examples failed in string", name)
+        return (f,t)
+
+    def rundoc(self, object, name=None, module=None):
+        f = t = 0
+        tests = self.testfinder.find(object, name, module=module,
+                                     globs=self.globs)
+        for test in tests:
+            (f2, t2) = self.testrunner.run(test)
+            (f,t) = (f+f2, t+t2)
+        return (f,t)
+
+    def rundict(self, d, name, module=None):
+        import types
+        m = types.ModuleType(name)
+        m.__dict__.update(d)
+        if module is None:
+            module = False
+        return self.rundoc(m, name, module)
+
+    def run__test__(self, d, name):
+        import types
+        m = types.ModuleType(name)
+        m.__test__ = d
+        return self.rundoc(m, name)
+
+    def summarize(self, verbose=None):
+        return self.testrunner.summarize(verbose)
+
+    def merge(self, other):
+        self.testrunner.merge(other.testrunner)
+
+######################################################################
+## 8. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+    """Sets the unittest option flags.
+ + The old flag is returned so that a runner could restore the old + value if it wished to: + + >>> old = _unittest_reportflags + >>> set_unittest_reportflags(REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) == old + True + + >>> import doctest + >>> doctest._unittest_reportflags == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + + Only reporting flags can be set: + + >>> set_unittest_reportflags(ELLIPSIS) + Traceback (most recent call last): + ... + ValueError: ('Only reporting flags allowed', 8) + + >>> set_unittest_reportflags(old) == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + """ + global _unittest_reportflags + + if (flags & REPORTING_FLAGS) != flags: + raise ValueError("Only reporting flags allowed", flags) + old = _unittest_reportflags + _unittest_reportflags = flags + return old + + +class DocTestCase(unittest.TestCase): + + def __init__(self, test, optionflags=0, setUp=None, tearDown=None, + checker=None): + + unittest.TestCase.__init__(self) + self._dt_optionflags = optionflags + self._dt_checker = checker + self._dt_test = test + self._dt_setUp = setUp + self._dt_tearDown = tearDown + + def setUp(self): + test = self._dt_test + + if self._dt_setUp is not None: + self._dt_setUp(test) + + def tearDown(self): + test = self._dt_test + + if self._dt_tearDown is not None: + self._dt_tearDown(test) + + test.globs.clear() + + def runTest(self): + test = self._dt_test + old = sys.stdout + new = StringIO() + optionflags = self._dt_optionflags + + if not (optionflags & REPORTING_FLAGS): + # The option flags don't include any reporting flags, + # so add the default reporting flags + optionflags |= _unittest_reportflags + + runner = DocTestRunner(optionflags=optionflags, + checker=self._dt_checker, verbose=False) + + try: + runner.DIVIDER = "-"*70 + failures, tries = runner.run( + test, out=new.write, clear_globs=False) + finally: + sys.stdout = old + + if failures: + raise self.failureException(self.format_failure(new.getvalue())) + + def format_failure(self, err): + test = self._dt_test + if test.lineno is None: + lineno = 'unknown line number' + else: + lineno = '%s' % test.lineno + lname = '.'.join(test.name.split('.')[-1:]) + return ('Failed doctest test for %s\n' + ' File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def debug(self): + r"""Run the test case without results and without catching exceptions + + The unit test framework includes a debug method on test cases + and test suites to support post-mortem debugging. The test code + is run in such a way that errors are not caught. This way a + caller can catch the errors and initiate post-mortem debugging. + + The DocTestCase provides a debug method that raises + UnexpectedException errors if there is an unexpected + exception: + + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + >>> try: + ... case.debug() + ... except UnexpectedException, failure: + ... pass + + The UnexpectedException contains the test, the example, and + the original exception: + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[0], exc_info[1], exc_info[2] + Traceback (most recent call last): + ... + KeyError + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... ''', {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + + >>> try: + ...
case.debug() + ... except DocTestFailure, failure: + ... pass + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + """ + + self.setUp() + runner = DebugRunner(optionflags=self._dt_optionflags, + checker=self._dt_checker, verbose=False) + runner.run(self._dt_test) + self.tearDown() + + def id(self): + return self._dt_test.name + + def __repr__(self): + name = self._dt_test.name.split('.') + return "%s (%s)" % (name[-1], '.'.join(name[:-1])) + + __str__ = __repr__ + + def shortDescription(self): + return "Doctest: " + self._dt_test.name + +def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, + **options): + """ + Convert doctest tests for a module to a unittest test suite. + + This converts each documentation string in a module that + contains doctest tests to a unittest test case. If any of the + tests in a doc string fail, then the test case fails. An exception + is raised showing the name of the file containing the test and a + (sometimes approximate) line number. + + The `module` argument provides the module to be tested. The argument + can be either a module or a module name. + + If no argument is given, the calling module is used. + + A number of options may be provided as keyword arguments: + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. + """ + + if test_finder is None: + test_finder = DocTestFinder() + + module = _normalize_module(module) + tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) + if globs is None: + globs = module.__dict__ + if not tests: + # Why do we want to do this? Because it reveals a bug that might + # otherwise be hidden. + raise ValueError(module, "has no tests") + + tests.sort() + suite = unittest.TestSuite() + for test in tests: + if len(test.examples) == 0: + continue + if not test.filename: + filename = module.__file__ + if filename[-4:] in (".pyc", ".pyo"): + filename = filename[:-1] + test.filename = filename + suite.addTest(DocTestCase(test, **options)) + + return suite + +class DocFileCase(DocTestCase): + + def id(self): + return '_'.join(self._dt_test.name.split('.')) + + def __repr__(self): + return self._dt_test.filename + __str__ = __repr__ + + def format_failure(self, err): + return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' + % (self._dt_test.name, self._dt_test.filename, err) + ) + +def DocFileTest(path, module_relative=True, package=None, + globs=None, parser=DocTestParser(), **options): + if globs is None: + globs = {} + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path. + if module_relative: + package = _normalize_module(package) + path = _module_relative_path(package, path) + + # Find the file and read it. 
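+    # Illustrative use, as a sketch ('usage.txt' is a hypothetical doc file): +    #     case = DocFileTest('usage.txt', optionflags=ELLIPSIS) +    # DocFileSuite, defined below, is the usual entry point; it wraps one +    # DocFileTest per path in a unittest.TestSuite.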
+ name = os.path.basename(path) + f = open(path) + doc = f.read() + f.close() + + # Convert it to a test, and wrap it in a DocFileCase. + test = parser.get_doctest(doc, globs, name, path, 0) + return DocFileCase(test, **options) + +def DocFileSuite(*paths, **kw): + """A unittest suite for one or more doctest files. + + The path to each doctest file is given as a string; the + interpretation of that string depends on the keyword argument + "module_relative". + + A number of options may be provided as keyword arguments: + + module_relative + If "module_relative" is True, then the given file paths are + interpreted as os-independent module-relative paths. By + default, these paths are relative to the calling module's + directory; but if the "package" argument is specified, then + they are relative to that package. To ensure os-independence, + "filename" should use "/" characters to separate path + segments, and may not be an absolute path (i.e., it may not + begin with "/"). + + If "module_relative" is False, then the given file paths are + interpreted as os-specific paths. These paths may be absolute + or relative (to the current working directory). + + package + A Python package or the name of a Python package whose directory + should be used as the base directory for module relative paths. + If "package" is not specified, then the calling module's + directory is used as the base directory for module relative + filenames. It is an error to specify "package" if + "module_relative" is False. + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. + + parser + A DocTestParser (or subclass) that should be used to extract + tests from the files. + """ + suite = unittest.TestSuite() + + # We do this here so that _normalize_module is called at the right + # level. If it were called in DocFileTest, then this function + # would be the caller and we might guess the package incorrectly. + if kw.get('module_relative', True): + kw['package'] = _normalize_module(kw.get('package')) + + for path in paths: + suite.addTest(DocFileTest(path, **kw)) + + return suite + +###################################################################### +## 9. Debugging Support +###################################################################### + +def script_from_examples(s): + r"""Extract script from text with examples. + + Converts text with examples to a Python script. Example input is + converted to regular code. Example output and all other words + are converted to comments: + + >>> text = ''' + ... Here are examples of simple math. + ... + ... Python has super accurate integer addition + ... + ... >>> 2 + 2 + ... 5 + ... + ... And very friendly error messages: + ... + ... >>> 1/0 + ... To Infinity + ... And + ... Beyond + ... + ... You can use logic if you want: + ... + ... >>> if 0: + ... ... blah + ... ... blah + ... ... + ... + ... Ho hum + ... ''' + + >>> print script_from_examples(text) + # Here are examples of simple math. 
+ # + # Python has super accurate integer addition + # + 2 + 2 + # Expected: + ## 5 + # + # And very friendly error messages: + # + 1/0 + # Expected: + ## To Infinity + ## And + ## Beyond + # + # You can use logic if you want: + # + if 0: + blah + blah + # + # Ho hum + """ + output = [] + for piece in DocTestParser().parse(s): + if isinstance(piece, Example): + # Add the example's source code (strip trailing NL) + output.append(piece.source[:-1]) + # Add the expected output: + want = piece.want + if want: + output.append('# Expected:') + output += ['## '+l for l in want.split('\n')[:-1]] + else: + # Add non-example text. + output += [_comment_line(l) + for l in piece.split('\n')[:-1]] + + # Trim junk on both ends. + while output and output[-1] == '#': + output.pop() + while output and output[0] == '#': + output.pop(0) + # Combine the output, and return it. + return '\n'.join(output) + +def testsource(module, name): + """Extract the test sources from a doctest docstring as a script. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the doc string with tests to be debugged. + """ + module = _normalize_module(module) + tests = DocTestFinder().find(module) + test = [t for t in tests if t.name == name] + if not test: + raise ValueError(name, "not found in tests") + test = test[0] + testsrc = script_from_examples(test.docstring) + return testsrc + +def debug_src(src, pm=False, globs=None): + """Debug a single doctest docstring, in argument `src`.""" + testsrc = script_from_examples(src) + debug_script(testsrc, pm, globs) + +def debug_script(src, pm=False, globs=None): + "Debug a test script. `src` is the script, as a string." + import pdb + + # Note that tempfile.NamedTemporaryFile() cannot be used. As the + # docs say, a file so created cannot be opened by name a second time + # on modern Windows boxes, and execfile() needs to open it. + srcfilename = tempfile.mktemp(".py", "doctestdebug") + f = open(srcfilename, 'w') + f.write(src) + f.close() + + try: + if globs: + globs = globs.copy() + else: + globs = {} + + if pm: + try: + execfile(srcfilename, globs, globs) + except: + print(sys.exc_info()[1]) + pdb.post_mortem(sys.exc_info()[2]) + else: + # Note that %r is vital here. '%s' instead can, e.g., cause + # backslashes to get treated as metacharacters on Windows. + pdb.run("execfile(%r)" % srcfilename, globs, globs) + + finally: + os.remove(srcfilename) + +def debug(module, name, pm=False): + """Debug a single doctest docstring. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the docstring with tests to be debugged. + """ + module = _normalize_module(module) + testsrc = testsource(module, name) + debug_script(testsrc, pm, module.__dict__) + +###################################################################### +## 10. Example Usage +###################################################################### +class _TestClass: + """ + A pointless class, for sanity-checking of docstring testing. + + Methods: + square() + get() + + >>> _TestClass(13).get() + _TestClass(-12).get() + 1 + >>> hex(_TestClass(13).square().get()) + '0xa9' + """ + + def __init__(self, val): + """val -> _TestClass object with associated value val.
+ + >>> t = _TestClass(123) + >>> print t.get() + 123 + """ + + self.val = val + + def square(self): + """square() -> square TestClass's associated value + + >>> _TestClass(13).square().get() + 169 + """ + + self.val = self.val ** 2 + return self + + def get(self): + """get() -> return TestClass's associated value. + + >>> x = _TestClass(-42) + >>> print x.get() + -42 + """ + + return self.val + +__test__ = {"_TestClass": _TestClass, + "string": r""" + Example of a string object, searched as-is. + >>> x = 1; y = 2 + >>> x + y, x * y + (3, 2) + """, + + "bool-int equivalence": r""" + In 2.2, boolean expressions displayed + 0 or 1. By default, we still accept + them. This can be disabled by passing + DONT_ACCEPT_TRUE_FOR_1 to the new + optionflags argument. + >>> 4 == 4 + 1 + >>> 4 == 4 + True + >>> 4 > 4 + 0 + >>> 4 > 4 + False + """, + + "blank lines": r""" + Blank lines can be marked with <BLANKLINE>: + >>> print 'foo\n\nbar\n' + foo + <BLANKLINE> + bar + <BLANKLINE> + """, + + "ellipsis": r""" + If the ellipsis flag is used, then '...' can be used to + elide substrings in the desired output: + >>> print range(1000) #doctest: +ELLIPSIS + [0, 1, 2, ..., 999] + """, + + "whitespace normalization": r""" + If the whitespace normalization flag is used, then + differences in whitespace are ignored. + >>> print range(30) #doctest: +NORMALIZE_WHITESPACE + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29] + """, + } + +def _test(): + r = unittest.TextTestRunner() + r.run(DocTestSuite()) + +if __name__ == "__main__": + _test() + diff --git a/awx/lib/site-packages/setuptools/tests/py26compat.py b/awx/lib/site-packages/setuptools/tests/py26compat.py new file mode 100644 index 0000000000..d4fb891af6 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/py26compat.py @@ -0,0 +1,14 @@ +import unittest + +try: + # provide skipIf for Python 2.4-2.6 + skipIf = unittest.skipIf +except AttributeError: + def skipIf(condition, reason): + def skipper(func): + def skip(*args, **kwargs): + return + if condition: + return skip + return func + return skipper diff --git a/awx/lib/site-packages/setuptools/tests/script-with-bom.py b/awx/lib/site-packages/setuptools/tests/script-with-bom.py new file mode 100644 index 0000000000..22dee0d2a3 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/script-with-bom.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- + +result = 'passed' diff --git a/awx/lib/site-packages/setuptools/tests/server.py b/awx/lib/site-packages/setuptools/tests/server.py new file mode 100644 index 0000000000..ae2381e355 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/server.py @@ -0,0 +1,82 @@ +"""Basic http server for tests to simulate PyPI or custom indexes +""" +import sys +import time +import threading +from setuptools.compat import BaseHTTPRequestHandler +from setuptools.compat import (urllib2, URLError, HTTPServer, + SimpleHTTPRequestHandler) + +class IndexServer(HTTPServer): + """Basic single-threaded http server simulating a package index + + You can use this server in unittest like this:: + s = IndexServer() + s.start() + index_url = s.base_url() + 'mytestindex' + # do some test requests to the index + # The index files should be located in setuptools/tests/indexes + s.stop() + """ + def __init__(self, server_address=('', 0), + RequestHandlerClass=SimpleHTTPRequestHandler): + HTTPServer.__init__(self, server_address, RequestHandlerClass) + self._run = True + + def serve(self): + while self._run: + self.handle_request() + + def start(self):
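+        # Run serve() on a background thread so the test method can keep +        # executing (and issuing requests) while the server handles them.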
self.thread = threading.Thread(target=self.serve) + self.thread.start() + + def stop(self): + "Stop the server" + + # Let the server finish the last request and wait for a new one. + time.sleep(0.1) + + # self.shutdown is not supported on python < 2.6, so just + # set _run to false, and make a request, causing it to + # terminate. + self._run = False + url = 'http://127.0.0.1:%(server_port)s/' % vars(self) + try: + if sys.version_info >= (2, 6): + urllib2.urlopen(url, timeout=5) + else: + urllib2.urlopen(url) + except URLError: + # ignore any errors; all that's important is the request + pass + self.thread.join() + self.socket.close() + + def base_url(self): + port = self.server_port + return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port + +class RequestRecorder(BaseHTTPRequestHandler): + def do_GET(self): + requests = vars(self.server).setdefault('requests', []) + requests.append(self) + self.send_response(200, 'OK') + +class MockServer(HTTPServer, threading.Thread): + """ + A simple HTTP Server that records the requests made to it. + """ + def __init__(self, server_address=('', 0), + RequestHandlerClass=RequestRecorder): + HTTPServer.__init__(self, server_address, RequestHandlerClass) + threading.Thread.__init__(self) + self.setDaemon(True) + self.requests = [] + + def run(self): + self.serve_forever() + + def url(self): + return 'http://localhost:%(server_port)s/' % vars(self) + url = property(url) diff --git a/awx/lib/site-packages/setuptools/tests/test_bdist_egg.py b/awx/lib/site-packages/setuptools/tests/test_bdist_egg.py new file mode 100644 index 0000000000..1a12218645 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_bdist_egg.py @@ -0,0 +1,69 @@ +"""develop tests +""" +import sys +import os, re, shutil, tempfile, unittest +import tempfile +import site + +from distutils.errors import DistutilsError +from setuptools.compat import StringIO +from setuptools.command.bdist_egg import bdist_egg +from setuptools.command import easy_install as easy_install_pkg +from setuptools.dist import Distribution + +SETUP_PY = """\ +from setuptools import setup + +setup(name='foo', py_modules=['hi']) +""" + +class TestDevelopTest(unittest.TestCase): + + def setUp(self): + self.dir = tempfile.mkdtemp() + self.old_cwd = os.getcwd() + os.chdir(self.dir) + f = open('setup.py', 'w') + f.write(SETUP_PY) + f.close() + f = open('hi.py', 'w') + f.write('1\n') + f.close() + if sys.version >= "2.6": + self.old_base = site.USER_BASE + site.USER_BASE = tempfile.mkdtemp() + self.old_site = site.USER_SITE + site.USER_SITE = tempfile.mkdtemp() + + def tearDown(self): + os.chdir(self.old_cwd) + shutil.rmtree(self.dir) + if sys.version >= "2.6": + shutil.rmtree(site.USER_BASE) + shutil.rmtree(site.USER_SITE) + site.USER_BASE = self.old_base + site.USER_SITE = self.old_site + + def test_bdist_egg(self): + dist = Distribution(dict( + script_name='setup.py', + script_args=['bdist_egg'], + name='foo', + py_modules=['hi'] + )) + os.makedirs(os.path.join('build', 'src')) + old_stdout = sys.stdout + sys.stdout = o = StringIO() + try: + dist.parse_command_line() + dist.run_commands() + finally: + sys.stdout = old_stdout + + # let's see if we got our egg link at the right place + [content] = os.listdir('dist') + self.assertTrue(re.match('foo-0.0.0-py[23].\d.egg$', content)) + +def test_suite(): + return unittest.makeSuite(TestDevelopTest) + diff --git a/awx/lib/site-packages/setuptools/tests/test_build_ext.py b/awx/lib/site-packages/setuptools/tests/test_build_ext.py new file mode 100644 index 
0000000000..a520ced9d6 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_build_ext.py @@ -0,0 +1,20 @@ +"""build_ext tests +""" +import os, shutil, tempfile, unittest +from distutils.command.build_ext import build_ext as distutils_build_ext +from setuptools.command.build_ext import build_ext +from setuptools.dist import Distribution + +class TestBuildExtTest(unittest.TestCase): + + def test_get_ext_filename(self): + # setuptools needs to give back the same + # result as distutils, even if the fullname + # is not in ext_map + dist = Distribution() + cmd = build_ext(dist) + cmd.ext_map['foo/bar'] = '' + res = cmd.get_ext_filename('foo') + wanted = distutils_build_ext.get_ext_filename(cmd, 'foo') + assert res == wanted + diff --git a/awx/lib/site-packages/setuptools/tests/test_develop.py b/awx/lib/site-packages/setuptools/tests/test_develop.py new file mode 100644 index 0000000000..7b90161a8a --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_develop.py @@ -0,0 +1,122 @@ +"""develop tests +""" +import sys +import os, shutil, tempfile, unittest +import tempfile +import site + +from distutils.errors import DistutilsError +from setuptools.command.develop import develop +from setuptools.command import easy_install as easy_install_pkg +from setuptools.compat import StringIO +from setuptools.dist import Distribution + +SETUP_PY = """\ +from setuptools import setup + +setup(name='foo', + packages=['foo'], + use_2to3=True, +) +""" + +INIT_PY = """print "foo" +""" + +class TestDevelopTest(unittest.TestCase): + + def setUp(self): + if sys.version < "2.6" or hasattr(sys, 'real_prefix'): + return + + # Directory structure + self.dir = tempfile.mkdtemp() + os.mkdir(os.path.join(self.dir, 'foo')) + # setup.py + setup = os.path.join(self.dir, 'setup.py') + f = open(setup, 'w') + f.write(SETUP_PY) + f.close() + self.old_cwd = os.getcwd() + # foo/__init__.py + init = os.path.join(self.dir, 'foo', '__init__.py') + f = open(init, 'w') + f.write(INIT_PY) + f.close() + + os.chdir(self.dir) + self.old_base = site.USER_BASE + site.USER_BASE = tempfile.mkdtemp() + self.old_site = site.USER_SITE + site.USER_SITE = tempfile.mkdtemp() + + def tearDown(self): + if sys.version < "2.6" or hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix): + return + + os.chdir(self.old_cwd) + shutil.rmtree(self.dir) + shutil.rmtree(site.USER_BASE) + shutil.rmtree(site.USER_SITE) + site.USER_BASE = self.old_base + site.USER_SITE = self.old_site + + def test_develop(self): + if sys.version < "2.6" or hasattr(sys, 'real_prefix'): + return + dist = Distribution( + dict(name='foo', + packages=['foo'], + use_2to3=True, + version='0.0', + )) + dist.script_name = 'setup.py' + cmd = develop(dist) + cmd.user = 1 + cmd.ensure_finalized() + cmd.install_dir = site.USER_SITE + cmd.user = 1 + old_stdout = sys.stdout + #sys.stdout = StringIO() + try: + cmd.run() + finally: + sys.stdout = old_stdout + + # let's see if we got our egg link at the right place + content = os.listdir(site.USER_SITE) + content.sort() + self.assertEqual(content, ['easy-install.pth', 'foo.egg-link']) + + # Check that we are using the right code.
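+        # The first line of a .egg-link file holds the project directory; +        # reading foo/__init__.py back through it confirms that develop +        # linked the source tree (and, under Python 3, that 2to3 ran).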
+ egg_link_file = open(os.path.join(site.USER_SITE, 'foo.egg-link'), 'rt') + try: + path = egg_link_file.read().split()[0].strip() + finally: + egg_link_file.close() + init_file = open(os.path.join(path, 'foo', '__init__.py'), 'rt') + try: + init = init_file.read().strip() + finally: + init_file.close() + if sys.version < "3": + self.assertEqual(init, 'print "foo"') + else: + self.assertEqual(init, 'print("foo")') + + def notest_develop_with_setup_requires(self): + + wanted = ("Could not find suitable distribution for " + "Requirement.parse('I-DONT-EXIST')") + old_dir = os.getcwd() + os.chdir(self.dir) + try: + try: + dist = Distribution({'setup_requires': ['I_DONT_EXIST']}) + except DistutilsError: + e = sys.exc_info()[1] + error = str(e) + if error == wanted: + pass + finally: + os.chdir(old_dir) diff --git a/awx/lib/site-packages/setuptools/tests/test_dist_info.py b/awx/lib/site-packages/setuptools/tests/test_dist_info.py new file mode 100644 index 0000000000..a8adb68c2d --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_dist_info.py @@ -0,0 +1,83 @@ +"""Test .dist-info style distributions. +""" +import os +import shutil +import tempfile +import unittest +import textwrap + +try: + import ast +except: + pass + +import pkg_resources + +from setuptools.tests.py26compat import skipIf + +def DALS(s): + "dedent and left-strip" + return textwrap.dedent(s).lstrip() + +class TestDistInfo(unittest.TestCase): + + def test_distinfo(self): + dists = {} + for d in pkg_resources.find_distributions(self.tmpdir): + dists[d.project_name] = d + + assert len(dists) == 2, dists + + unversioned = dists['UnversionedDistribution'] + versioned = dists['VersionedDistribution'] + + assert versioned.version == '2.718' # from filename + assert unversioned.version == '0.3' # from METADATA + + @skipIf('ast' not in globals(), + "ast is used to test conditional dependencies (Python >= 2.6)") + def test_conditional_dependencies(self): + requires = [pkg_resources.Requirement.parse('splort==4'), + pkg_resources.Requirement.parse('quux>=1.1')] + + for d in pkg_resources.find_distributions(self.tmpdir): + self.assertEqual(d.requires(), requires[:1]) + self.assertEqual(d.requires(extras=('baz',)), requires) + self.assertEqual(d.extras, ['baz']) + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + versioned = os.path.join(self.tmpdir, + 'VersionedDistribution-2.718.dist-info') + os.mkdir(versioned) + metadata_file = open(os.path.join(versioned, 'METADATA'), 'w+') + try: + metadata_file.write(DALS( + """ + Metadata-Version: 1.2 + Name: VersionedDistribution + Requires-Dist: splort (4) + Provides-Extra: baz + Requires-Dist: quux (>=1.1); extra == 'baz' + """)) + finally: + metadata_file.close() + unversioned = os.path.join(self.tmpdir, + 'UnversionedDistribution.dist-info') + os.mkdir(unversioned) + metadata_file = open(os.path.join(unversioned, 'METADATA'), 'w+') + try: + metadata_file.write(DALS( + """ + Metadata-Version: 1.2 + Name: UnversionedDistribution + Version: 0.3 + Requires-Dist: splort (==4) + Provides-Extra: baz + Requires-Dist: quux (>=1.1); extra == 'baz' + """)) + finally: + metadata_file.close() + + def tearDown(self): + shutil.rmtree(self.tmpdir) diff --git a/awx/lib/site-packages/setuptools/tests/test_easy_install.py b/awx/lib/site-packages/setuptools/tests/test_easy_install.py new file mode 100644 index 0000000000..189e3d55df --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_easy_install.py @@ -0,0 +1,426 @@ +"""Easy install Tests +""" +import sys +import os +import shutil 
+import tempfile +import unittest +import site +from setuptools.compat import StringIO, BytesIO, next +from setuptools.compat import urlparse +import textwrap +import tarfile +import distutils.core + +from setuptools.compat import StringIO, BytesIO, next, urlparse +from setuptools.sandbox import run_setup, SandboxViolation +from setuptools.command.easy_install import easy_install, fix_jython_executable, get_script_args, nt_quote_arg +from setuptools.command.easy_install import PthDistributions +from setuptools.command import easy_install as easy_install_pkg +from setuptools.dist import Distribution +from pkg_resources import Distribution as PRDistribution +import setuptools.tests.server + +try: + # import multiprocessing solely for the purpose of testing its existence + __import__('multiprocessing') + import logging + _LOG = logging.getLogger('test_easy_install') + logging.basicConfig(level=logging.INFO, stream=sys.stderr) + _MULTIPROC = True +except ImportError: + _MULTIPROC = False + _LOG = None + +class FakeDist(object): + def get_entry_map(self, group): + if group != 'console_scripts': + return {} + return {'name': 'ep'} + + def as_requirement(self): + return 'spec' + +WANTED = """\ +#!%s +# EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name' +__requires__ = 'spec' +import sys +from pkg_resources import load_entry_point + +if __name__ == '__main__': + sys.exit( + load_entry_point('spec', 'console_scripts', 'name')() + ) +""" % nt_quote_arg(fix_jython_executable(sys.executable, "")) + +SETUP_PY = """\ +from setuptools import setup + +setup(name='foo') +""" + +class TestEasyInstallTest(unittest.TestCase): + + def test_install_site_py(self): + dist = Distribution() + cmd = easy_install(dist) + cmd.sitepy_installed = False + cmd.install_dir = tempfile.mkdtemp() + try: + cmd.install_site_py() + sitepy = os.path.join(cmd.install_dir, 'site.py') + self.assertTrue(os.path.exists(sitepy)) + finally: + shutil.rmtree(cmd.install_dir) + + def test_get_script_args(self): + dist = FakeDist() + + old_platform = sys.platform + try: + name, script = [i for i in next(get_script_args(dist))][0:2] + finally: + sys.platform = old_platform + + self.assertEqual(script, WANTED) + + def test_no_find_links(self): + # new option '--no-find-links', that blocks find-links added at + # the project level + dist = Distribution() + cmd = easy_install(dist) + cmd.check_pth_processing = lambda: True + cmd.no_find_links = True + cmd.find_links = ['link1', 'link2'] + cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok') + cmd.args = ['ok'] + cmd.ensure_finalized() + self.assertEqual(cmd.package_index.scanned_urls, {}) + + # let's try without it (default behavior) + cmd = easy_install(dist) + cmd.check_pth_processing = lambda: True + cmd.find_links = ['link1', 'link2'] + cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok') + cmd.args = ['ok'] + cmd.ensure_finalized() + keys = sorted(cmd.package_index.scanned_urls.keys()) + self.assertEqual(keys, ['link1', 'link2']) + + +class TestPTHFileWriter(unittest.TestCase): + def test_add_from_cwd_site_sets_dirty(self): + '''a pth file manager should set dirty + if a distribution is in site but also the cwd + ''' + pth = PthDistributions('does-not_exist', [os.getcwd()]) + self.assertTrue(not pth.dirty) + pth.add(PRDistribution(os.getcwd())) + self.assertTrue(pth.dirty) + + def test_add_from_site_is_ignored(self): + if os.name != 'nt': + location = '/test/location/does-not-have-to-exist' + else: + location = 'c:\\does_not_exist' + pth = PthDistributions('does-not_exist', 
[location, ]) + self.assertTrue(not pth.dirty) + pth.add(PRDistribution(location)) + self.assertTrue(not pth.dirty) + + +class TestUserInstallTest(unittest.TestCase): + + def setUp(self): + self.dir = tempfile.mkdtemp() + setup = os.path.join(self.dir, 'setup.py') + f = open(setup, 'w') + f.write(SETUP_PY) + f.close() + self.old_cwd = os.getcwd() + os.chdir(self.dir) + if sys.version >= "2.6": + self.old_has_site = easy_install_pkg.HAS_USER_SITE + self.old_file = easy_install_pkg.__file__ + self.old_base = site.USER_BASE + site.USER_BASE = tempfile.mkdtemp() + self.old_site = site.USER_SITE + site.USER_SITE = tempfile.mkdtemp() + easy_install_pkg.__file__ = site.USER_SITE + + def tearDown(self): + os.chdir(self.old_cwd) + shutil.rmtree(self.dir) + if sys.version >= "2.6": + shutil.rmtree(site.USER_BASE) + shutil.rmtree(site.USER_SITE) + site.USER_BASE = self.old_base + site.USER_SITE = self.old_site + easy_install_pkg.HAS_USER_SITE = self.old_has_site + easy_install_pkg.__file__ = self.old_file + + def test_user_install_implied(self): + easy_install_pkg.HAS_USER_SITE = True # disabled sometimes + #XXX: replace with something meaningful + if sys.version < "2.6": + return #SKIP + dist = Distribution() + dist.script_name = 'setup.py' + cmd = easy_install(dist) + cmd.args = ['py'] + cmd.ensure_finalized() + self.assertTrue(cmd.user, 'user should be implied') + + def test_multiproc_atexit(self): + if not _MULTIPROC: + return + _LOG.info('this should not break') + + def test_user_install_not_implied_without_usersite_enabled(self): + easy_install_pkg.HAS_USER_SITE = False # usually enabled + #XXX: replace with something meaningful + if sys.version < "2.6": + return #SKIP + dist = Distribution() + dist.script_name = 'setup.py' + cmd = easy_install(dist) + cmd.args = ['py'] + cmd.initialize_options() + self.assertFalse(cmd.user, 'NOT user should be implied') + + def test_local_index(self): + # make sure the local index is used + # when easy_install looks for installed + # packages + new_location = tempfile.mkdtemp() + target = tempfile.mkdtemp() + egg_file = os.path.join(new_location, 'foo-1.0.egg-info') + f = open(egg_file, 'w') + try: + f.write('Name: foo\n') + finally: + f.close() + + sys.path.append(target) + old_ppath = os.environ.get('PYTHONPATH') + os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path) + try: + dist = Distribution() + dist.script_name = 'setup.py' + cmd = easy_install(dist) + cmd.install_dir = target + cmd.args = ['foo'] + cmd.ensure_finalized() + cmd.local_index.scan([new_location]) + res = cmd.easy_install('foo') + self.assertEqual(os.path.realpath(res.location), + os.path.realpath(new_location)) + finally: + sys.path.remove(target) + for basedir in [new_location, target, ]: + if not os.path.exists(basedir) or not os.path.isdir(basedir): + continue + try: + shutil.rmtree(basedir) + except: + pass + if old_ppath is not None: + os.environ['PYTHONPATH'] = old_ppath + else: + del os.environ['PYTHONPATH'] + + def test_setup_requires(self): + """Regression test for Distribute issue #318 + + Ensure that a package with setup_requires can be installed when + setuptools is installed in the user site-packages without causing a + SandboxViolation.
+ """ + + test_setup_attrs = { + 'name': 'test_pkg', 'version': '0.0', + 'setup_requires': ['foobar'], + 'dependency_links': [os.path.abspath(self.dir)] + } + + test_pkg = os.path.join(self.dir, 'test_pkg') + test_setup_py = os.path.join(test_pkg, 'setup.py') + test_setup_cfg = os.path.join(test_pkg, 'setup.cfg') + os.mkdir(test_pkg) + + f = open(test_setup_py, 'w') + f.write(textwrap.dedent("""\ + import setuptools + setuptools.setup(**%r) + """ % test_setup_attrs)) + f.close() + + foobar_path = os.path.join(self.dir, 'foobar-0.1.tar.gz') + make_trivial_sdist( + foobar_path, + textwrap.dedent("""\ + import setuptools + setuptools.setup( + name='foobar', + version='0.1' + ) + """)) + + old_stdout = sys.stdout + old_stderr = sys.stderr + sys.stdout = StringIO() + sys.stderr = StringIO() + try: + try: + reset_setup_stop_context( + lambda: run_setup(test_setup_py, ['install']) + ) + except SandboxViolation: + self.fail('Installation caused SandboxViolation') + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + + +class TestSetupRequires(unittest.TestCase): + + def test_setup_requires_honors_fetch_params(self): + """ + When easy_install installs a source distribution which specifies + setup_requires, it should honor the fetch parameters (such as + allow-hosts, index-url, and find-links). + """ + # set up a server which will simulate an alternate package index. + p_index = setuptools.tests.server.MockServer() + p_index.start() + netloc = 1 + p_index_loc = urlparse(p_index.url)[netloc] + if p_index_loc.endswith(':0'): + # Some platforms (Jython) don't find a port to which to bind, + # so skip this test for them. + return + + # I realize this is all-but-impossible to read, because it was + # ported from some well-factored, safe code using 'with'. If you + # need to maintain this code, consider making the changes in + # the parent revision (of this comment) and then port the changes + # back for Python 2.4 (or deprecate Python 2.4). + + def install(dist_file): + def install_at(temp_install_dir): + def install_env(): + ei_params = ['--index-url', p_index.url, + '--allow-hosts', p_index_loc, + '--exclude-scripts', '--install-dir', temp_install_dir, + dist_file] + def install_clean_reset(): + def install_clean_argv(): + # attempt to install the dist. It should fail because + # it doesn't exist. + self.assertRaises(SystemExit, + easy_install_pkg.main, ei_params) + argv_context(install_clean_argv, ['easy_install']) + reset_setup_stop_context(install_clean_reset) + environment_context(install_env, PYTHONPATH=temp_install_dir) + tempdir_context(install_at) + + # create an sdist that has a build-time dependency. + self.create_sdist(install) + + # there should have been two or three requests to the server + # (three happens on Python 3.3a) + self.assertTrue(2 <= len(p_index.requests) <= 3) + self.assertEqual(p_index.requests[0].path, '/does-not-exist/') + + def create_sdist(self, installer): + """ + Create an sdist with a setup_requires dependency (of something that + doesn't exist) and invoke installer on it. 
+ """ + def build_sdist(dir): + dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz') + make_trivial_sdist( + dist_path, + textwrap.dedent(""" + import setuptools + setuptools.setup( + name="setuptools-test-fetcher", + version="1.0", + setup_requires = ['does-not-exist'], + ) + """).lstrip()) + installer(dist_path) + tempdir_context(build_sdist) + + +def make_trivial_sdist(dist_path, setup_py): + """Create a simple sdist tarball at dist_path, containing just a + setup.py, the contents of which are provided by the setup_py string. + """ + + setup_py_file = tarfile.TarInfo(name='setup.py') + try: + # Python 3 (StringIO gets converted to io module) + MemFile = BytesIO + except AttributeError: + MemFile = StringIO + setup_py_bytes = MemFile(setup_py.encode('utf-8')) + setup_py_file.size = len(setup_py_bytes.getvalue()) + dist = tarfile.open(dist_path, 'w:gz') + try: + dist.addfile(setup_py_file, fileobj=setup_py_bytes) + finally: + dist.close() + + +def tempdir_context(f, cd=lambda dir:None): + """ + Invoke f in the context of a freshly created temporary directory, + removing it afterwards + """ + temp_dir = tempfile.mkdtemp() + orig_dir = os.getcwd() + try: + cd(temp_dir) + f(temp_dir) + finally: + cd(orig_dir) + shutil.rmtree(temp_dir) + +def environment_context(f, **updates): + """ + Invoke f in the context of the given os.environ updates, restoring + the previous environment afterwards + """ + old_env = os.environ.copy() + os.environ.update(updates) + try: + f() + finally: + for key in updates: + del os.environ[key] + os.environ.update(old_env) + +def argv_context(f, repl): + """ + Invoke f in the context of a replacement sys.argv, restoring the + original argv afterwards + """ + old_argv = sys.argv[:] + sys.argv[:] = repl + try: + f() + finally: + sys.argv[:] = old_argv + +def reset_setup_stop_context(f): + """ + When the setuptools tests are run using setup.py test, and then + one wants to invoke another setup() command (such as easy_install) + within those tests, it's necessary to reset the global variable + in distutils.core so that the setup() command will run naturally.
+ """ + setup_stop_after = distutils.core._setup_stop_after + distutils.core._setup_stop_after = None + try: + f() + finally: + distutils.core._setup_stop_after = setup_stop_after diff --git a/awx/lib/site-packages/setuptools/tests/test_egg_info.py b/awx/lib/site-packages/setuptools/tests/test_egg_info.py new file mode 100644 index 0000000000..f26a1f5191 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_egg_info.py @@ -0,0 +1,40 @@ +import os +import tempfile +import shutil +import unittest + +import pkg_resources +from setuptools.command import egg_info + +ENTRIES_V10 = pkg_resources.resource_string(__name__, 'entries-v10') +"An entries file generated with svn 1.6.17 against the legacy Setuptools repo" + +class TestEggInfo(unittest.TestCase): + + def setUp(self): + self.test_dir = tempfile.mkdtemp() + os.mkdir(os.path.join(self.test_dir, '.svn')) + + self.old_cwd = os.getcwd() + os.chdir(self.test_dir) + + def tearDown(self): + os.chdir(self.old_cwd) + shutil.rmtree(self.test_dir) + + def _write_entries(self, entries): + fn = os.path.join(self.test_dir, '.svn', 'entries') + entries_f = open(fn, 'wb') + entries_f.write(entries) + entries_f.close() + + def test_version_10_format(self): + """ + """ + self._write_entries(ENTRIES_V10) + rev = egg_info.egg_info.get_svn_revision() + self.assertEqual(rev, '89000') + + +def test_suite(): + return unittest.defaultTestLoader.loadTestsFromName(__name__) diff --git a/awx/lib/site-packages/setuptools/tests/test_markerlib.py b/awx/lib/site-packages/setuptools/tests/test_markerlib.py new file mode 100644 index 0000000000..dae71cba46 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_markerlib.py @@ -0,0 +1,68 @@ +import os +import unittest +from setuptools.tests.py26compat import skipIf + +try: + import ast +except ImportError: + pass + +class TestMarkerlib(unittest.TestCase): + + @skipIf('ast' not in globals(), + "ast not available (Python < 2.6?)") + def test_markers(self): + from _markerlib import interpret, default_environment, compile + + os_name = os.name + + self.assertTrue(interpret("")) + + self.assertTrue(interpret("os.name != 'buuuu'")) + self.assertTrue(interpret("os_name != 'buuuu'")) + self.assertTrue(interpret("python_version > '1.0'")) + self.assertTrue(interpret("python_version < '5.0'")) + self.assertTrue(interpret("python_version <= '5.0'")) + self.assertTrue(interpret("python_version >= '1.0'")) + self.assertTrue(interpret("'%s' in os.name" % os_name)) + self.assertTrue(interpret("'%s' in os_name" % os_name)) + self.assertTrue(interpret("'buuuu' not in os.name")) + + self.assertFalse(interpret("os.name == 'buuuu'")) + self.assertFalse(interpret("os_name == 'buuuu'")) + self.assertFalse(interpret("python_version < '1.0'")) + self.assertFalse(interpret("python_version > '5.0'")) + self.assertFalse(interpret("python_version >= '5.0'")) + self.assertFalse(interpret("python_version <= '1.0'")) + self.assertFalse(interpret("'%s' not in os.name" % os_name)) + self.assertFalse(interpret("'buuuu' in os.name and python_version >= '5.0'")) + self.assertFalse(interpret("'buuuu' in os_name and python_version >= '5.0'")) + + environment = default_environment() + environment['extra'] = 'test' + self.assertTrue(interpret("extra == 'test'", environment)) + self.assertFalse(interpret("extra == 'doc'", environment)) + + def raises_nameError(): + try: + interpret("python.version == '42'") + except NameError: + pass + else: + raise Exception("Expected NameError") + + raises_nameError() + + def raises_syntaxError(): + try: + 
interpret("(x for x in (4,))") + except SyntaxError: + pass + else: + raise Exception("Expected SyntaxError") + + raises_syntaxError() + + statement = "python_version == '5'" + self.assertEqual(compile(statement).__doc__, statement) + diff --git a/awx/lib/site-packages/setuptools/tests/test_packageindex.py b/awx/lib/site-packages/setuptools/tests/test_packageindex.py new file mode 100644 index 0000000000..08969b7e8c --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_packageindex.py @@ -0,0 +1,187 @@ +"""Package Index Tests +""" +import sys +import unittest +import pkg_resources +from setuptools.compat import urllib2, httplib, HTTPError, unicode +import distutils.errors +import setuptools.package_index +from setuptools.tests.server import IndexServer + +class TestPackageIndex(unittest.TestCase): + + def test_bad_url_bad_port(self): + index = setuptools.package_index.PackageIndex() + url = 'http://127.0.0.1:0/nonesuch/test_package_index' + try: + v = index.open_url(url) + except Exception: + v = sys.exc_info()[1] + self.assertTrue(url in str(v)) + else: + self.assertTrue(isinstance(v, HTTPError)) + + def test_bad_url_typo(self): + # issue 16 + # easy_install inquant.contentmirror.plone breaks because of a typo + # in its home URL + index = setuptools.package_index.PackageIndex( + hosts=('www.example.com',) + ) + + url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk' + try: + v = index.open_url(url) + except Exception: + v = sys.exc_info()[1] + self.assertTrue(url in str(v)) + else: + self.assertTrue(isinstance(v, HTTPError)) + + def test_bad_url_bad_status_line(self): + index = setuptools.package_index.PackageIndex( + hosts=('www.example.com',) + ) + + def _urlopen(*args): + raise httplib.BadStatusLine('line') + + index.opener = _urlopen + url = 'http://example.com' + try: + v = index.open_url(url) + except Exception: + v = sys.exc_info()[1] + self.assertTrue('line' in str(v)) + else: + raise AssertionError('Should have raised here!') + + def test_bad_url_double_scheme(self): + """ + A bad URL with a double scheme should raise a DistutilsError. + """ + index = setuptools.package_index.PackageIndex( + hosts=('www.example.com',) + ) + + # issue 20 + url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk' + try: + index.open_url(url) + except distutils.errors.DistutilsError: + error = sys.exc_info()[1] + msg = unicode(error) + assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg + return + raise RuntimeError("Did not raise") + + def test_bad_url_screwy_href(self): + index = setuptools.package_index.PackageIndex( + hosts=('www.example.com',) + ) + + # issue #160 + if sys.version_info[0] == 2 and sys.version_info[1] == 7: + # this should not fail + url = 'http://example.com' + page = ('<a href="http://www.famfamfam.com](' + 'http://www.famfamfam.com/">') + index.process_index(url, page) + + def test_url_ok(self): + index = setuptools.package_index.PackageIndex( + hosts=('www.example.com',) + ) + url = 'file:///tmp/test_package_index' + self.assertTrue(index.url_ok(url, True)) + + def test_links_priority(self): + """ + Download links from the pypi simple index should be used before + external download links. + https://bitbucket.org/tarek/distribute/issue/163 + + Usecase : + - someone uploads a package on pypi, a md5 is generated + - someone manually copies this link (with the md5 in the url) onto an + external page accessible from the package page.
+ - someone reuploads the package (with a different md5) + - while easy_installing, an MD5 error occurs because the external link + is used + -> Setuptools should use the link from pypi, not the external one. + """ + if sys.platform.startswith('java'): + # Skip this test on jython because binding to :0 fails + return + + # start an index server + server = IndexServer() + server.start() + index_url = server.base_url() + 'test_links_priority/simple/' + + # scan a test index + pi = setuptools.package_index.PackageIndex(index_url) + requirement = pkg_resources.Requirement.parse('foobar') + pi.find_packages(requirement) + server.stop() + + # the distribution has been found + self.assertTrue('foobar' in pi) + # we have only one link, because links are compared without md5 + self.assertTrue(len(pi['foobar'])==1) + # the link should be from the index + self.assertTrue('correct_md5' in pi['foobar'][0].location) + + def test_parse_bdist_wininst(self): + self.assertEqual(setuptools.package_index.parse_bdist_wininst( + 'reportlab-2.5.win32-py2.4.exe'), ('reportlab-2.5', '2.4', 'win32')) + self.assertEqual(setuptools.package_index.parse_bdist_wininst( + 'reportlab-2.5.win32.exe'), ('reportlab-2.5', None, 'win32')) + self.assertEqual(setuptools.package_index.parse_bdist_wininst( + 'reportlab-2.5.win-amd64-py2.7.exe'), ('reportlab-2.5', '2.7', 'win-amd64')) + self.assertEqual(setuptools.package_index.parse_bdist_wininst( + 'reportlab-2.5.win-amd64.exe'), ('reportlab-2.5', None, 'win-amd64')) + + def test__vcs_split_rev_from_url(self): + """ + Test the basic usage of _vcs_split_rev_from_url + """ + vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url + url, rev = vsrfu('https://example.com/bar@2995') + self.assertEqual(url, 'https://example.com/bar') + self.assertEqual(rev, '2995') + +class TestContentCheckers(unittest.TestCase): + + def test_md5(self): + checker = setuptools.package_index.HashChecker.from_url( + 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478') + checker.feed('You should probably not be using MD5'.encode('ascii')) + self.assertEqual(checker.hash.hexdigest(), + 'f12895fdffbd45007040d2e44df98478') + self.assertTrue(checker.is_valid()) + + def test_other_fragment(self): + "Content checks should succeed silently if no hash is present" + checker = setuptools.package_index.HashChecker.from_url( + 'http://foo/bar#something%20completely%20different') + checker.feed('anything'.encode('ascii')) + self.assertTrue(checker.is_valid()) + + def test_blank_md5(self): + "Content checks should succeed if a hash is empty" + checker = setuptools.package_index.HashChecker.from_url( + 'http://foo/bar#md5=') + checker.feed('anything'.encode('ascii')) + self.assertTrue(checker.is_valid()) + + def test_get_hash_name_md5(self): + checker = setuptools.package_index.HashChecker.from_url( + 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478') + self.assertEqual(checker.hash_name, 'md5') + + def test_report(self): + checker = setuptools.package_index.HashChecker.from_url( + 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478') + rep = checker.report(lambda x: x, 'My message about %s') + self.assertEqual(rep, 'My message about md5') diff --git a/awx/lib/site-packages/setuptools/tests/test_resources.py b/awx/lib/site-packages/setuptools/tests/test_resources.py new file mode 100644 index 0000000000..c9fcf76c9b --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_resources.py @@ -0,0 +1,620 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# NOTE: the shebang and encoding lines are for 
ScriptHeaderTests do not remove + +import os +import sys +import tempfile +import shutil +from unittest import TestCase + +import pkg_resources +from pkg_resources import (parse_requirements, VersionConflict, parse_version, + Distribution, EntryPoint, Requirement, safe_version, safe_name, + WorkingSet) + +from setuptools.command.easy_install import (get_script_header, is_sh, + nt_quote_arg) +from setuptools.compat import StringIO, iteritems + +try: + frozenset +except NameError: + from sets import ImmutableSet as frozenset + +def safe_repr(obj, short=False): + """ copied from Python2.7""" + try: + result = repr(obj) + except Exception: + result = object.__repr__(obj) + if not short or len(result) < pkg_resources._MAX_LENGTH: + return result + return result[:pkg_resources._MAX_LENGTH] + ' [truncated]...' + +class Metadata(pkg_resources.EmptyProvider): + """Mock object to return metadata as if from an on-disk distribution""" + + def __init__(self,*pairs): + self.metadata = dict(pairs) + + def has_metadata(self,name): + return name in self.metadata + + def get_metadata(self,name): + return self.metadata[name] + + def get_metadata_lines(self,name): + return pkg_resources.yield_lines(self.get_metadata(name)) + +dist_from_fn = pkg_resources.Distribution.from_filename + +class DistroTests(TestCase): + + def testCollection(self): + # empty path should produce no distributions + ad = pkg_resources.Environment([], platform=None, python=None) + self.assertEqual(list(ad), []) + self.assertEqual(ad['FooPkg'],[]) + ad.add(dist_from_fn("FooPkg-1.3_1.egg")) + ad.add(dist_from_fn("FooPkg-1.4-py2.4-win32.egg")) + ad.add(dist_from_fn("FooPkg-1.2-py2.4.egg")) + + # Name is in there now + self.assertTrue(ad['FooPkg']) + # But only 1 package + self.assertEqual(list(ad), ['foopkg']) + + # Distributions sort by version + self.assertEqual( + [dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2'] + ) + # Removing a distribution leaves sequence alone + ad.remove(ad['FooPkg'][1]) + self.assertEqual( + [dist.version for dist in ad['FooPkg']], ['1.4','1.2'] + ) + # And inserting adds them in order + ad.add(dist_from_fn("FooPkg-1.9.egg")) + self.assertEqual( + [dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2'] + ) + + ws = WorkingSet([]) + foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg") + foo14 = dist_from_fn("FooPkg-1.4-py2.4-win32.egg") + req, = parse_requirements("FooPkg>=1.3") + + # Nominal case: no distros on path, should yield all applicable + self.assertEqual(ad.best_match(req,ws).version, '1.9') + # If a matching distro is already installed, should return only that + ws.add(foo14) + self.assertEqual(ad.best_match(req,ws).version, '1.4') + + # If the first matching distro is unsuitable, it's a version conflict + ws = WorkingSet([]) + ws.add(foo12) + ws.add(foo14) + self.assertRaises(VersionConflict, ad.best_match, req, ws) + + # If more than one match on the path, the first one takes precedence + ws = WorkingSet([]) + ws.add(foo14) + ws.add(foo12) + ws.add(foo14) + self.assertEqual(ad.best_match(req,ws).version, '1.4') + + def checkFooPkg(self,d): + self.assertEqual(d.project_name, "FooPkg") + self.assertEqual(d.key, "foopkg") + self.assertEqual(d.version, "1.3-1") + self.assertEqual(d.py_version, "2.4") + self.assertEqual(d.platform, "win32") + self.assertEqual(d.parsed_version, parse_version("1.3-1")) + + def testDistroBasics(self): + d = Distribution( + "/some/path", + project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32" + ) + self.checkFooPkg(d) + + d = 
Distribution("/some/path") + self.assertEqual(d.py_version, sys.version[:3]) + self.assertEqual(d.platform, None) + + def testDistroParse(self): + d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg") + self.checkFooPkg(d) + d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg-info") + self.checkFooPkg(d) + + def testDistroMetadata(self): + d = Distribution( + "/some/path", project_name="FooPkg", py_version="2.4", platform="win32", + metadata = Metadata( + ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n") + ) + ) + self.checkFooPkg(d) + + def distRequires(self, txt): + return Distribution("/foo", metadata=Metadata(('depends.txt', txt))) + + def checkRequires(self, dist, txt, extras=()): + self.assertEqual( + list(dist.requires(extras)), + list(parse_requirements(txt)) + ) + + def testDistroDependsSimple(self): + for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0": + self.checkRequires(self.distRequires(v), v) + + def testResolve(self): + ad = pkg_resources.Environment([]) + ws = WorkingSet([]) + # Resolving no requirements -> nothing to install + self.assertEqual(list(ws.resolve([],ad)), []) + # Request something not in the collection -> DistributionNotFound + self.assertRaises( + pkg_resources.DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad + ) + Foo = Distribution.from_filename( + "/foo_dir/Foo-1.2.egg", + metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0")) + ) + ad.add(Foo) + ad.add(Distribution.from_filename("Foo-0.9.egg")) + + # Request thing(s) that are available -> list to activate + for i in range(3): + targets = list(ws.resolve(parse_requirements("Foo"), ad)) + self.assertEqual(targets, [Foo]) + list(map(ws.add,targets)) + self.assertRaises(VersionConflict, ws.resolve, + parse_requirements("Foo==0.9"), ad) + ws = WorkingSet([]) # reset + + # Request an extra that causes an unresolved dependency for "Baz" + self.assertRaises( + pkg_resources.DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad + ) + Baz = Distribution.from_filename( + "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo")) + ) + ad.add(Baz) + + # Activation list now includes resolved dependency + self.assertEqual( + list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz] + ) + # Requests for conflicting versions produce VersionConflict + self.assertRaises(VersionConflict, + ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad) + + def testDistroDependsOptions(self): + d = self.distRequires(""" + Twisted>=1.5 + [docgen] + ZConfig>=2.0 + docutils>=0.3 + [fastcgi] + fcgiapp>=0.1""") + self.checkRequires(d,"Twisted>=1.5") + self.checkRequires( + d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"] + ) + self.checkRequires( + d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"] + ) + self.checkRequires( + d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(), + ["docgen","fastcgi"] + ) + self.checkRequires( + d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(), + ["fastcgi", "docgen"] + ) + self.assertRaises(pkg_resources.UnknownExtra, d.requires, ["foo"]) + + +class EntryPointTests(TestCase): + + def assertfields(self, ep): + self.assertEqual(ep.name,"foo") + self.assertEqual(ep.module_name,"setuptools.tests.test_resources") + self.assertEqual(ep.attrs, ("EntryPointTests",)) + self.assertEqual(ep.extras, ("x",)) + self.assertTrue(ep.load() is EntryPointTests) + self.assertEqual( + str(ep), + "foo = setuptools.tests.test_resources:EntryPointTests [x]" + ) + + def setUp(self): + self.dist = Distribution.from_filename( + 
"FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]'))) + + def testBasics(self): + ep = EntryPoint( + "foo", "setuptools.tests.test_resources", ["EntryPointTests"], + ["x"], self.dist + ) + self.assertfields(ep) + + def testParse(self): + s = "foo = setuptools.tests.test_resources:EntryPointTests [x]" + ep = EntryPoint.parse(s, self.dist) + self.assertfields(ep) + + ep = EntryPoint.parse("bar baz= spammity[PING]") + self.assertEqual(ep.name,"bar baz") + self.assertEqual(ep.module_name,"spammity") + self.assertEqual(ep.attrs, ()) + self.assertEqual(ep.extras, ("ping",)) + + ep = EntryPoint.parse(" fizzly = wocka:foo") + self.assertEqual(ep.name,"fizzly") + self.assertEqual(ep.module_name,"wocka") + self.assertEqual(ep.attrs, ("foo",)) + self.assertEqual(ep.extras, ()) + + def testRejects(self): + for ep in [ + "foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2", + ]: + try: EntryPoint.parse(ep) + except ValueError: pass + else: raise AssertionError("Should've been bad", ep) + + def checkSubMap(self, m): + self.assertEqual(len(m), len(self.submap_expect)) + for key, ep in iteritems(self.submap_expect): + self.assertEqual(repr(m.get(key)), repr(ep)) + + submap_expect = dict( + feature1=EntryPoint('feature1', 'somemodule', ['somefunction']), + feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']), + feature3=EntryPoint('feature3', 'this.module', extras=['something']) + ) + submap_str = """ + # define features for blah blah + feature1 = somemodule:somefunction + feature2 = another.module:SomeClass [extra1,extra2] + feature3 = this.module [something] + """ + + def testParseList(self): + self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str)) + self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar") + self.assertRaises(ValueError, EntryPoint.parse_group, "x", + ["foo=baz", "foo=bar"]) + + def testParseMap(self): + m = EntryPoint.parse_map({'xyz':self.submap_str}) + self.checkSubMap(m['xyz']) + self.assertEqual(list(m.keys()),['xyz']) + m = EntryPoint.parse_map("[xyz]\n"+self.submap_str) + self.checkSubMap(m['xyz']) + self.assertEqual(list(m.keys()),['xyz']) + self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"]) + self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str) + +class RequirementsTests(TestCase): + + def testBasics(self): + r = Requirement.parse("Twisted>=1.2") + self.assertEqual(str(r),"Twisted>=1.2") + self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')") + self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ())) + self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ())) + self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ())) + self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ())) + self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ())) + self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2")) + + def testOrdering(self): + r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ()) + r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ()) + self.assertEqual(r1,r2) + self.assertEqual(str(r1),str(r2)) + self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2") + + def testBasicContains(self): + r = Requirement("Twisted", [('>=','1.2')], ()) + foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg") + twist11 = Distribution.from_filename("Twisted-1.1.egg") + twist12 = Distribution.from_filename("Twisted-1.2.egg") + self.assertTrue(parse_version('1.2') in r) + self.assertTrue(parse_version('1.1') not in r) + 
self.assertTrue('1.2' in r) + self.assertTrue('1.1' not in r) + self.assertTrue(foo_dist not in r) + self.assertTrue(twist11 not in r) + self.assertTrue(twist12 in r) + + def testAdvancedContains(self): + r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5") + for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'): + self.assertTrue(v in r, (v,r)) + for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'): + self.assertTrue(v not in r, (v,r)) + + def testOptionsAndHashing(self): + r1 = Requirement.parse("Twisted[foo,bar]>=1.2") + r2 = Requirement.parse("Twisted[bar,FOO]>=1.2") + r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0") + self.assertEqual(r1,r2) + self.assertEqual(r1,r3) + self.assertEqual(r1.extras, ("foo","bar")) + self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized + self.assertEqual(hash(r1), hash(r2)) + self.assertEqual( + hash(r1), hash(("twisted", ((">=",parse_version("1.2")),), + frozenset(["foo","bar"]))) + ) + + def testVersionEquality(self): + r1 = Requirement.parse("foo==0.3a2") + r2 = Requirement.parse("foo!=0.3a4") + d = Distribution.from_filename + + self.assertTrue(d("foo-0.3a4.egg") not in r1) + self.assertTrue(d("foo-0.3a1.egg") not in r1) + self.assertTrue(d("foo-0.3a4.egg") not in r2) + + self.assertTrue(d("foo-0.3a2.egg") in r1) + self.assertTrue(d("foo-0.3a2.egg") in r2) + self.assertTrue(d("foo-0.3a3.egg") in r2) + self.assertTrue(d("foo-0.3a5.egg") in r2) + + def testSetuptoolsProjectName(self): + """ + The setuptools project should implement the setuptools package. + """ + + self.assertEqual( + Requirement.parse('setuptools').project_name, 'setuptools') + # setuptools 0.7 and higher means setuptools. + self.assertEqual( + Requirement.parse('setuptools == 0.7').project_name, 'setuptools') + self.assertEqual( + Requirement.parse('setuptools == 0.7a1').project_name, 'setuptools') + self.assertEqual( + Requirement.parse('setuptools >= 0.7').project_name, 'setuptools') + + +class ParseTests(TestCase): + + def testEmptyParse(self): + self.assertEqual(list(parse_requirements('')), []) + + def testYielding(self): + for inp,out in [ + ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']), + (['x\n\n','y'], ['x','y']), + ]: + self.assertEqual(list(pkg_resources.yield_lines(inp)),out) + + def testSplitting(self): + sample = """ + x + [Y] + z + + a + [b ] + # foo + c + [ d] + [q] + v + """ + self.assertEqual(list(pkg_resources.split_sections(sample)), + [(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])] + ) + self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo")) + + def testSafeName(self): + self.assertEqual(safe_name("adns-python"), "adns-python") + self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils") + self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils") + self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker") + self.assertNotEqual(safe_name("peak.web"), "peak-web") + + def testSafeVersion(self): + self.assertEqual(safe_version("1.2-1"), "1.2-1") + self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha") + self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521") + self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker") + self.assertEqual(safe_version("peak.web"), "peak.web") + + def testSimpleRequirements(self): + self.assertEqual( + list(parse_requirements('Twis-Ted>=1.2-1')), + [Requirement('Twis-Ted',[('>=','1.2-1')], ())] + ) + self.assertEqual( + list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')), + 
[Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())] + ) + self.assertEqual( + Requirement.parse("FooBar==1.99a3"), + Requirement("FooBar", [('==','1.99a3')], ()) + ) + self.assertRaises(ValueError,Requirement.parse,">=2.3") + self.assertRaises(ValueError,Requirement.parse,"x\\") + self.assertRaises(ValueError,Requirement.parse,"x==2 q") + self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2") + self.assertRaises(ValueError,Requirement.parse,"#") + + def testVersionEquality(self): + def c(s1,s2): + p1, p2 = parse_version(s1),parse_version(s2) + self.assertEqual(p1,p2, (s1,s2,p1,p2)) + + c('1.2-rc1', '1.2rc1') + c('0.4', '0.4.0') + c('0.4.0.0', '0.4.0') + c('0.4.0-0', '0.4-0') + c('0pl1', '0.0pl1') + c('0pre1', '0.0c1') + c('0.0.0preview1', '0c1') + c('0.0c1', '0-rc1') + c('1.2a1', '1.2.a.1') + c('1.2...a', '1.2a') + + def testVersionOrdering(self): + def c(s1,s2): + p1, p2 = parse_version(s1),parse_version(s2) + self.assertTrue(p1= (3,) and os.environ.get("LC_CTYPE") + in (None, "C", "POSIX")): + return + + class java: + class lang: + class System: + @staticmethod + def getProperty(property): + return "" + sys.modules["java"] = java + + platform = sys.platform + sys.platform = 'java1.5.0_13' + stdout, stderr = sys.stdout, sys.stderr + try: + # A mock sys.executable that uses a shebang line (this file) + exe = os.path.normpath(os.path.splitext(__file__)[0] + '.py') + self.assertEqual( + get_script_header('#!/usr/local/bin/python', executable=exe), + '#!/usr/bin/env %s\n' % exe) + + # Ensure we generate what is basically a broken shebang line + # when there's options, with a warning emitted + sys.stdout = sys.stderr = StringIO() + self.assertEqual(get_script_header('#!/usr/bin/python -x', + executable=exe), + '#!%s -x\n' % exe) + self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue()) + sys.stdout = sys.stderr = StringIO() + self.assertEqual(get_script_header('#!/usr/bin/python', + executable=self.non_ascii_exe), + '#!%s -x\n' % self.non_ascii_exe) + self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue()) + finally: + del sys.modules["java"] + sys.platform = platform + sys.stdout, sys.stderr = stdout, stderr + + +class NamespaceTests(TestCase): + + def setUp(self): + self._ns_pkgs = pkg_resources._namespace_packages.copy() + self._tmpdir = tempfile.mkdtemp(prefix="tests-setuptools-") + os.makedirs(os.path.join(self._tmpdir, "site-pkgs")) + self._prev_sys_path = sys.path[:] + sys.path.append(os.path.join(self._tmpdir, "site-pkgs")) + + def tearDown(self): + shutil.rmtree(self._tmpdir) + pkg_resources._namespace_packages = self._ns_pkgs.copy() + sys.path = self._prev_sys_path[:] + + def _assertIn(self, member, container): + """ assertIn and assertTrue does not exist in Python2.3""" + if member not in container: + standardMsg = '%s not found in %s' % (safe_repr(member), + safe_repr(container)) + self.fail(self._formatMessage(msg, standardMsg)) + + def test_two_levels_deep(self): + """ + Test nested namespace packages + Create namespace packages in the following tree : + site-packages-1/pkg1/pkg2 + site-packages-2/pkg1/pkg2 + Check both are in the _namespace_packages dict and that their __path__ + is correct + """ + sys.path.append(os.path.join(self._tmpdir, "site-pkgs2")) + os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2")) + os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2")) + ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n" + for site in ["site-pkgs", "site-pkgs2"]: + pkg1_init = 
open(os.path.join(self._tmpdir, site, + "pkg1", "__init__.py"), "w") + pkg1_init.write(ns_str) + pkg1_init.close() + pkg2_init = open(os.path.join(self._tmpdir, site, + "pkg1", "pkg2", "__init__.py"), "w") + pkg2_init.write(ns_str) + pkg2_init.close() + import pkg1 + self._assertIn("pkg1", pkg_resources._namespace_packages.keys()) + try: + import pkg1.pkg2 + except ImportError: + self.fail("Setuptools tried to import the parent namespace package") + # check the _namespace_packages dict + self._assertIn("pkg1.pkg2", pkg_resources._namespace_packages.keys()) + self.assertEqual(pkg_resources._namespace_packages["pkg1"], ["pkg1.pkg2"]) + # check the __path__ attribute contains both paths + self.assertEqual(pkg1.pkg2.__path__, [ + os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"), + os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2")]) diff --git a/awx/lib/site-packages/setuptools/tests/test_sandbox.py b/awx/lib/site-packages/setuptools/tests/test_sandbox.py new file mode 100644 index 0000000000..3dad137683 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_sandbox.py @@ -0,0 +1,79 @@ +"""develop tests +""" +import sys +import os +import shutil +import unittest +import tempfile +import types + +import pkg_resources +import setuptools.sandbox +from setuptools.sandbox import DirectorySandbox, SandboxViolation + +def has_win32com(): + """ + Run this to determine if the local machine has win32com, and if it + does, include additional tests. + """ + if not sys.platform.startswith('win32'): + return False + try: + mod = __import__('win32com') + except ImportError: + return False + return True + +class TestSandbox(unittest.TestCase): + + def setUp(self): + self.dir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.dir) + + def test_devnull(self): + if sys.version < '2.4': + return + sandbox = DirectorySandbox(self.dir) + sandbox.run(self._file_writer(os.devnull)) + + def _file_writer(path): + def do_write(): + f = open(path, 'w') + f.write('xxx') + f.close() + return do_write + + _file_writer = staticmethod(_file_writer) + + if has_win32com(): + def test_win32com(self): + """ + win32com should not be prevented from caching COM interfaces + in gen_py. 
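As a sketch of the sandbox behavior test_devnull relies on (directory names come from tempfile; nothing here is in the upstream sources):

    import os
    import tempfile
    from setuptools.sandbox import DirectorySandbox, SandboxViolation

    safe_dir = tempfile.mkdtemp()
    outside = os.path.join(tempfile.mkdtemp(), 'escape.txt')  # a different temp dir

    def write_outside():
        open(outside, 'w').close()

    try:
        DirectorySandbox(safe_dir).run(write_outside)
    except SandboxViolation:
        print('blocked: writes are confined to %s' % safe_dir)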
+ """ + import win32com + gen_py = win32com.__gen_path__ + target = os.path.join(gen_py, 'test_write') + sandbox = DirectorySandbox(self.dir) + try: + try: + sandbox.run(self._file_writer(target)) + except SandboxViolation: + self.fail("Could not create gen_py file due to SandboxViolation") + finally: + if os.path.exists(target): os.remove(target) + + def test_setup_py_with_BOM(self): + """ + It should be possible to execute a setup.py with a Byte Order Mark + """ + target = pkg_resources.resource_filename(__name__, + 'script-with-bom.py') + namespace = types.ModuleType('namespace') + setuptools.sandbox.execfile(target, vars(namespace)) + assert namespace.result == 'passed' + +if __name__ == '__main__': + unittest.main() diff --git a/awx/lib/site-packages/setuptools/tests/test_sdist.py b/awx/lib/site-packages/setuptools/tests/test_sdist.py new file mode 100644 index 0000000000..438f7cedbd --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_sdist.py @@ -0,0 +1,400 @@ +# -*- coding: utf-8 -*- +"""sdist tests""" + + +import os +import shutil +import sys +import tempfile +import unittest +import unicodedata + +from setuptools.compat import StringIO, unicode +from setuptools.command.sdist import sdist +from setuptools.command.egg_info import manifest_maker +from setuptools.dist import Distribution + + +SETUP_ATTRS = { + 'name': 'sdist_test', + 'version': '0.0', + 'packages': ['sdist_test'], + 'package_data': {'sdist_test': ['*.txt']} +} + + +SETUP_PY = """\ +from setuptools import setup + +setup(**%r) +""" % SETUP_ATTRS + + +if sys.version_info >= (3,): + LATIN1_FILENAME = 'smörbröd.py'.encode('latin-1') +else: + LATIN1_FILENAME = 'sm\xf6rbr\xf6d.py' + + +# Cannot use context manager because of Python 2.4 +def quiet(): + global old_stdout, old_stderr + old_stdout, old_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = StringIO(), StringIO() + +def unquiet(): + sys.stdout, sys.stderr = old_stdout, old_stderr + + +# Fake byte literals for Python <= 2.5 +def b(s, encoding='utf-8'): + if sys.version_info >= (3,): + return s.encode(encoding) + return s + + +# Convert to POSIX path +def posix(path): + if sys.version_info >= (3,) and not isinstance(path, str): + return path.replace(os.sep.encode('ascii'), b('/')) + else: + return path.replace(os.sep, '/') + + +# HFS Plus uses decomposed UTF-8 +def decompose(path): + if isinstance(path, unicode): + return unicodedata.normalize('NFD', path) + try: + path = path.decode('utf-8') + path = unicodedata.normalize('NFD', path) + path = path.encode('utf-8') + except UnicodeError: + pass # Not UTF-8 + return path + + +class TestSdistTest(unittest.TestCase): + + def setUp(self): + self.temp_dir = tempfile.mkdtemp() + f = open(os.path.join(self.temp_dir, 'setup.py'), 'w') + f.write(SETUP_PY) + f.close() + # Set up the rest of the test package + test_pkg = os.path.join(self.temp_dir, 'sdist_test') + os.mkdir(test_pkg) + # *.rst was not included in package_data, so c.rst should not be + # automatically added to the manifest when not under version control + for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']: + # Just touch the files; their contents are irrelevant + open(os.path.join(test_pkg, fname), 'w').close() + + self.old_cwd = os.getcwd() + os.chdir(self.temp_dir) + + def tearDown(self): + os.chdir(self.old_cwd) + shutil.rmtree(self.temp_dir) + + def test_package_data_in_sdist(self): + """Regression test for pull request #4: ensures that files listed in + package_data are included in the manifest even if they're not added to + version 
control. + """ + + dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + cmd = sdist(dist) + cmd.ensure_finalized() + + # squelch output + quiet() + try: + cmd.run() + finally: + unquiet() + + manifest = cmd.filelist.files + self.assertTrue(os.path.join('sdist_test', 'a.txt') in manifest) + self.assertTrue(os.path.join('sdist_test', 'b.txt') in manifest) + self.assertTrue(os.path.join('sdist_test', 'c.rst') not in manifest) + + def test_manifest_is_written_with_utf8_encoding(self): + # Test for #303. + dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + mm = manifest_maker(dist) + mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt') + os.mkdir('sdist_test.egg-info') + + # UTF-8 filename + filename = os.path.join('sdist_test', 'smörbröd.py') + + # Add UTF-8 filename and write manifest + quiet() + try: + mm.run() + mm.filelist.files.append(filename) + mm.write_manifest() + finally: + unquiet() + + manifest = open(mm.manifest, 'rbU') + contents = manifest.read() + manifest.close() + + # The manifest should be UTF-8 encoded + try: + u_contents = contents.decode('UTF-8') + except UnicodeDecodeError: + e = sys.exc_info()[1] + self.fail(e) + + # The manifest should contain the UTF-8 filename + if sys.version_info >= (3,): + self.assertTrue(posix(filename) in u_contents) + else: + self.assertTrue(posix(filename) in contents) + + # Python 3 only + if sys.version_info >= (3,): + + def test_write_manifest_allows_utf8_filenames(self): + # Test for #303. + dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + mm = manifest_maker(dist) + mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt') + os.mkdir('sdist_test.egg-info') + + # UTF-8 filename + filename = os.path.join(b('sdist_test'), b('smörbröd.py')) + + # Add filename and write manifest + quiet() + try: + mm.run() + u_filename = filename.decode('utf-8') + mm.filelist.files.append(u_filename) + # Re-write manifest + mm.write_manifest() + finally: + unquiet() + + manifest = open(mm.manifest, 'rbU') + contents = manifest.read() + manifest.close() + + # The manifest should be UTF-8 encoded + try: + contents.decode('UTF-8') + except UnicodeDecodeError: + e = sys.exc_info()[1] + self.fail(e) + + # The manifest should contain the UTF-8 filename + self.assertTrue(posix(filename) in contents) + + # The filelist should have been updated as well + self.assertTrue(u_filename in mm.filelist.files) + + def test_write_manifest_skips_non_utf8_filenames(self): + # Test for #303. + dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + mm = manifest_maker(dist) + mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt') + os.mkdir('sdist_test.egg-info') + + # Latin-1 filename + filename = os.path.join(b('sdist_test'), LATIN1_FILENAME) + + # Add filename with surrogates and write manifest + quiet() + try: + mm.run() + u_filename = filename.decode('utf-8', 'surrogateescape') + mm.filelist.files.append(u_filename) + # Re-write manifest + mm.write_manifest() + finally: + unquiet() + + manifest = open(mm.manifest, 'rbU') + contents = manifest.read() + manifest.close() + + # The manifest should be UTF-8 encoded + try: + contents.decode('UTF-8') + except UnicodeDecodeError: + e = sys.exc_info()[1] + self.fail(e) + + # The Latin-1 filename should have been skipped + self.assertFalse(posix(filename) in contents) + + # The filelist should have been updated as well + self.assertFalse(u_filename in mm.filelist.files) + + def test_manifest_is_read_with_utf8_encoding(self): + # Test for #303. 
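The pattern these sdist tests repeat (build a Distribution, finalize the command, run it, inspect cmd.filelist) also works as a standalone sketch, provided it runs from a tree laid out like the fixture above:

    from setuptools.dist import Distribution
    from setuptools.command.sdist import sdist

    dist = Distribution({'name': 'sdist_test', 'version': '0.0',
                         'packages': ['sdist_test'],
                         'package_data': {'sdist_test': ['*.txt']}})
    dist.script_name = 'setup.py'
    cmd = sdist(dist)
    cmd.ensure_finalized()
    cmd.run()                  # computes the manifest and builds the archive
    print(cmd.filelist.files)  # includes sdist_test/a.txt and sdist_test/b.txt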
+ dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + cmd = sdist(dist) + cmd.ensure_finalized() + + # Create manifest + quiet() + try: + cmd.run() + finally: + unquiet() + + # Add UTF-8 filename to manifest + filename = os.path.join(b('sdist_test'), b('smörbröd.py')) + cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt') + manifest = open(cmd.manifest, 'ab') + manifest.write(b('\n')+filename) + manifest.close() + + # The file must exist to be included in the filelist + open(filename, 'w').close() + + # Re-read manifest + cmd.filelist.files = [] + quiet() + try: + cmd.read_manifest() + finally: + unquiet() + + # The filelist should contain the UTF-8 filename + if sys.version_info >= (3,): + filename = filename.decode('utf-8') + self.assertTrue(filename in cmd.filelist.files) + + # Python 3 only + if sys.version_info >= (3,): + + def test_read_manifest_skips_non_utf8_filenames(self): + # Test for #303. + dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + cmd = sdist(dist) + cmd.ensure_finalized() + + # Create manifest + quiet() + try: + cmd.run() + finally: + unquiet() + + # Add Latin-1 filename to manifest + filename = os.path.join(b('sdist_test'), LATIN1_FILENAME) + cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt') + manifest = open(cmd.manifest, 'ab') + manifest.write(b('\n')+filename) + manifest.close() + + # The file must exist to be included in the filelist + open(filename, 'w').close() + + # Re-read manifest + cmd.filelist.files = [] + quiet() + try: + try: + cmd.read_manifest() + except UnicodeDecodeError: + e = sys.exc_info()[1] + self.fail(e) + finally: + unquiet() + + # The Latin-1 filename should have been skipped + filename = filename.decode('latin-1') + self.assertFalse(filename in cmd.filelist.files) + + def test_sdist_with_utf8_encoded_filename(self): + # Test for #303. + dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + cmd = sdist(dist) + cmd.ensure_finalized() + + # UTF-8 filename + filename = os.path.join(b('sdist_test'), b('smörbröd.py')) + open(filename, 'w').close() + + quiet() + try: + cmd.run() + finally: + unquiet() + + if sys.platform == 'darwin': + filename = decompose(filename) + + if sys.version_info >= (3,): + fs_enc = sys.getfilesystemencoding() + + if sys.platform == 'win32': + if fs_enc == 'cp1252': + # Python 3 mangles the UTF-8 filename + filename = filename.decode('cp1252') + self.assertTrue(filename in cmd.filelist.files) + else: + filename = filename.decode('mbcs') + self.assertTrue(filename in cmd.filelist.files) + else: + filename = filename.decode('utf-8') + self.assertTrue(filename in cmd.filelist.files) + else: + self.assertTrue(filename in cmd.filelist.files) + + def test_sdist_with_latin1_encoded_filename(self): + # Test for #303. 
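Two encoding wrinkles drive the helpers and assertions in this file, and both are easy to demonstrate on Python 3 (the bytes below mirror LATIN1_FILENAME):

    import unicodedata

    # HFS+ stores decomposed (NFD) UTF-8: each umlaut becomes base char + combining mark.
    name = u'sm\u00f6rbr\u00f6d.py'
    print(len(unicodedata.normalize('NFD', name)) - len(name))  # 2 extra code points

    # surrogateescape lets undecodable bytes round-trip through str, which is how
    # non-UTF-8 names can be detected (and skipped) when the manifest is written:
    raw = b'sm\xf6rbr\xf6d.py'  # Latin-1 bytes
    s = raw.decode('utf-8', 'surrogateescape')
    assert s.encode('utf-8', 'surrogateescape') == raw  # lossless round-trip
    try:
        s.encode('utf-8')  # but it is not valid UTF-8
    except UnicodeEncodeError:
        print('skipped, as in test_write_manifest_skips_non_utf8_filenames')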
+ dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + cmd = sdist(dist) + cmd.ensure_finalized() + + # Latin-1 filename + filename = os.path.join(b('sdist_test'), LATIN1_FILENAME) + open(filename, 'w').close() + self.assertTrue(os.path.isfile(filename)) + + quiet() + try: + cmd.run() + finally: + unquiet() + + if sys.version_info >= (3,): + #not all windows systems have a default FS encoding of cp1252 + if sys.platform == 'win32': + # Latin-1 is similar to Windows-1252 however + # on mbcs filesys it is not in latin-1 encoding + fs_enc = sys.getfilesystemencoding() + if fs_enc == 'mbcs': + filename = filename.decode('mbcs') + else: + filename = filename.decode('latin-1') + + self.assertTrue(filename in cmd.filelist.files) + else: + # The Latin-1 filename should have been skipped + filename = filename.decode('latin-1') + self.assertFalse(filename in cmd.filelist.files) + else: + # No conversion takes place under Python 2 and the file + # is included. We shall keep it that way for BBB. + self.assertTrue(filename in cmd.filelist.files) + + +def test_suite(): + return unittest.defaultTestLoader.loadTestsFromName(__name__) + diff --git a/awx/lib/site-packages/setuptools/tests/test_test.py b/awx/lib/site-packages/setuptools/tests/test_test.py new file mode 100644 index 0000000000..7a06a40329 --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_test.py @@ -0,0 +1,124 @@ +# -*- coding: UTF-8 -*- + +"""develop tests +""" +import sys +import os, shutil, tempfile, unittest +import tempfile +import site + +from distutils.errors import DistutilsError +from setuptools.compat import StringIO +from setuptools.command.test import test +from setuptools.command import easy_install as easy_install_pkg +from setuptools.dist import Distribution + +SETUP_PY = """\ +from setuptools import setup + +setup(name='foo', + packages=['name', 'name.space', 'name.space.tests'], + namespace_packages=['name'], + test_suite='name.space.tests.test_suite', +) +""" + +NS_INIT = """# -*- coding: Latin-1 -*- +# Söme Arbiträry Ünicode to test Issüé 310 +try: + __import__('pkg_resources').declare_namespace(__name__) +except ImportError: + from pkgutil import extend_path + __path__ = extend_path(__path__, __name__) +""" +# Make sure this is Latin-1 binary, before writing: +if sys.version_info < (3,): + NS_INIT = NS_INIT.decode('UTF-8') +NS_INIT = NS_INIT.encode('Latin-1') + +TEST_PY = """import unittest + +class TestTest(unittest.TestCase): + def test_test(self): + print "Foo" # Should fail under Python 3 unless 2to3 is used + +test_suite = unittest.makeSuite(TestTest) +""" + +class TestTestTest(unittest.TestCase): + + def setUp(self): + if sys.version < "2.6" or hasattr(sys, 'real_prefix'): + return + + # Directory structure + self.dir = tempfile.mkdtemp() + os.mkdir(os.path.join(self.dir, 'name')) + os.mkdir(os.path.join(self.dir, 'name', 'space')) + os.mkdir(os.path.join(self.dir, 'name', 'space', 'tests')) + # setup.py + setup = os.path.join(self.dir, 'setup.py') + f = open(setup, 'wt') + f.write(SETUP_PY) + f.close() + self.old_cwd = os.getcwd() + # name/__init__.py + init = os.path.join(self.dir, 'name', '__init__.py') + f = open(init, 'wb') + f.write(NS_INIT) + f.close() + # name/space/__init__.py + init = os.path.join(self.dir, 'name', 'space', '__init__.py') + f = open(init, 'wt') + f.write('#empty\n') + f.close() + # name/space/tests/__init__.py + init = os.path.join(self.dir, 'name', 'space', 'tests', '__init__.py') + f = open(init, 'wt') + f.write(TEST_PY) + f.close() + + os.chdir(self.dir) + 
self.old_base = site.USER_BASE + site.USER_BASE = tempfile.mkdtemp() + self.old_site = site.USER_SITE + site.USER_SITE = tempfile.mkdtemp() + + def tearDown(self): + if sys.version < "2.6" or hasattr(sys, 'real_prefix'): + return + + os.chdir(self.old_cwd) + shutil.rmtree(self.dir) + shutil.rmtree(site.USER_BASE) + shutil.rmtree(site.USER_SITE) + site.USER_BASE = self.old_base + site.USER_SITE = self.old_site + + def test_test(self): + if sys.version < "2.6" or hasattr(sys, 'real_prefix'): + return + + dist = Distribution(dict( + name='foo', + packages=['name', 'name.space', 'name.space.tests'], + namespace_packages=['name'], + test_suite='name.space.tests.test_suite', + use_2to3=True, + )) + dist.script_name = 'setup.py' + cmd = test(dist) + cmd.user = 1 + cmd.ensure_finalized() + cmd.install_dir = site.USER_SITE + cmd.user = 1 + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + try: # try/except/finally doesn't work in Python 2.4, so we need nested try-statements. + cmd.run() + except SystemExit: # The test runner calls sys.exit, stop that making an error. + pass + finally: + sys.stdout = old_stdout + diff --git a/awx/lib/site-packages/setuptools/tests/test_upload_docs.py b/awx/lib/site-packages/setuptools/tests/test_upload_docs.py new file mode 100644 index 0000000000..769f16cc5a --- /dev/null +++ b/awx/lib/site-packages/setuptools/tests/test_upload_docs.py @@ -0,0 +1,72 @@ +"""build_ext tests +""" +import sys, os, shutil, tempfile, unittest, site, zipfile +from setuptools.command.upload_docs import upload_docs +from setuptools.dist import Distribution + +SETUP_PY = """\ +from setuptools import setup + +setup(name='foo') +""" + +class TestUploadDocsTest(unittest.TestCase): + def setUp(self): + self.dir = tempfile.mkdtemp() + setup = os.path.join(self.dir, 'setup.py') + f = open(setup, 'w') + f.write(SETUP_PY) + f.close() + self.old_cwd = os.getcwd() + os.chdir(self.dir) + + self.upload_dir = os.path.join(self.dir, 'build') + os.mkdir(self.upload_dir) + + # A test document. + f = open(os.path.join(self.upload_dir, 'index.html'), 'w') + f.write("Hello world.") + f.close() + + # An empty folder. + os.mkdir(os.path.join(self.upload_dir, 'empty')) + + if sys.version >= "2.6": + self.old_base = site.USER_BASE + site.USER_BASE = upload_docs.USER_BASE = tempfile.mkdtemp() + self.old_site = site.USER_SITE + site.USER_SITE = upload_docs.USER_SITE = tempfile.mkdtemp() + + def tearDown(self): + os.chdir(self.old_cwd) + shutil.rmtree(self.dir) + if sys.version >= "2.6": + shutil.rmtree(site.USER_BASE) + shutil.rmtree(site.USER_SITE) + site.USER_BASE = self.old_base + site.USER_SITE = self.old_site + + def test_create_zipfile(self): + # Test to make sure zipfile creation handles common cases. + # This explicitly includes a folder containing an empty folder. + + dist = Distribution() + + cmd = upload_docs(dist) + cmd.upload_dir = self.upload_dir + cmd.target_dir = self.upload_dir + tmp_dir = tempfile.mkdtemp() + tmp_file = os.path.join(tmp_dir, 'foo.zip') + try: + zip_file = cmd.create_zipfile(tmp_file) + + assert zipfile.is_zipfile(tmp_file) + + zip_file = zipfile.ZipFile(tmp_file) # woh... 
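For context, the create_zipfile method exercised above can be sketched in isolation (the docs path is hypothetical; only the attributes the test itself sets are used):

    import os
    import tempfile
    import zipfile
    from setuptools.dist import Distribution
    from setuptools.command.upload_docs import upload_docs

    cmd = upload_docs(Distribution())
    cmd.upload_dir = cmd.target_dir = '/path/to/build/html'  # hypothetical docs tree
    archive = os.path.join(tempfile.mkdtemp(), 'docs.zip')
    cmd.create_zipfile(archive)
    # Only files are archived; empty directories (like 'empty' above) are dropped.
    print(zipfile.ZipFile(archive).namelist())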
+ + assert zip_file.namelist() == ['index.html'] + + zip_file.close() + finally: + shutil.rmtree(tmp_dir) + diff --git a/awx/lib/site-packages/setuptools/version.py b/awx/lib/site-packages/setuptools/version.py new file mode 100644 index 0000000000..6ebd335c55 --- /dev/null +++ b/awx/lib/site-packages/setuptools/version.py @@ -0,0 +1 @@ +__version__ = '1.1.6' diff --git a/awx/lib/site-packages/simplejson/__init__.py b/awx/lib/site-packages/simplejson/__init__.py new file mode 100644 index 0000000000..7fc81537ff --- /dev/null +++ b/awx/lib/site-packages/simplejson/__init__.py @@ -0,0 +1,547 @@ +r"""JSON (JavaScript Object Notation) is a subset of +JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data +interchange format. + +:mod:`simplejson` exposes an API familiar to users of the standard library +:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained +version of the :mod:`json` library contained in Python 2.6, but maintains +compatibility with Python 2.4 and Python 2.5 and (currently) has +significant performance advantages, even without using the optional C +extension for speedups. + +Encoding basic Python object hierarchies:: + + >>> import simplejson as json + >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) + '["foo", {"bar": ["baz", null, 1.0, 2]}]' + >>> print(json.dumps("\"foo\bar")) + "\"foo\bar" + >>> print(json.dumps(u'\u1234')) + "\u1234" + >>> print(json.dumps('\\')) + "\\" + >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)) + {"a": 0, "b": 0, "c": 0} + >>> from simplejson.compat import StringIO + >>> io = StringIO() + >>> json.dump(['streaming API'], io) + >>> io.getvalue() + '["streaming API"]' + +Compact encoding:: + + >>> import simplejson as json + >>> obj = [1,2,3,{'4': 5, '6': 7}] + >>> json.dumps(obj, separators=(',',':'), sort_keys=True) + '[1,2,3,{"4":5,"6":7}]' + +Pretty printing:: + + >>> import simplejson as json + >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')) + { + "4": 5, + "6": 7 + } + +Decoding JSON:: + + >>> import simplejson as json + >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] + >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj + True + >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar' + True + >>> from simplejson.compat import StringIO + >>> io = StringIO('["streaming API"]') + >>> json.load(io)[0] == 'streaming API' + True + +Specializing JSON object decoding:: + + >>> import simplejson as json + >>> def as_complex(dct): + ... if '__complex__' in dct: + ... return complex(dct['real'], dct['imag']) + ... return dct + ... + >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', + ... object_hook=as_complex) + (1+2j) + >>> from decimal import Decimal + >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1') + True + +Specializing JSON object encoding:: + + >>> import simplejson as json + >>> def encode_complex(obj): + ... if isinstance(obj, complex): + ... return [obj.real, obj.imag] + ... raise TypeError(repr(o) + " is not JSON serializable") + ... 
+ >>> json.dumps(2 + 1j, default=encode_complex) + '[2.0, 1.0]' + >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) + '[2.0, 1.0]' + >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) + '[2.0, 1.0]' + + +Using simplejson.tool from the shell to validate and pretty-print:: + + $ echo '{"json":"obj"}' | python -m simplejson.tool + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -m simplejson.tool + Expecting property name: line 1 column 3 (char 2) +""" +from __future__ import absolute_import +__version__ = '3.3.0' +__all__ = [ + 'dump', 'dumps', 'load', 'loads', + 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder', + 'OrderedDict', 'simple_first', +] + +__author__ = 'Bob Ippolito ' + +from decimal import Decimal + +from .scanner import JSONDecodeError +from .decoder import JSONDecoder +from .encoder import JSONEncoder, JSONEncoderForHTML +def _import_OrderedDict(): + import collections + try: + return collections.OrderedDict + except AttributeError: + from . import ordered_dict + return ordered_dict.OrderedDict +OrderedDict = _import_OrderedDict() + +def _import_c_make_encoder(): + try: + from ._speedups import make_encoder + return make_encoder + except ImportError: + return None + +_default_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + allow_nan=True, + indent=None, + separators=None, + encoding='utf-8', + default=None, + use_decimal=True, + namedtuple_as_object=True, + tuple_as_array=True, + bigint_as_string=False, + item_sort_key=None, + for_json=False, + ignore_nan=False, +) + +def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', default=None, use_decimal=True, + namedtuple_as_object=True, tuple_as_array=True, + bigint_as_string=False, sort_keys=False, item_sort_key=None, + for_json=False, ignore_nan=False, **kw): + """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a + ``.write()``-supporting file-like object). + + If *skipkeys* is true then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If *ensure_ascii* is false, then the some chunks written to ``fp`` + may be ``unicode`` instances, subject to normal Python ``str`` to + ``unicode`` coercion rules. Unless ``fp.write()`` explicitly + understands ``unicode`` (as in ``codecs.getwriter()``) this is likely + to cause an error. + + If *check_circular* is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If *allow_nan* is false, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) + in strict compliance of the original JSON specification, instead of using + the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). See + *ignore_nan* for ECMA-262 compliant behavior. + + If *indent* is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If specified, *separators* should be an + ``(item_separator, key_separator)`` tuple. 
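Since the docstring examples above focus on dumps(), here is the streaming form in one sketch (StringIO stands in for any object with a .write() method):

    import simplejson as json
    from simplejson.compat import StringIO

    buf = StringIO()
    json.dump({'b': 1, 'a': 2}, buf, sort_keys=True)
    print(buf.getvalue())  # {"a": 2, "b": 1}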
The default is ``(', ', ': ')`` + if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most + compact JSON representation, you should specify ``(',', ':')`` to eliminate + whitespace. + + *encoding* is the character encoding for str instances, default is UTF-8. + + *default(obj)* is a function that should return a serializable version + of obj or raise ``TypeError``. The default simply raises ``TypeError``. + + If *use_decimal* is true (default: ``True``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + If *namedtuple_as_object* is true (default: ``True``), + :class:`tuple` subclasses with ``_asdict()`` methods will be encoded + as JSON objects. + + If *tuple_as_array* is true (default: ``True``), + :class:`tuple` (and subclasses) will be encoded as JSON arrays. + + If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. Note that this is still a + lossy operation that will not round-trip correctly and should be used + sparingly. + + If specified, *item_sort_key* is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. This option takes precedence over + *sort_keys*. + + If *sort_keys* is true (default: ``False``), the output of dictionaries + will be sorted by item. + + If *for_json* is true (default: ``False``), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. + + If *ignore_nan* is true (default: ``False``), then out of range + :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as + ``null`` in compliance with the ECMA-262 specification. If true, this will + override *allow_nan*. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead + of subclassing whenever possible. 
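A compact illustration of several keyword arguments documented above, assuming the simplejson 3.3.0 vendored here:

    import simplejson as json

    data = {'big': 2 ** 53 + 1, 'bad': float('nan')}
    print(json.dumps(data, bigint_as_string=True, ignore_nan=True, sort_keys=True))
    # {"bad": null, "big": "9007199254740993"}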
+ + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and allow_nan and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and use_decimal + and namedtuple_as_object and tuple_as_array + and not bigint_as_string and not item_sort_key + and not for_json and not ignore_nan and not kw): + iterable = _default_encoder.iterencode(obj) + else: + if cls is None: + cls = JSONEncoder + iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, + default=default, use_decimal=use_decimal, + namedtuple_as_object=namedtuple_as_object, + tuple_as_array=tuple_as_array, + bigint_as_string=bigint_as_string, + sort_keys=sort_keys, + item_sort_key=item_sort_key, + for_json=for_json, + ignore_nan=ignore_nan, + **kw).iterencode(obj) + # could accelerate with writelines in some versions of Python, at + # a debuggability cost + for chunk in iterable: + fp.write(chunk) + + +def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', default=None, use_decimal=True, + namedtuple_as_object=True, tuple_as_array=True, + bigint_as_string=False, sort_keys=False, item_sort_key=None, + for_json=False, ignore_nan=False, **kw): + """Serialize ``obj`` to a JSON formatted ``str``. + + If ``skipkeys`` is false then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is false, then the return value will be a + ``unicode`` instance subject to normal Python ``str`` to ``unicode`` + coercion rules instead of being escaped to an ASCII ``str``. + + If ``check_circular`` is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If ``allow_nan`` is false, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in + strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If ``indent`` is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If specified, ``separators`` should be an + ``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')`` + if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most + compact JSON representation, you should specify ``(',', ':')`` to eliminate + whitespace. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + If *use_decimal* is true (default: ``True``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + If *namedtuple_as_object* is true (default: ``True``), + :class:`tuple` subclasses with ``_asdict()`` methods will be encoded + as JSON objects. 
+ + If *tuple_as_array* is true (default: ``True``), + :class:`tuple` (and subclasses) will be encoded as JSON arrays. + + If *bigint_as_string* is true (not the default), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. + + If specified, *item_sort_key* is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. This option takes precendence over + *sort_keys*. + + If *sort_keys* is true (default: ``False``), the output of dictionaries + will be sorted by item. + + If *for_json* is true (default: ``False``), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. + + If *ignore_nan* is true (default: ``False``), then out of range + :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as + ``null`` in compliance with the ECMA-262 specification. If true, this will + override *allow_nan*. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing + whenever possible. + + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and allow_nan and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and use_decimal + and namedtuple_as_object and tuple_as_array + and not bigint_as_string and not sort_keys + and not item_sort_key and not for_json + and not ignore_nan and not kw): + return _default_encoder.encode(obj) + if cls is None: + cls = JSONEncoder + return cls( + skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, default=default, + use_decimal=use_decimal, + namedtuple_as_object=namedtuple_as_object, + tuple_as_array=tuple_as_array, + bigint_as_string=bigint_as_string, + sort_keys=sort_keys, + item_sort_key=item_sort_key, + for_json=for_json, + ignore_nan=ignore_nan, + **kw).encode(obj) + + +_default_decoder = JSONDecoder(encoding=None, object_hook=None, + object_pairs_hook=None) + + +def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, object_pairs_hook=None, + use_decimal=False, namedtuple_as_object=True, tuple_as_array=True, + **kw): + """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing + a JSON document) to a Python object. + + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. 
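The object_pairs_hook described above is the usual route to an order-preserving decode; a sketch using the OrderedDict this package exports:

    import simplejson as json
    from simplejson import OrderedDict

    decoded = json.loads('{"z": 1, "a": 2}', object_pairs_hook=OrderedDict)
    print(list(decoded.items()))  # [('z', 1), ('a', 2)] -- source order preserved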
This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + *parse_constant*, if specified, will be called with one of the + following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This + can be used to raise an exception if invalid JSON numbers are + encountered. + + If *use_decimal* is true (default: ``False``) then it implies + parse_float=decimal.Decimal for parity with ``dump``. + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead + of subclassing whenever possible. + + """ + return loads(fp.read(), + encoding=encoding, cls=cls, object_hook=object_hook, + parse_float=parse_float, parse_int=parse_int, + parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, + use_decimal=use_decimal, **kw) + + +def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, object_pairs_hook=None, + use_decimal=False, **kw): + """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON + document) to a Python object. + + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). 
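The number hooks compose as documented; for instance:

    import simplejson as json
    from decimal import Decimal

    print(repr(json.loads('1.1', use_decimal=True)))  # Decimal('1.1')
    print(repr(json.loads('10', parse_int=float)))    # 10.0
    print(json.dumps(Decimal('1.1')))                 # 1.1 (use_decimal defaults on when encoding)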
+ + *parse_constant*, if specified, will be called with one of the + following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This + can be used to raise an exception if invalid JSON numbers are + encountered. + + If *use_decimal* is true (default: ``False``) then it implies + parse_float=decimal.Decimal for parity with ``dump``. + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead + of subclassing whenever possible. + + """ + if (cls is None and encoding is None and object_hook is None and + parse_int is None and parse_float is None and + parse_constant is None and object_pairs_hook is None + and not use_decimal and not kw): + return _default_decoder.decode(s) + if cls is None: + cls = JSONDecoder + if object_hook is not None: + kw['object_hook'] = object_hook + if object_pairs_hook is not None: + kw['object_pairs_hook'] = object_pairs_hook + if parse_float is not None: + kw['parse_float'] = parse_float + if parse_int is not None: + kw['parse_int'] = parse_int + if parse_constant is not None: + kw['parse_constant'] = parse_constant + if use_decimal: + if parse_float is not None: + raise TypeError("use_decimal=True implies parse_float=Decimal") + kw['parse_float'] = Decimal + return cls(encoding=encoding, **kw).decode(s) + + +def _toggle_speedups(enabled): + from . import decoder as dec + from . import encoder as enc + from . import scanner as scan + c_make_encoder = _import_c_make_encoder() + if enabled: + dec.scanstring = dec.c_scanstring or dec.py_scanstring + enc.c_make_encoder = c_make_encoder + enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or + enc.py_encode_basestring_ascii) + scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner + else: + dec.scanstring = dec.py_scanstring + enc.c_make_encoder = None + enc.encode_basestring_ascii = enc.py_encode_basestring_ascii + scan.make_scanner = scan.py_make_scanner + dec.make_scanner = scan.make_scanner + global _default_decoder + _default_decoder = JSONDecoder( + encoding=None, + object_hook=None, + object_pairs_hook=None, + ) + global _default_encoder + _default_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + allow_nan=True, + indent=None, + separators=None, + encoding='utf-8', + default=None, + ) + +def simple_first(kv): + """Helper function to pass to item_sort_key to sort simple + elements to the top, then container elements. 
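The simple_first helper being defined here pairs with item_sort_key; for example:

    import simplejson as json
    from simplejson import simple_first

    doc = {'nested': {'x': 1}, 'name': 'demo', 'count': 3}
    print(json.dumps(doc, item_sort_key=simple_first))
    # {"count": 3, "name": "demo", "nested": {"x": 1}} -- scalars first, then containers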
+ """ + return (isinstance(kv[1], (list, dict, tuple)), kv[0]) diff --git a/awx/lib/site-packages/simplejson/compat.py b/awx/lib/site-packages/simplejson/compat.py new file mode 100644 index 0000000000..449e48abb7 --- /dev/null +++ b/awx/lib/site-packages/simplejson/compat.py @@ -0,0 +1,43 @@ +"""Python 3 compatibility shims +""" +import sys +if sys.version_info[0] < 3: + PY3 = False + def b(s): + return s + def u(s): + return unicode(s, 'unicode_escape') + import cStringIO as StringIO + StringIO = BytesIO = StringIO.StringIO + text_type = unicode + binary_type = str + string_types = (basestring,) + integer_types = (int, long) + unichr = unichr + reload_module = reload + def fromhex(s): + return s.decode('hex') + +else: + PY3 = True + from imp import reload as reload_module + import codecs + def b(s): + return codecs.latin_1_encode(s)[0] + def u(s): + return s + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + text_type = str + binary_type = bytes + string_types = (str,) + integer_types = (int,) + + def unichr(s): + return u(chr(s)) + + def fromhex(s): + return bytes.fromhex(s) + +long_type = integer_types[-1] diff --git a/awx/lib/site-packages/simplejson/decoder.py b/awx/lib/site-packages/simplejson/decoder.py new file mode 100644 index 0000000000..5ccb450476 --- /dev/null +++ b/awx/lib/site-packages/simplejson/decoder.py @@ -0,0 +1,389 @@ +"""Implementation of JSONDecoder +""" +from __future__ import absolute_import +import re +import sys +import struct +from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr +from .scanner import make_scanner, JSONDecodeError + +def _import_c_scanstring(): + try: + from ._speedups import scanstring + return scanstring + except ImportError: + return None +c_scanstring = _import_c_scanstring() + +# NOTE (3.1.0): JSONDecodeError may still be imported from this module for +# compatibility, but it was never in the __all__ +__all__ = ['JSONDecoder'] + +FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL + +def _floatconstants(): + _BYTES = fromhex('7FF80000000000007FF0000000000000') + # The struct module in Python 2.4 would get frexp() out of range here + # when an endian is specified in the format string. Fixed in Python 2.5+ + if sys.byteorder != 'big': + _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] + nan, inf = struct.unpack('dd', _BYTES) + return nan, inf, -inf + +NaN, PosInf, NegInf = _floatconstants() + +_CONSTANTS = { + '-Infinity': NegInf, + 'Infinity': PosInf, + 'NaN': NaN, +} + +STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) +BACKSLASH = { + '"': u('"'), '\\': u('\u005c'), '/': u('/'), + 'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'), +} + +DEFAULT_ENCODING = "utf-8" + +def py_scanstring(s, end, encoding=None, strict=True, + _b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join, + _PY3=PY3, _maxunicode=sys.maxunicode): + """Scan the string s for a JSON string. End is the index of the + character in s after the quote that started the JSON string. + Unescapes all valid JSON string escape sequences and raises ValueError + on attempt to decode an invalid string. If strict is False then literal + control characters are allowed in the string. 
+ + Returns a tuple of the decoded string and the index of the character in s + after the end quote.""" + if encoding is None: + encoding = DEFAULT_ENCODING + chunks = [] + _append = chunks.append + begin = end - 1 + while 1: + chunk = _m(s, end) + if chunk is None: + raise JSONDecodeError( + "Unterminated string starting at", s, begin) + end = chunk.end() + content, terminator = chunk.groups() + # Content is contains zero or more unescaped string characters + if content: + if not _PY3 and not isinstance(content, text_type): + content = text_type(content, encoding) + _append(content) + # Terminator is the end of string, a literal control character, + # or a backslash denoting that an escape sequence follows + if terminator == '"': + break + elif terminator != '\\': + if strict: + msg = "Invalid control character %r at" + raise JSONDecodeError(msg, s, end) + else: + _append(terminator) + continue + try: + esc = s[end] + except IndexError: + raise JSONDecodeError( + "Unterminated string starting at", s, begin) + # If not a unicode escape sequence, must be in the lookup table + if esc != 'u': + try: + char = _b[esc] + except KeyError: + msg = "Invalid \\X escape sequence %r" + raise JSONDecodeError(msg, s, end) + end += 1 + else: + # Unicode escape sequence + msg = "Invalid \\uXXXX escape sequence" + esc = s[end + 1:end + 5] + escX = esc[1:2] + if len(esc) != 4 or escX == 'x' or escX == 'X': + raise JSONDecodeError(msg, s, end - 1) + try: + uni = int(esc, 16) + except ValueError: + raise JSONDecodeError(msg, s, end - 1) + end += 5 + # Check for surrogate pair on UCS-4 systems + # Note that this will join high/low surrogate pairs + # but will also pass unpaired surrogates through + if (_maxunicode > 65535 and + uni & 0xfc00 == 0xd800 and + s[end:end + 2] == '\\u'): + esc2 = s[end + 2:end + 6] + escX = esc2[1:2] + if len(esc2) == 4 and not (escX == 'x' or escX == 'X'): + try: + uni2 = int(esc2, 16) + except ValueError: + raise JSONDecodeError(msg, s, end) + if uni2 & 0xfc00 == 0xdc00: + uni = 0x10000 + (((uni - 0xd800) << 10) | + (uni2 - 0xdc00)) + end += 6 + char = unichr(uni) + # Append the unescaped character + _append(char) + return _join(chunks), end + + +# Use speedup if available +scanstring = c_scanstring or py_scanstring + +WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS) +WHITESPACE_STR = ' \t\n\r' + +def JSONObject(state, encoding, strict, scan_once, object_hook, + object_pairs_hook, memo=None, + _w=WHITESPACE.match, _ws=WHITESPACE_STR): + (s, end) = state + # Backwards compatibility + if memo is None: + memo = {} + memo_get = memo.setdefault + pairs = [] + # Use a slice to prevent IndexError from being raised, the following + # check will raise a more specific ValueError if the string is empty + nextchar = s[end:end + 1] + # Normally we expect nextchar == '"' + if nextchar != '"': + if nextchar in _ws: + end = _w(s, end).end() + nextchar = s[end:end + 1] + # Trivial empty object + if nextchar == '}': + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + 1 + pairs = {} + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end + 1 + elif nextchar != '"': + raise JSONDecodeError( + "Expecting property name enclosed in double quotes", + s, end) + end += 1 + while True: + key, end = scanstring(s, end, encoding, strict) + key = memo_get(key, key) + + # To skip some function call overhead we optimize the fast paths where + # the JSON key separator is ": " or just ":". 
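An aside on the compat module added just above: its shims make byte/text handling uniform across 2.x and 3.x, e.g.:

    from simplejson.compat import b, u, text_type, binary_type

    payload = b('{"key": "value"}')
    assert isinstance(payload, binary_type)  # bytes on Python 3, str on Python 2
    assert isinstance(u('abc'), text_type)   # str on Python 3, unicode on Python 2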
+ if s[end:end + 1] != ':': + end = _w(s, end).end() + if s[end:end + 1] != ':': + raise JSONDecodeError("Expecting ':' delimiter", s, end) + + end += 1 + + try: + if s[end] in _ws: + end += 1 + if s[end] in _ws: + end = _w(s, end + 1).end() + except IndexError: + pass + + value, end = scan_once(s, end) + pairs.append((key, value)) + + try: + nextchar = s[end] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end] + except IndexError: + nextchar = '' + end += 1 + + if nextchar == '}': + break + elif nextchar != ',': + raise JSONDecodeError("Expecting ',' delimiter or '}'", s, end - 1) + + try: + nextchar = s[end] + if nextchar in _ws: + end += 1 + nextchar = s[end] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end] + except IndexError: + nextchar = '' + + end += 1 + if nextchar != '"': + raise JSONDecodeError( + "Expecting property name enclosed in double quotes", + s, end - 1) + + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + pairs = dict(pairs) + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end + +def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR): + (s, end) = state + values = [] + nextchar = s[end:end + 1] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end:end + 1] + # Look-ahead for trivial empty array + if nextchar == ']': + return values, end + 1 + elif nextchar == '': + raise JSONDecodeError("Expecting value or ']'", s, end) + _append = values.append + while True: + value, end = scan_once(s, end) + _append(value) + nextchar = s[end:end + 1] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end:end + 1] + end += 1 + if nextchar == ']': + break + elif nextchar != ',': + raise JSONDecodeError("Expecting ',' delimiter or ']'", s, end - 1) + + try: + if s[end] in _ws: + end += 1 + if s[end] in _ws: + end = _w(s, end + 1).end() + except IndexError: + pass + + return values, end + +class JSONDecoder(object): + """Simple JSON decoder + + Performs the following translations in decoding by default: + + +---------------+-------------------+ + | JSON | Python | + +===============+===================+ + | object | dict | + +---------------+-------------------+ + | array | list | + +---------------+-------------------+ + | string | unicode | + +---------------+-------------------+ + | number (int) | int, long | + +---------------+-------------------+ + | number (real) | float | + +---------------+-------------------+ + | true | True | + +---------------+-------------------+ + | false | False | + +---------------+-------------------+ + | null | None | + +---------------+-------------------+ + + It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as + their corresponding ``float`` values, which is outside the JSON spec. + + """ + + def __init__(self, encoding=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, strict=True, + object_pairs_hook=None): + """ + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. 
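+        As a purely illustrative sketch (the hook shown is made up, not
+        part of the library)::
+
+            hook = lambda d: dict((k.upper(), v) for k, v in d.items())
+            JSONDecoder(object_hook=hook).decode('{"a": 1}')  # -> {'A': 1}
+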
This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + *parse_constant*, if specified, will be called with one of the + following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This + can be used to raise an exception if invalid JSON numbers are + encountered. + + *strict* controls the parser's behavior when it encounters an + invalid control character in a string. The default setting of + ``True`` means that unescaped control characters are parse errors, if + ``False`` then control characters will be allowed in strings. + + """ + if encoding is None: + encoding = DEFAULT_ENCODING + self.encoding = encoding + self.object_hook = object_hook + self.object_pairs_hook = object_pairs_hook + self.parse_float = parse_float or float + self.parse_int = parse_int or int + self.parse_constant = parse_constant or _CONSTANTS.__getitem__ + self.strict = strict + self.parse_object = JSONObject + self.parse_array = JSONArray + self.parse_string = scanstring + self.memo = {} + self.scan_once = make_scanner(self) + + def decode(self, s, _w=WHITESPACE.match, _PY3=PY3): + """Return the Python representation of ``s`` (a ``str`` or ``unicode`` + instance containing a JSON document) + + """ + if _PY3 and isinstance(s, binary_type): + s = s.decode(self.encoding) + obj, end = self.raw_decode(s) + end = _w(s, end).end() + if end != len(s): + raise JSONDecodeError("Extra data", s, end, len(s)) + return obj + + def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3): + """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` + beginning with a JSON document) and return a 2-tuple of the Python + representation and the index in ``s`` where the document ended. + Optionally, ``idx`` can be used to specify an offset in ``s`` where + the JSON document begins. + + This can be used to decode a JSON document from a string that may + have extraneous data at the end. 
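+
+        A small sketch of the result shape (8 is the index of the first
+        character after the document)::
+
+            JSONDecoder().raw_decode('{"a": 1} trailing garbage')
+            # -> ({'a': 1}, 8)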
+ + """ + if _PY3 and not isinstance(s, text_type): + raise TypeError("Input string must be text, not bytes") + return self.scan_once(s, idx=_w(s, idx).end()) diff --git a/awx/lib/site-packages/simplejson/encoder.py b/awx/lib/site-packages/simplejson/encoder.py new file mode 100644 index 0000000000..9815ee5210 --- /dev/null +++ b/awx/lib/site-packages/simplejson/encoder.py @@ -0,0 +1,628 @@ +"""Implementation of JSONEncoder +""" +from __future__ import absolute_import +import re +from operator import itemgetter +from decimal import Decimal +from .compat import u, unichr, binary_type, string_types, integer_types, PY3 +def _import_speedups(): + try: + from . import _speedups + return _speedups.encode_basestring_ascii, _speedups.make_encoder + except ImportError: + return None, None +c_encode_basestring_ascii, c_make_encoder = _import_speedups() + +from simplejson.decoder import PosInf + +#ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]') +# This is required because u() will mangle the string and ur'' isn't valid +# python3 syntax +ESCAPE = re.compile(u'[\\x00-\\x1f\\\\"\\b\\f\\n\\r\\t\u2028\u2029]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(r'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) +for i in [0x2028, 0x2029]: + ESCAPE_DCT.setdefault(unichr(i), '\\u%04x' % (i,)) + +FLOAT_REPR = repr + +def encode_basestring(s, _PY3=PY3, _q=u('"')): + """Return a JSON representation of a Python string + + """ + if _PY3: + if isinstance(s, binary_type): + s = s.decode('utf-8') + else: + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + return ESCAPE_DCT[match.group(0)] + return _q + ESCAPE.sub(replace, s) + _q + + +def py_encode_basestring_ascii(s, _PY3=PY3): + """Return an ASCII-only JSON representation of a Python string + + """ + if _PY3: + if isinstance(s, binary_type): + s = s.decode('utf-8') + else: + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + #return '\\u{0:04x}'.format(n) + return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '\\u%04x\\u%04x' % (s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +encode_basestring_ascii = ( + c_encode_basestring_ascii or py_encode_basestring_ascii) + +class JSONEncoder(object): + """Extensible JSON encoder for Python data structures. 
+ + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict, namedtuple | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str, unicode | string | + +-------------------+---------------+ + | int, long, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). + + """ + item_separator = ', ' + key_separator = ': ' + def __init__(self, skipkeys=False, ensure_ascii=True, + check_circular=True, allow_nan=True, sort_keys=False, + indent=None, separators=None, encoding='utf-8', default=None, + use_decimal=True, namedtuple_as_object=True, + tuple_as_array=True, bigint_as_string=False, + item_sort_key=None, for_json=False, ignore_nan=False): + """Constructor for JSONEncoder, with sensible defaults. + + If skipkeys is false, then it is a TypeError to attempt + encoding of keys that are not str, int, long, float or None. If + skipkeys is True, such items are simply skipped. + + If ensure_ascii is true, the output is guaranteed to be str + objects with all incoming unicode characters escaped. If + ensure_ascii is false, the output will be unicode object. + + If check_circular is true, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an OverflowError). + Otherwise, no such check takes place. + + If allow_nan is true, then NaN, Infinity, and -Infinity will be + encoded as such. This behavior is not JSON specification compliant, + but is consistent with most JavaScript based encoders and decoders. + Otherwise, it will be a ValueError to encode such floats. + + If sort_keys is true, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If specified, separators should be an (item_separator, key_separator) + tuple. The default is (', ', ': ') if *indent* is ``None`` and + (',', ': ') otherwise. To get the most compact JSON representation, + you should specify (',', ':') to eliminate whitespace. + + If specified, default is a function that gets called for objects + that can't otherwise be serialized. It should return a JSON encodable + version of the object or raise a ``TypeError``. + + If encoding is not None, then all input strings will be + transformed into unicode using that encoding prior to JSON-encoding. + The default is UTF-8. + + If use_decimal is true (not the default), ``decimal.Decimal`` will + be supported directly by the encoder. 
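+        For example, with ``use_decimal=True`` (a small sketch using the
+        module-level helper)::
+
+            import decimal, simplejson
+            simplejson.dumps(decimal.Decimal('1.1'))  # -> '1.1'
+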
For the inverse, decode JSON + with ``parse_float=decimal.Decimal``. + + If namedtuple_as_object is true (the default), objects with + ``_asdict()`` methods will be encoded as JSON objects. + + If tuple_as_array is true (the default), tuple (and subclasses) will + be encoded as JSON arrays. + + If bigint_as_string is true (not the default), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. + + If specified, item_sort_key is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. + + If for_json is true (not the default), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. + + If *ignore_nan* is true (default: ``False``), then out of range + :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized + as ``null`` in compliance with the ECMA-262 specification. If true, + this will override *allow_nan*. + + """ + + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.allow_nan = allow_nan + self.sort_keys = sort_keys + self.use_decimal = use_decimal + self.namedtuple_as_object = namedtuple_as_object + self.tuple_as_array = tuple_as_array + self.bigint_as_string = bigint_as_string + self.item_sort_key = item_sort_key + self.for_json = for_json + self.ignore_nan = ignore_nan + if indent is not None and not isinstance(indent, string_types): + indent = indent * ' ' + self.indent = indent + if separators is not None: + self.item_separator, self.key_separator = separators + elif indent is not None: + self.item_separator = ',' + if default is not None: + self.default = default + self.encoding = encoding + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return JSONEncoder.default(self, o) + + """ + raise TypeError(repr(o) + " is not JSON serializable") + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> from simplejson import JSONEncoder + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. + if isinstance(o, binary_type): + _encoding = self.encoding + if (_encoding is not None and not (_encoding == 'utf-8')): + o = o.decode(_encoding) + if isinstance(o, string_types): + if self.ensure_ascii: + return encode_basestring_ascii(o) + else: + return encode_basestring(o) + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. + chunks = self.iterencode(o, _one_shot=True) + if not isinstance(chunks, (list, tuple)): + chunks = list(chunks) + if self.ensure_ascii: + return ''.join(chunks) + else: + return u''.join(chunks) + + def iterencode(self, o, _one_shot=False): + """Encode the given object and yield each string + representation as available. 
+ + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + _encoder = encode_basestring_ascii + else: + _encoder = encode_basestring + if self.encoding != 'utf-8': + def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): + if isinstance(o, binary_type): + o = o.decode(_encoding) + return _orig_encoder(o) + + def floatstr(o, allow_nan=self.allow_nan, ignore_nan=self.ignore_nan, + _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on + # the internals. + + if o != o: + text = 'NaN' + elif o == _inf: + text = 'Infinity' + elif o == _neginf: + text = '-Infinity' + else: + return _repr(o) + + if ignore_nan: + text = 'null' + elif not allow_nan: + raise ValueError( + "Out of range float values are not JSON compliant: " + + repr(o)) + + return text + + + key_memo = {} + if (_one_shot and c_make_encoder is not None + and self.indent is None): + _iterencode = c_make_encoder( + markers, self.default, _encoder, self.indent, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, self.allow_nan, key_memo, self.use_decimal, + self.namedtuple_as_object, self.tuple_as_array, + self.bigint_as_string, self.item_sort_key, + self.encoding, self.for_json, self.ignore_nan, + Decimal) + else: + _iterencode = _make_iterencode( + markers, self.default, _encoder, self.indent, floatstr, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, _one_shot, self.use_decimal, + self.namedtuple_as_object, self.tuple_as_array, + self.bigint_as_string, self.item_sort_key, + self.encoding, self.for_json, + Decimal=Decimal) + try: + return _iterencode(o, 0) + finally: + key_memo.clear() + + +class JSONEncoderForHTML(JSONEncoder): + """An encoder that produces JSON safe to embed in HTML. + + To embed JSON content in, say, a script tag on a web page, the + characters &, < and > should be escaped. They cannot be escaped + with the usual entities (e.g. 
&) because they are not expanded + within ' + self.assertEqual( + r'"\u003c/script\u003e\u003cscript\u003e' + r'alert(\"gotcha\")\u003c/script\u003e"', + self.encoder.encode(bad_string)) + self.assertEqual( + bad_string, self.decoder.decode( + self.encoder.encode(bad_string))) diff --git a/awx/lib/site-packages/simplejson/tests/test_errors.py b/awx/lib/site-packages/simplejson/tests/test_errors.py new file mode 100644 index 0000000000..6bc2fc80bf --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_errors.py @@ -0,0 +1,35 @@ +import sys +from unittest import TestCase + +import simplejson as json +from simplejson.compat import u, b + +class TestErrors(TestCase): + def test_string_keys_error(self): + data = [{'a': 'A', 'b': (2, 4), 'c': 3.0, ('d',): 'D tuple'}] + self.assertRaises(TypeError, json.dumps, data) + + def test_decode_error(self): + err = None + try: + json.loads('{}\na\nb') + except json.JSONDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected JSONDecodeError') + self.assertEqual(err.lineno, 2) + self.assertEqual(err.colno, 1) + self.assertEqual(err.endlineno, 3) + self.assertEqual(err.endcolno, 2) + + def test_scan_error(self): + err = None + for t in (u, b): + try: + json.loads(t('{"asdf": "')) + except json.JSONDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected JSONDecodeError') + self.assertEqual(err.lineno, 1) + self.assertEqual(err.colno, 10) diff --git a/awx/lib/site-packages/simplejson/tests/test_fail.py b/awx/lib/site-packages/simplejson/tests/test_fail.py new file mode 100644 index 0000000000..788f3a525b --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_fail.py @@ -0,0 +1,176 @@ +import sys +from unittest import TestCase + +import simplejson as json + +# 2007-10-05 +JSONDOCS = [ + # http://json.org/JSON_checker/test/fail1.json + '"A JSON payload should be an object or array, not a string."', + # http://json.org/JSON_checker/test/fail2.json + '["Unclosed array"', + # http://json.org/JSON_checker/test/fail3.json + '{unquoted_key: "keys must be quoted"}', + # http://json.org/JSON_checker/test/fail4.json + '["extra comma",]', + # http://json.org/JSON_checker/test/fail5.json + '["double extra comma",,]', + # http://json.org/JSON_checker/test/fail6.json + '[ , "<-- missing value"]', + # http://json.org/JSON_checker/test/fail7.json + '["Comma after the close"],', + # http://json.org/JSON_checker/test/fail8.json + '["Extra close"]]', + # http://json.org/JSON_checker/test/fail9.json + '{"Extra comma": true,}', + # http://json.org/JSON_checker/test/fail10.json + '{"Extra value after close": true} "misplaced quoted value"', + # http://json.org/JSON_checker/test/fail11.json + '{"Illegal expression": 1 + 2}', + # http://json.org/JSON_checker/test/fail12.json + '{"Illegal invocation": alert()}', + # http://json.org/JSON_checker/test/fail13.json + '{"Numbers cannot have leading zeroes": 013}', + # http://json.org/JSON_checker/test/fail14.json + '{"Numbers cannot be hex": 0x14}', + # http://json.org/JSON_checker/test/fail15.json + '["Illegal backslash escape: \\x15"]', + # http://json.org/JSON_checker/test/fail16.json + '[\\naked]', + # http://json.org/JSON_checker/test/fail17.json + '["Illegal backslash escape: \\017"]', + # http://json.org/JSON_checker/test/fail18.json + '[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', + # http://json.org/JSON_checker/test/fail19.json + '{"Missing colon" null}', + # http://json.org/JSON_checker/test/fail20.json + '{"Double colon":: null}', + # 
http://json.org/JSON_checker/test/fail21.json + '{"Comma instead of colon", null}', + # http://json.org/JSON_checker/test/fail22.json + '["Colon instead of comma": false]', + # http://json.org/JSON_checker/test/fail23.json + '["Bad value", truth]', + # http://json.org/JSON_checker/test/fail24.json + "['single quote']", + # http://json.org/JSON_checker/test/fail25.json + '["\ttab\tcharacter\tin\tstring\t"]', + # http://json.org/JSON_checker/test/fail26.json + '["tab\\ character\\ in\\ string\\ "]', + # http://json.org/JSON_checker/test/fail27.json + '["line\nbreak"]', + # http://json.org/JSON_checker/test/fail28.json + '["line\\\nbreak"]', + # http://json.org/JSON_checker/test/fail29.json + '[0e]', + # http://json.org/JSON_checker/test/fail30.json + '[0e+]', + # http://json.org/JSON_checker/test/fail31.json + '[0e+-1]', + # http://json.org/JSON_checker/test/fail32.json + '{"Comma instead if closing brace": true,', + # http://json.org/JSON_checker/test/fail33.json + '["mismatch"}', + # http://code.google.com/p/simplejson/issues/detail?id=3 + u'["A\u001FZ control characters in string"]', + # misc based on coverage + '{', + '{]', + '{"foo": "bar"]', + '{"foo": "bar"', + 'nul', + 'nulx', + '-', + '-x', + '-e', + '-e0', + '-Infinite', + '-Inf', + 'Infinit', + 'Infinite', + 'NaM', + 'NuN', + 'falsy', + 'fal', + 'trug', + 'tru', + '1e', + '1ex', + '1e-', + '1e-x', +] + +SKIPS = { + 1: "why not have a string payload?", + 18: "spec doesn't specify any nesting limitations", +} + +class TestFail(TestCase): + def test_failures(self): + for idx, doc in enumerate(JSONDOCS): + idx = idx + 1 + if idx in SKIPS: + json.loads(doc) + continue + try: + json.loads(doc) + except json.JSONDecodeError: + pass + else: + self.fail("Expected failure for fail%d.json: %r" % (idx, doc)) + + def test_array_decoder_issue46(self): + # http://code.google.com/p/simplejson/issues/detail?id=46 + for doc in [u'[,]', '[,]']: + try: + json.loads(doc) + except json.JSONDecodeError: + e = sys.exc_info()[1] + self.assertEqual(e.pos, 1) + self.assertEqual(e.lineno, 1) + self.assertEqual(e.colno, 2) + except Exception: + e = sys.exc_info()[1] + self.fail("Unexpected exception raised %r %s" % (e, e)) + else: + self.fail("Unexpected success parsing '[,]'") + + def test_truncated_input(self): + test_cases = [ + ('', 'Expecting value', 0), + ('[', "Expecting value or ']'", 1), + ('[42', "Expecting ',' delimiter", 3), + ('[42,', 'Expecting value', 4), + ('["', 'Unterminated string starting at', 1), + ('["spam', 'Unterminated string starting at', 1), + ('["spam"', "Expecting ',' delimiter", 7), + ('["spam",', 'Expecting value', 8), + ('{', 'Expecting property name enclosed in double quotes', 1), + ('{"', 'Unterminated string starting at', 1), + ('{"spam', 'Unterminated string starting at', 1), + ('{"spam"', "Expecting ':' delimiter", 7), + ('{"spam":', 'Expecting value', 8), + ('{"spam":42', "Expecting ',' delimiter", 10), + ('{"spam":42,', 'Expecting property name enclosed in double quotes', + 11), + ('"', 'Unterminated string starting at', 0), + ('"spam', 'Unterminated string starting at', 0), + ('[,', "Expecting value", 1), + ] + for data, msg, idx in test_cases: + try: + json.loads(data) + except json.JSONDecodeError: + e = sys.exc_info()[1] + self.assertEqual( + e.msg[:len(msg)], + msg, + "%r doesn't start with %r for %r" % (e.msg, msg, data)) + self.assertEqual( + e.pos, idx, + "pos %r != %r for %r" % (e.pos, idx, data)) + except Exception: + e = sys.exc_info()[1] + self.fail("Unexpected exception raised %r %s" % (e, e)) + else: + 
self.fail("Unexpected success parsing '%r'" % (data,)) diff --git a/awx/lib/site-packages/simplejson/tests/test_float.py b/awx/lib/site-packages/simplejson/tests/test_float.py new file mode 100644 index 0000000000..e382ec21ab --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_float.py @@ -0,0 +1,35 @@ +import math +from unittest import TestCase +from simplejson.compat import long_type, text_type +import simplejson as json +from simplejson.decoder import NaN, PosInf, NegInf + +class TestFloat(TestCase): + def test_degenerates_allow(self): + for inf in (PosInf, NegInf): + self.assertEqual(json.loads(json.dumps(inf)), inf) + # Python 2.5 doesn't have math.isnan + nan = json.loads(json.dumps(NaN)) + self.assertTrue((0 + nan) != nan) + + def test_degenerates_ignore(self): + for f in (PosInf, NegInf, NaN): + self.assertEqual(json.loads(json.dumps(f, ignore_nan=True)), None) + + def test_degenerates_deny(self): + for f in (PosInf, NegInf, NaN): + self.assertRaises(ValueError, json.dumps, f, allow_nan=False) + + def test_floats(self): + for num in [1617161771.7650001, math.pi, math.pi**100, + math.pi**-100, 3.1]: + self.assertEqual(float(json.dumps(num)), num) + self.assertEqual(json.loads(json.dumps(num)), num) + self.assertEqual(json.loads(text_type(json.dumps(num))), num) + + def test_ints(self): + for num in [1, long_type(1), 1<<32, 1<<64]: + self.assertEqual(json.dumps(num), str(num)) + self.assertEqual(int(json.dumps(num)), num) + self.assertEqual(json.loads(json.dumps(num)), num) + self.assertEqual(json.loads(text_type(json.dumps(num))), num) diff --git a/awx/lib/site-packages/simplejson/tests/test_for_json.py b/awx/lib/site-packages/simplejson/tests/test_for_json.py new file mode 100644 index 0000000000..b791b883b0 --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_for_json.py @@ -0,0 +1,97 @@ +import unittest +import simplejson as json + + +class ForJson(object): + def for_json(self): + return {'for_json': 1} + + +class NestedForJson(object): + def for_json(self): + return {'nested': ForJson()} + + +class ForJsonList(object): + def for_json(self): + return ['list'] + + +class DictForJson(dict): + def for_json(self): + return {'alpha': 1} + + +class ListForJson(list): + def for_json(self): + return ['list'] + + +class TestForJson(unittest.TestCase): + def assertRoundTrip(self, obj, other, for_json=True): + if for_json is None: + # None will use the default + s = json.dumps(obj) + else: + s = json.dumps(obj, for_json=for_json) + self.assertEqual( + json.loads(s), + other) + + def test_for_json_encodes_stand_alone_object(self): + self.assertRoundTrip( + ForJson(), + ForJson().for_json()) + + def test_for_json_encodes_object_nested_in_dict(self): + self.assertRoundTrip( + {'hooray': ForJson()}, + {'hooray': ForJson().for_json()}) + + def test_for_json_encodes_object_nested_in_list_within_dict(self): + self.assertRoundTrip( + {'list': [0, ForJson(), 2, 3]}, + {'list': [0, ForJson().for_json(), 2, 3]}) + + def test_for_json_encodes_object_nested_within_object(self): + self.assertRoundTrip( + NestedForJson(), + {'nested': {'for_json': 1}}) + + def test_for_json_encodes_list(self): + self.assertRoundTrip( + ForJsonList(), + ForJsonList().for_json()) + + def test_for_json_encodes_list_within_object(self): + self.assertRoundTrip( + {'nested': ForJsonList()}, + {'nested': ForJsonList().for_json()}) + + def test_for_json_encodes_dict_subclass(self): + self.assertRoundTrip( + DictForJson(a=1), + DictForJson(a=1).for_json()) + + def test_for_json_encodes_list_subclass(self): + 
self.assertRoundTrip( + ListForJson(['l']), + ListForJson(['l']).for_json()) + + def test_for_json_ignored_if_not_true_with_dict_subclass(self): + for for_json in (None, False): + self.assertRoundTrip( + DictForJson(a=1), + {'a': 1}, + for_json=for_json) + + def test_for_json_ignored_if_not_true_with_list_subclass(self): + for for_json in (None, False): + self.assertRoundTrip( + ListForJson(['l']), + ['l'], + for_json=for_json) + + def test_raises_typeerror_if_for_json_not_true_with_object(self): + self.assertRaises(TypeError, json.dumps, ForJson()) + self.assertRaises(TypeError, json.dumps, ForJson(), for_json=False) diff --git a/awx/lib/site-packages/simplejson/tests/test_indent.py b/awx/lib/site-packages/simplejson/tests/test_indent.py new file mode 100644 index 0000000000..cea25a575e --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_indent.py @@ -0,0 +1,86 @@ +from unittest import TestCase +import textwrap + +import simplejson as json +from simplejson.compat import StringIO + +class TestIndent(TestCase): + def test_indent(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', + 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + \t[ + \t\t"blorpie" + \t], + \t[ + \t\t"whoops" + \t], + \t[], + \t"d-shtaeou", + \t"d-nthiouh", + \t"i-vhbjkhnth", + \t{ + \t\t"nifty": 87 + \t}, + \t{ + \t\t"field": "yes", + \t\t"morefield": false + \t} + ]""") + + + d1 = json.dumps(h) + d2 = json.dumps(h, indent='\t', sort_keys=True, separators=(',', ': ')) + d3 = json.dumps(h, indent=' ', sort_keys=True, separators=(',', ': ')) + d4 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + h3 = json.loads(d3) + h4 = json.loads(d4) + + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(h3, h) + self.assertEqual(h4, h) + self.assertEqual(d3, expect.replace('\t', ' ')) + self.assertEqual(d4, expect.replace('\t', ' ')) + # NOTE: Python 2.4 textwrap.dedent converts tabs to spaces, + # so the following is expected to fail. Python 2.4 is not a + # supported platform in simplejson 2.1.0+. 
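+        # (d2 was produced with a literal tab indent, so it should match
+        # the tab-based template exactly on supported versions.)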
+ self.assertEqual(d2, expect) + + def test_indent0(self): + h = {3: 1} + def check(indent, expected): + d1 = json.dumps(h, indent=indent) + self.assertEqual(d1, expected) + + sio = StringIO() + json.dump(h, sio, indent=indent) + self.assertEqual(sio.getvalue(), expected) + + # indent=0 should emit newlines + check(0, '{\n"3": 1\n}') + # indent=None is more compact + check(None, '{"3": 1}') + + def test_separators(self): + lst = [1,2,3,4] + expect = '[\n1,\n2,\n3,\n4\n]' + expect_spaces = '[\n1, \n2, \n3, \n4\n]' + # Ensure that separators still works + self.assertEqual( + expect_spaces, + json.dumps(lst, indent=0, separators=(', ', ': '))) + # Force the new defaults + self.assertEqual( + expect, + json.dumps(lst, indent=0, separators=(',', ': '))) + # Added in 2.1.4 + self.assertEqual( + expect, + json.dumps(lst, indent=0)) diff --git a/awx/lib/site-packages/simplejson/tests/test_item_sort_key.py b/awx/lib/site-packages/simplejson/tests/test_item_sort_key.py new file mode 100644 index 0000000000..b05bfc8149 --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_item_sort_key.py @@ -0,0 +1,20 @@ +from unittest import TestCase + +import simplejson as json +from operator import itemgetter + +class TestItemSortKey(TestCase): + def test_simple_first(self): + a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'} + self.assertEqual( + '{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}', + json.dumps(a, item_sort_key=json.simple_first)) + + def test_case(self): + a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'} + self.assertEqual( + '{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}', + json.dumps(a, item_sort_key=itemgetter(0))) + self.assertEqual( + '{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}', + json.dumps(a, item_sort_key=lambda kv: kv[0].lower())) diff --git a/awx/lib/site-packages/simplejson/tests/test_namedtuple.py b/awx/lib/site-packages/simplejson/tests/test_namedtuple.py new file mode 100644 index 0000000000..438789405d --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_namedtuple.py @@ -0,0 +1,122 @@ +from __future__ import absolute_import +import unittest +import simplejson as json +from simplejson.compat import StringIO + +try: + from collections import namedtuple +except ImportError: + class Value(tuple): + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def _asdict(self): + return {'value': self[0]} + class Point(tuple): + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def _asdict(self): + return {'x': self[0], 'y': self[1]} +else: + Value = namedtuple('Value', ['value']) + Point = namedtuple('Point', ['x', 'y']) + +class DuckValue(object): + def __init__(self, *args): + self.value = Value(*args) + + def _asdict(self): + return self.value._asdict() + +class DuckPoint(object): + def __init__(self, *args): + self.point = Point(*args) + + def _asdict(self): + return self.point._asdict() + +class DeadDuck(object): + _asdict = None + +class DeadDict(dict): + _asdict = None + +CONSTRUCTORS = [ + lambda v: v, + lambda v: [v], + lambda v: [{'key': v}], +] + +class TestNamedTuple(unittest.TestCase): + def test_namedtuple_dumps(self): + for v in [Value(1), Point(1, 2), 
+                  DuckValue(1), DuckPoint(1, 2)]:
+            d = v._asdict()
+            self.assertEqual(d, json.loads(json.dumps(v)))
+            self.assertEqual(
+                d,
+                json.loads(json.dumps(v, namedtuple_as_object=True)))
+            self.assertEqual(d, json.loads(json.dumps(v, tuple_as_array=False)))
+            self.assertEqual(
+                d,
+                json.loads(json.dumps(v, namedtuple_as_object=True,
+                    tuple_as_array=False)))
+
+    def test_namedtuple_dumps_false(self):
+        for v in [Value(1), Point(1, 2)]:
+            l = list(v)
+            self.assertEqual(
+                l,
+                json.loads(json.dumps(v, namedtuple_as_object=False)))
+            self.assertRaises(TypeError, json.dumps, v,
+                tuple_as_array=False, namedtuple_as_object=False)
+
+    def test_namedtuple_dump(self):
+        for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
+            d = v._asdict()
+            sio = StringIO()
+            json.dump(v, sio)
+            self.assertEqual(d, json.loads(sio.getvalue()))
+            sio = StringIO()
+            json.dump(v, sio, namedtuple_as_object=True)
+            self.assertEqual(
+                d,
+                json.loads(sio.getvalue()))
+            sio = StringIO()
+            json.dump(v, sio, tuple_as_array=False)
+            self.assertEqual(d, json.loads(sio.getvalue()))
+            sio = StringIO()
+            json.dump(v, sio, namedtuple_as_object=True,
+                tuple_as_array=False)
+            self.assertEqual(
+                d,
+                json.loads(sio.getvalue()))
+
+    def test_namedtuple_dump_false(self):
+        for v in [Value(1), Point(1, 2)]:
+            l = list(v)
+            sio = StringIO()
+            json.dump(v, sio, namedtuple_as_object=False)
+            self.assertEqual(
+                l,
+                json.loads(sio.getvalue()))
+            self.assertRaises(TypeError, json.dump, v, StringIO(),
+                tuple_as_array=False, namedtuple_as_object=False)
+
+    def test_asdict_not_callable_dump(self):
+        for f in CONSTRUCTORS:
+            self.assertRaises(TypeError,
+                json.dump, f(DeadDuck()), StringIO(), namedtuple_as_object=True)
+            sio = StringIO()
+            json.dump(f(DeadDict()), sio, namedtuple_as_object=True)
+            self.assertEqual(
+                json.dumps(f({})),
+                sio.getvalue())
+
+    def test_asdict_not_callable_dumps(self):
+        for f in CONSTRUCTORS:
+            self.assertRaises(TypeError,
+                json.dumps, f(DeadDuck()), namedtuple_as_object=True)
+            self.assertEqual(
+                json.dumps(f({})),
+                json.dumps(f(DeadDict()), namedtuple_as_object=True))
diff --git a/awx/lib/site-packages/simplejson/tests/test_pass1.py b/awx/lib/site-packages/simplejson/tests/test_pass1.py
new file mode 100644
index 0000000000..f0b5b10e76
--- /dev/null
+++ b/awx/lib/site-packages/simplejson/tests/test_pass1.py
@@ -0,0 +1,71 @@
+from unittest import TestCase
+
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass1.json
+JSON = r'''
+[
+    "JSON Test Pattern pass1",
+    {"object with 1 member":["array with 1 element"]},
+    {},
+    [],
+    -42,
+    true,
+    false,
+    null,
+    {
+        "integer": 1234567890,
+        "real": -9876.543210,
+        "e": 0.123456789e-12,
+        "E": 1.234567890E+34,
+        "":  23456789012E66,
+        "zero": 0,
+        "one": 1,
+        "space": " ",
+        "quote": "\"",
+        "backslash": "\\",
+        "controls": "\b\f\n\r\t",
+        "slash": "/ & \/",
+        "alpha": "abcdefghijklmnopqrstuvwyz",
+        "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
+        "digit": "0123456789",
+        "special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
+        "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
+        "true": true,
+        "false": false,
+        "null": null,
+        "array":[  ],
+        "object":{  },
+        "address": "50 St. James Street",
+        "url": "http://www.JSON.org/",
+        "comment": "// /* <!-- --",
+        "# -- --> */": " ",
+        " s p a c e d " :[1,2 , 3
+
+,
+
+4 , 5        ,          6           ,7        ],"compact": [1,2,3,4,5,6,7],
+        "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
+        "quotes": "&#34; \u0022 %22 0x22 034 &#34;",
+        "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
+: "A key can be any string" + }, + 0.5 ,98.6 +, +99.44 +, + +1066, +1e1, +0.1e1, +1e-1, +1e00,2e+00,2e-00 +,"rosebud"] +''' + +class TestPass1(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumps(res) + self.assertEqual(res, json.loads(out)) diff --git a/awx/lib/site-packages/simplejson/tests/test_pass2.py b/awx/lib/site-packages/simplejson/tests/test_pass2.py new file mode 100644 index 0000000000..5d812b3bbe --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_pass2.py @@ -0,0 +1,14 @@ +from unittest import TestCase +import simplejson as json + +# from http://json.org/JSON_checker/test/pass2.json +JSON = r''' +[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] +''' + +class TestPass2(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumps(res) + self.assertEqual(res, json.loads(out)) diff --git a/awx/lib/site-packages/simplejson/tests/test_pass3.py b/awx/lib/site-packages/simplejson/tests/test_pass3.py new file mode 100644 index 0000000000..821d60b22c --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_pass3.py @@ -0,0 +1,20 @@ +from unittest import TestCase + +import simplejson as json + +# from http://json.org/JSON_checker/test/pass3.json +JSON = r''' +{ + "JSON Test Pattern pass3": { + "The outermost value": "must be an object or array.", + "In this test": "It is an object." + } +} +''' + +class TestPass3(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumps(res) + self.assertEqual(res, json.loads(out)) diff --git a/awx/lib/site-packages/simplejson/tests/test_recursion.py b/awx/lib/site-packages/simplejson/tests/test_recursion.py new file mode 100644 index 0000000000..662eb667ec --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_recursion.py @@ -0,0 +1,67 @@ +from unittest import TestCase + +import simplejson as json + +class JSONTestObject: + pass + + +class RecursiveJSONEncoder(json.JSONEncoder): + recurse = False + def default(self, o): + if o is JSONTestObject: + if self.recurse: + return [JSONTestObject] + else: + return 'JSONTestObject' + return json.JSONEncoder.default(o) + + +class TestRecursion(TestCase): + def test_listrecursion(self): + x = [] + x.append(x) + try: + json.dumps(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on list recursion") + x = [] + y = [x] + x.append(y) + try: + json.dumps(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on alternating list recursion") + y = [] + x = [y, y] + # ensure that the marker is cleared + json.dumps(x) + + def test_dictrecursion(self): + x = {} + x["test"] = x + try: + json.dumps(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on dict recursion") + x = {} + y = {"a": x, "b": x} + # ensure that the marker is cleared + json.dumps(y) + + def test_defaultrecursion(self): + enc = RecursiveJSONEncoder() + self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"') + enc.recurse = True + try: + enc.encode(JSONTestObject) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on default recursion") diff --git a/awx/lib/site-packages/simplejson/tests/test_scanstring.py b/awx/lib/site-packages/simplejson/tests/test_scanstring.py new file mode 100644 index 0000000000..3d98f0d82e --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_scanstring.py @@ -0,0 +1,194 @@ +import sys +from unittest import 
TestCase + +import simplejson as json +import simplejson.decoder +from simplejson.compat import b, PY3 + +class TestScanString(TestCase): + # The bytes type is intentionally not used in most of these tests + # under Python 3 because the decoder immediately coerces to str before + # calling scanstring. In Python 2 we are testing the code paths + # for both unicode and str. + # + # The reason this is done is because Python 3 would require + # entirely different code paths for parsing bytes and str. + # + def test_py_scanstring(self): + self._test_scanstring(simplejson.decoder.py_scanstring) + + def test_c_scanstring(self): + if not simplejson.decoder.c_scanstring: + return + self._test_scanstring(simplejson.decoder.c_scanstring) + + def _test_scanstring(self, scanstring): + if sys.maxunicode == 65535: + self.assertEqual( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 6)) + else: + self.assertEqual( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 5)) + + self.assertEqual( + scanstring('"\\u007b"', 1, None, True), + (u'{', 8)) + + self.assertEqual( + scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True), + (u'A JSON payload should be an object or array, not a string.', 60)) + + self.assertEqual( + scanstring('["Unclosed array"', 2, None, True), + (u'Unclosed array', 17)) + + self.assertEqual( + scanstring('["extra comma",]', 2, None, True), + (u'extra comma', 14)) + + self.assertEqual( + scanstring('["double extra comma",,]', 2, None, True), + (u'double extra comma', 21)) + + self.assertEqual( + scanstring('["Comma after the close"],', 2, None, True), + (u'Comma after the close', 24)) + + self.assertEqual( + scanstring('["Extra close"]]', 2, None, True), + (u'Extra close', 14)) + + self.assertEqual( + scanstring('{"Extra comma": true,}', 2, None, True), + (u'Extra comma', 14)) + + self.assertEqual( + scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True), + (u'Extra value after close', 26)) + + self.assertEqual( + scanstring('{"Illegal expression": 1 + 2}', 2, None, True), + (u'Illegal expression', 21)) + + self.assertEqual( + scanstring('{"Illegal invocation": alert()}', 2, None, True), + (u'Illegal invocation', 21)) + + self.assertEqual( + scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True), + (u'Numbers cannot have leading zeroes', 37)) + + self.assertEqual( + scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True), + (u'Numbers cannot be hex', 24)) + + self.assertEqual( + scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True), + (u'Too deep', 30)) + + self.assertEqual( + scanstring('{"Missing colon" null}', 2, None, True), + (u'Missing colon', 16)) + + self.assertEqual( + scanstring('{"Double colon":: null}', 2, None, True), + (u'Double colon', 15)) + + self.assertEqual( + scanstring('{"Comma instead of colon", null}', 2, None, True), + (u'Comma instead of colon', 25)) + + self.assertEqual( + scanstring('["Colon instead of comma": false]', 2, None, True), + (u'Colon instead of comma', 25)) + + self.assertEqual( + scanstring('["Bad value", truth]', 2, None, True), + (u'Bad value', 12)) + + for c in map(chr, range(0x00, 0x1f)): + self.assertEqual( + scanstring(c + '"', 0, None, False), + (c, 2)) + self.assertRaises( + ValueError, + scanstring, c + '"', 0, None, True) + + self.assertRaises(ValueError, scanstring, '', 0, None, True) + self.assertRaises(ValueError, scanstring, 'a', 0, None, True) + self.assertRaises(ValueError, 
scanstring, '\\', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u0', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u01', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u012', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u0123', 0, None, True) + if sys.maxunicode > 65535: + self.assertRaises(ValueError, + scanstring, '\\ud834\\u"', 0, None, True) + self.assertRaises(ValueError, + scanstring, '\\ud834\\x0123"', 0, None, True) + + def test_issue3623(self): + self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1, + "xxx") + self.assertRaises(UnicodeDecodeError, + json.encoder.encode_basestring_ascii, b("xx\xff")) + + def test_overflow(self): + # Python 2.5 does not have maxsize, Python 3 does not have maxint + maxsize = getattr(sys, 'maxsize', getattr(sys, 'maxint', None)) + assert maxsize is not None + self.assertRaises(OverflowError, json.decoder.scanstring, "xxx", + maxsize + 1) + + def test_surrogates(self): + scanstring = json.decoder.scanstring + + def assertScan(given, expect, test_utf8=True): + givens = [given] + if not PY3 and test_utf8: + givens.append(given.encode('utf8')) + for given in givens: + (res, count) = scanstring(given, 1, None, True) + self.assertEqual(len(given), count) + self.assertEqual(res, expect) + + assertScan( + u'"z\\ud834\\u0079x"', + u'z\ud834yx') + assertScan( + u'"z\\ud834\\udd20x"', + u'z\U0001d120x') + assertScan( + u'"z\\ud834\\ud834\\udd20x"', + u'z\ud834\U0001d120x') + assertScan( + u'"z\\ud834x"', + u'z\ud834x') + assertScan( + u'"z\\udd20x"', + u'z\udd20x') + assertScan( + u'"z\ud834x"', + u'z\ud834x') + # It may look strange to join strings together, but Python is drunk. + # https://gist.github.com/etrepum/5538443 + assertScan( + u'"z\\ud834\udd20x12345"', + u''.join([u'z\ud834', u'\udd20x12345'])) + assertScan( + u'"z\ud834\\udd20x"', + u''.join([u'z\ud834', u'\udd20x'])) + # these have different behavior given UTF8 input, because the surrogate + # pair may be joined (in maxunicode > 65535 builds) + assertScan( + u''.join([u'"z\ud834', u'\udd20x"']), + u''.join([u'z\ud834', u'\udd20x']), + test_utf8=False) + + self.assertRaises(ValueError, + scanstring, u'"z\\ud83x"', 1, None, True) + self.assertRaises(ValueError, + scanstring, u'"z\\ud834\\udd2x"', 1, None, True) diff --git a/awx/lib/site-packages/simplejson/tests/test_separators.py b/awx/lib/site-packages/simplejson/tests/test_separators.py new file mode 100644 index 0000000000..91b4d4fb6f --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_separators.py @@ -0,0 +1,42 @@ +import textwrap +from unittest import TestCase + +import simplejson as json + + +class TestSeparators(TestCase): + def test_separators(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + [ + "blorpie" + ] , + [ + "whoops" + ] , + [] , + "d-shtaeou" , + "d-nthiouh" , + "i-vhbjkhnth" , + { + "nifty" : 87 + } , + { + "field" : "yes" , + "morefield" : false + } + ]""") + + + d1 = json.dumps(h) + d2 = json.dumps(h, indent=' ', sort_keys=True, separators=(' ,', ' : ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(d2, expect) diff --git a/awx/lib/site-packages/simplejson/tests/test_speedups.py b/awx/lib/site-packages/simplejson/tests/test_speedups.py new file mode 100644 index 
0000000000..825ecf26f9 --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_speedups.py @@ -0,0 +1,20 @@ +from unittest import TestCase + +from simplejson import encoder, scanner + +def has_speedups(): + return encoder.c_make_encoder is not None + +class TestDecode(TestCase): + def test_make_scanner(self): + if not has_speedups(): + return + self.assertRaises(AttributeError, scanner.c_make_scanner, 1) + + def test_make_encoder(self): + if not has_speedups(): + return + self.assertRaises(TypeError, encoder.c_make_encoder, + None, + "\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75", + None) diff --git a/awx/lib/site-packages/simplejson/tests/test_tool.py b/awx/lib/site-packages/simplejson/tests/test_tool.py new file mode 100644 index 0000000000..ac2a14c906 --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_tool.py @@ -0,0 +1,97 @@ +from __future__ import with_statement +import os +import sys +import textwrap +import unittest +import subprocess +import tempfile +try: + # Python 3.x + from test.support import strip_python_stderr +except ImportError: + # Python 2.6+ + try: + from test.test_support import strip_python_stderr + except ImportError: + # Python 2.5 + import re + def strip_python_stderr(stderr): + return re.sub( + r"\[\d+ refs\]\r?\n?$".encode(), + "".encode(), + stderr).strip() + +class TestTool(unittest.TestCase): + data = """ + + [["blorpie"],[ "whoops" ] , [ + ],\t"d-shtaeou",\r"d-nthiouh", + "i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field" + :"yes"} ] + """ + + expect = textwrap.dedent("""\ + [ + [ + "blorpie" + ], + [ + "whoops" + ], + [], + "d-shtaeou", + "d-nthiouh", + "i-vhbjkhnth", + { + "nifty": 87 + }, + { + "field": "yes", + "morefield": false + } + ] + """) + + def runTool(self, args=None, data=None): + argv = [sys.executable, '-m', 'simplejson.tool'] + if args: + argv.extend(args) + proc = subprocess.Popen(argv, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = proc.communicate(data) + self.assertEqual(strip_python_stderr(err), ''.encode()) + self.assertEqual(proc.returncode, 0) + return out + + def test_stdin_stdout(self): + self.assertEqual( + self.runTool(data=self.data.encode()), + self.expect.encode()) + + def test_infile_stdout(self): + with tempfile.NamedTemporaryFile() as infile: + infile.write(self.data.encode()) + infile.flush() + self.assertEqual( + self.runTool(args=[infile.name]), + self.expect.encode()) + + def test_infile_outfile(self): + with tempfile.NamedTemporaryFile() as infile: + infile.write(self.data.encode()) + infile.flush() + # outfile will get overwritten by tool, so the delete + # may not work on some platforms. Do it manually. 
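+            # (The finally block below closes the handle first and then
+            # unlinks by name, so nothing depends on delete-on-close.)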
+ outfile = tempfile.NamedTemporaryFile() + try: + self.assertEqual( + self.runTool(args=[infile.name, outfile.name]), + ''.encode()) + with open(outfile.name, 'rb') as f: + self.assertEqual(f.read(), self.expect.encode()) + finally: + outfile.close() + if os.path.exists(outfile.name): + os.unlink(outfile.name) diff --git a/awx/lib/site-packages/simplejson/tests/test_tuple.py b/awx/lib/site-packages/simplejson/tests/test_tuple.py new file mode 100644 index 0000000000..a6a991005c --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_tuple.py @@ -0,0 +1,51 @@ +import unittest + +from simplejson.compat import StringIO +import simplejson as json + +class TestTuples(unittest.TestCase): + def test_tuple_array_dumps(self): + t = (1, 2, 3) + expect = json.dumps(list(t)) + # Default is True + self.assertEqual(expect, json.dumps(t)) + self.assertEqual(expect, json.dumps(t, tuple_as_array=True)) + self.assertRaises(TypeError, json.dumps, t, tuple_as_array=False) + # Ensure that the "default" does not get called + self.assertEqual(expect, json.dumps(t, default=repr)) + self.assertEqual(expect, json.dumps(t, tuple_as_array=True, + default=repr)) + # Ensure that the "default" gets called + self.assertEqual( + json.dumps(repr(t)), + json.dumps(t, tuple_as_array=False, default=repr)) + + def test_tuple_array_dump(self): + t = (1, 2, 3) + expect = json.dumps(list(t)) + # Default is True + sio = StringIO() + json.dump(t, sio) + self.assertEqual(expect, sio.getvalue()) + sio = StringIO() + json.dump(t, sio, tuple_as_array=True) + self.assertEqual(expect, sio.getvalue()) + self.assertRaises(TypeError, json.dump, t, StringIO(), + tuple_as_array=False) + # Ensure that the "default" does not get called + sio = StringIO() + json.dump(t, sio, default=repr) + self.assertEqual(expect, sio.getvalue()) + sio = StringIO() + json.dump(t, sio, tuple_as_array=True, default=repr) + self.assertEqual(expect, sio.getvalue()) + # Ensure that the "default" gets called + sio = StringIO() + json.dump(t, sio, tuple_as_array=False, default=repr) + self.assertEqual( + json.dumps(repr(t)), + sio.getvalue()) + +class TestNamedTuple(unittest.TestCase): + def test_namedtuple_dump(self): + pass diff --git a/awx/lib/site-packages/simplejson/tests/test_unicode.py b/awx/lib/site-packages/simplejson/tests/test_unicode.py new file mode 100644 index 0000000000..f04cc5c0ba --- /dev/null +++ b/awx/lib/site-packages/simplejson/tests/test_unicode.py @@ -0,0 +1,145 @@ +import sys +from unittest import TestCase + +import simplejson as json +from simplejson.compat import unichr, text_type, b, u + +class TestUnicode(TestCase): + def test_encoding1(self): + encoder = json.JSONEncoder(encoding='utf-8') + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = encoder.encode(u) + js = encoder.encode(s) + self.assertEqual(ju, js) + + def test_encoding2(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = json.dumps(u, encoding='utf-8') + js = json.dumps(s, encoding='utf-8') + self.assertEqual(ju, js) + + def test_encoding3(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps(u) + self.assertEqual(j, '"\\u03b1\\u03a9"') + + def test_encoding4(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps([u]) + self.assertEqual(j, '["\\u03b1\\u03a9"]') + + def test_encoding5(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps(u, ensure_ascii=False) 
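+        # With ensure_ascii=False the Greek letters are kept as-is rather
+        # than emitted as \uXXXX escapes, so j should equal the quoted u.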
+ self.assertEqual(j, u'"' + u + u'"') + + def test_encoding6(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps([u], ensure_ascii=False) + self.assertEqual(j, u'["' + u + u'"]') + + def test_big_unicode_encode(self): + u = u'\U0001d120' + self.assertEqual(json.dumps(u), '"\\ud834\\udd20"') + self.assertEqual(json.dumps(u, ensure_ascii=False), u'"\U0001d120"') + + def test_big_unicode_decode(self): + u = u'z\U0001d120x' + self.assertEqual(json.loads('"' + u + '"'), u) + self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u) + + def test_unicode_decode(self): + for i in range(0, 0xd7ff): + u = unichr(i) + #s = '"\\u{0:04x}"'.format(i) + s = '"\\u%04x"' % (i,) + self.assertEqual(json.loads(s), u) + + def test_object_pairs_hook_with_unicode(self): + s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' + p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4), + (u"qrt", 5), (u"pad", 6), (u"hoy", 7)] + self.assertEqual(json.loads(s), eval(s)) + self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p) + od = json.loads(s, object_pairs_hook=json.OrderedDict) + self.assertEqual(od, json.OrderedDict(p)) + self.assertEqual(type(od), json.OrderedDict) + # the object_pairs_hook takes priority over the object_hook + self.assertEqual(json.loads(s, + object_pairs_hook=json.OrderedDict, + object_hook=lambda x: None), + json.OrderedDict(p)) + + + def test_default_encoding(self): + self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')), + {'a': u'\xe9'}) + + def test_unicode_preservation(self): + self.assertEqual(type(json.loads(u'""')), text_type) + self.assertEqual(type(json.loads(u'"a"')), text_type) + self.assertEqual(type(json.loads(u'["a"]')[0]), text_type) + + def test_ensure_ascii_false_returns_unicode(self): + # http://code.google.com/p/simplejson/issues/detail?id=48 + self.assertEqual(type(json.dumps([], ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumps(0, ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumps({}, ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumps("", ensure_ascii=False)), text_type) + + def test_ensure_ascii_false_bytestring_encoding(self): + # http://code.google.com/p/simplejson/issues/detail?id=48 + doc1 = {u'quux': b('Arr\xc3\xaat sur images')} + doc2 = {u'quux': u('Arr\xeat sur images')} + doc_ascii = '{"quux": "Arr\\u00eat sur images"}' + doc_unicode = u'{"quux": "Arr\xeat sur images"}' + self.assertEqual(json.dumps(doc1), doc_ascii) + self.assertEqual(json.dumps(doc2), doc_ascii) + self.assertEqual(json.dumps(doc1, ensure_ascii=False), doc_unicode) + self.assertEqual(json.dumps(doc2, ensure_ascii=False), doc_unicode) + + def test_ensure_ascii_linebreak_encoding(self): + # http://timelessrepo.com/json-isnt-a-javascript-subset + s1 = u'\u2029\u2028' + s2 = s1.encode('utf8') + expect = '"\\u2029\\u2028"' + self.assertEqual(json.dumps(s1), expect) + self.assertEqual(json.dumps(s2), expect) + self.assertEqual(json.dumps(s1, ensure_ascii=False), expect) + self.assertEqual(json.dumps(s2, ensure_ascii=False), expect) + + def test_invalid_escape_sequences(self): + # incomplete escape sequence + self.assertRaises(json.JSONDecodeError, json.loads, '"\\u') + self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1') + self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12') + self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123') + self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1234') + # invalid escape sequence + 
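+        # (each case below is long enough but has a non-hex digit in one
+        # of the four positions)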
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123x"')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12x4"')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1x34"')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\ux234"')
+        if sys.maxunicode > 65535:
+            # invalid escape sequence for low surrogate
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000x"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00x0"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0x00"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\ux000"')
+
+    def test_ensure_ascii_still_works(self):
+        # in the ascii range, ensure that everything is the same
+        for c in map(unichr, range(0, 127)):
+            self.assertEqual(
+                json.dumps(c, ensure_ascii=False),
+                json.dumps(c))
+        snowman = u'\N{SNOWMAN}'
+        self.assertEqual(
+            json.dumps(c + snowman, ensure_ascii=False),
+            u'"' + c + snowman + u'"')
diff --git a/awx/lib/site-packages/simplejson/tool.py b/awx/lib/site-packages/simplejson/tool.py
new file mode 100644
index 0000000000..062e8e2c18
--- /dev/null
+++ b/awx/lib/site-packages/simplejson/tool.py
@@ -0,0 +1,42 @@
+r"""Command-line tool to validate and pretty-print JSON
+
+Usage::
+
+    $ echo '{"json":"obj"}' | python -m simplejson.tool
+    {
+        "json": "obj"
+    }
+    $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+    Expecting property name: line 1 column 2 (char 2)
+
+"""
+from __future__ import with_statement
+import sys
+import simplejson as json
+
+def main():
+    if len(sys.argv) == 1:
+        infile = sys.stdin
+        outfile = sys.stdout
+    elif len(sys.argv) == 2:
+        infile = open(sys.argv[1], 'r')
+        outfile = sys.stdout
+    elif len(sys.argv) == 3:
+        infile = open(sys.argv[1], 'r')
+        outfile = open(sys.argv[2], 'w')
+    else:
+        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
+    with infile:
+        try:
+            obj = json.load(infile,
+                            object_pairs_hook=json.OrderedDict,
+                            use_decimal=True)
+        except ValueError:
+            raise SystemExit(sys.exc_info()[1])
+    with outfile:
+        json.dump(obj, outfile, sort_keys=True, indent='    ', use_decimal=True)
+        outfile.write('\n')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/awx/lib/site-packages/six.py b/awx/lib/site-packages/six.py
index eae31454ae..85898ec712 100644
--- a/awx/lib/site-packages/six.py
+++ b/awx/lib/site-packages/six.py
@@ -2,32 +2,34 @@
 # Copyright (c) 2010-2013 Benjamin Peterson
 #
-# Permission is hereby granted, free of charge, to any person obtaining a copy of
-# this software and associated documentation files (the "Software"), to deal in
-# the Software without restriction, including without limitation the rights to
-# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-# the Software, and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so,
subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. import operator import sys import types __author__ = "Benjamin Peterson " -__version__ = "1.3.0" +__version__ = "1.4.1" -# True if we are running on Python 3. +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: @@ -61,7 +63,7 @@ else: else: # 64-bit MAXSIZE = int((1 << 63) - 1) - del X + del X def _add_doc(func, doc): @@ -136,13 +138,17 @@ class _MovedItems(types.ModuleType): _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), @@ -179,6 +185,9 @@ _moved_attributes = [ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("winreg", "_winreg"), ] @@ -186,7 +195,144 @@ for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) del attr -moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves") +moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves") + + + +class Module_six_moves_urllib_parse(types.ModuleType): + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", 
"urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse") +sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse") + + +class Module_six_moves_urllib_error(types.ModuleType): + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error") +sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error") + + +class Module_six_moves_urllib_request(types.ModuleType): + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", 
"urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request") +sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request") + + +class Module_six_moves_urllib_response(types.ModuleType): + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response") +sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(types.ModuleType): + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser") +sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + parse = sys.modules[__name__ + ".moves.urllib_parse"] + error = sys.modules[__name__ + ".moves.urllib_error"] + request = sys.modules[__name__ + ".moves.urllib_request"] + response = sys.modules[__name__ + ".moves.urllib_response"] + robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"] + + +sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib") def add_move(move): @@ -252,11 +398,16 @@ if PY3: def get_unbound_function(unbound): return unbound + create_bound_method = types.MethodType + Iterator = object else: def get_unbound_function(unbound): return unbound.im_func + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + class Iterator(object): def next(self): @@ -297,12 +448,16 @@ if PY3: return s.encode("latin-1") def u(s): return s + unichr = chr if sys.version_info[1] <= 1: def int2byte(i): return bytes((i,)) else: # This is about 2x faster than the implementation above on 3.2+ int2byte = operator.methodcaller("to_bytes", 1, "big") + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter import io StringIO = 
io.StringIO BytesIO = io.BytesIO @@ -311,7 +466,14 @@ else: return s def u(s): return unicode(s, "unicode_escape") + unichr = unichr int2byte = chr + def byte2int(bs): + return ord(bs[0]) + def indexbytes(buf, i): + return ord(buf[i]) + def iterbytes(buf): + return (ord(byte) for byte in buf) import StringIO StringIO = BytesIO = StringIO.StringIO _add_doc(b, """Byte literal""") @@ -399,6 +561,17 @@ else: _add_doc(reraise, """Reraise an exception.""") -def with_metaclass(meta, base=object): +def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" - return meta("NewBase", (base,), {}) + return meta("NewBase", bases, {}) + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + for slots_var in orig_vars.get('__slots__', ()): + orig_vars.pop(slots_var) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper diff --git a/awx/lib/site-packages/swiftclient/__init__.py b/awx/lib/site-packages/swiftclient/__init__.py new file mode 100644 index 0000000000..1d742494ef --- /dev/null +++ b/awx/lib/site-packages/swiftclient/__init__.py @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2012 Rackspace +# flake8: noqa +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""" +OpenStack Swift Python client binding. +""" +from .client import * + +# At setup.py time, we haven't installed anything yet, so there +# is nothing that is able to set this version property. Squelching +# that exception here should be fine- if there are problems with +# pkg_resources in a real install, that will manifest itself as +# an error still +try: + from swiftclient import version + + __version__ = version.version_info.cached_version_string() +except Exception: + pass diff --git a/awx/lib/site-packages/swiftclient/client.py b/awx/lib/site-packages/swiftclient/client.py new file mode 100644 index 0000000000..a95ce707dd --- /dev/null +++ b/awx/lib/site-packages/swiftclient/client.py @@ -0,0 +1,1245 @@ +# Copyright (c) 2010-2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
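+#
+# A rough usage sketch of the module-level API defined below; the endpoint
+# URL and credentials are placeholder assumptions, not values shipped with
+# this library:
+#
+#   url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
+#                         'test:tester', 'testing')
+#   put_object(url, token, 'mycontainer', 'hello.txt', contents='data')
+#   headers, body = get_object(url, token, 'mycontainer', 'hello.txt')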
+ +""" +Cloud Files client library used internally +""" + +import socket +import sys +import logging +import warnings +from functools import wraps + +from urllib import quote as _quote +from urlparse import urlparse, urlunparse +from httplib import HTTPException, HTTPConnection, HTTPSConnection +from time import sleep + +from swiftclient.exceptions import ClientException, InvalidHeadersException + +try: + from swiftclient.https_connection import HTTPSConnectionNoSSLComp +except ImportError: + HTTPSConnectionNoSSLComp = HTTPSConnection + + +try: + from logging import NullHandler +except ImportError: + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + +logger = logging.getLogger("swiftclient") +logger.addHandler(NullHandler()) + + +def http_log(args, kwargs, resp, body): + if not logger.isEnabledFor(logging.DEBUG): + return + + string_parts = ['curl -i'] + for element in args: + if element == 'HEAD': + string_parts.append(' -I') + elif element in ('GET', 'POST', 'PUT'): + string_parts.append(' -X %s' % element) + else: + string_parts.append(' %s' % element) + + if 'headers' in kwargs: + for element in kwargs['headers']: + header = ' -H "%s: %s"' % (element, kwargs['headers'][element]) + string_parts.append(header) + + logger.debug("REQ: %s\n" % "".join(string_parts)) + if 'raw_body' in kwargs: + logger.debug("REQ BODY (RAW): %s\n" % (kwargs['raw_body'])) + if 'body' in kwargs: + logger.debug("REQ BODY: %s\n" % (kwargs['body'])) + + logger.debug("RESP STATUS: %s\n", resp.status) + if body: + logger.debug("RESP BODY: %s\n", body) + + +def quote(value, safe='/'): + """ + Patched version of urllib.quote that encodes utf8 strings before quoting + """ + value = encode_utf8(value) + if isinstance(value, str): + return _quote(value, safe) + else: + return value + + +def validate_headers(headers): + if headers: + for key, value in headers.iteritems(): + if '\n' in value: + raise InvalidHeadersException("%r header contained a " + "newline" % key) + if '\r' in value: + raise InvalidHeadersException("%r header contained a " + "carriage return" % key) + + +def encode_utf8(value): + if isinstance(value, unicode): + value = value.encode('utf8') + return value + + +# look for a real json parser first +try: + # simplejson is popular and pretty good + from simplejson import loads as json_loads +except ImportError: + # 2.6 will have a json module in the stdlib + from json import loads as json_loads + + +def http_connection(url, proxy=None, ssl_compression=True): + """ + Make an HTTPConnection or HTTPSConnection + + :param url: url to connect to + :param proxy: proxy to connect through, if any; None by default; str of the + format 'http://127.0.0.1:8888' to set one + :param ssl_compression: Whether to enable compression at the SSL layer. + If set to 'False' and the pyOpenSSL library is + present an attempt to disable SSL compression + will be made. This may provide a performance + increase for https upload/download operations. 
+ :returns: tuple of (parsed url, connection object) + :raises ClientException: Unable to handle protocol scheme + """ + url = encode_utf8(url) + parsed = urlparse(url) + proxy_parsed = urlparse(proxy) if proxy else None + host = proxy_parsed if proxy else parsed.netloc + if parsed.scheme == 'http': + conn = HTTPConnection(host) + elif parsed.scheme == 'https': + if ssl_compression is True: + conn = HTTPSConnection(host) + else: + conn = HTTPSConnectionNoSSLComp(host) + else: + raise ClientException('Cannot handle protocol scheme %s for url %s' % + (parsed.scheme, repr(url))) + + def putheader_wrapper(func): + + @wraps(func) + def putheader_escaped(key, value): + func(encode_utf8(key), encode_utf8(value)) + return putheader_escaped + conn.putheader = putheader_wrapper(conn.putheader) + + def request_wrapper(func): + + @wraps(func) + def request_escaped(method, url, body=None, headers=None): + validate_headers(headers) + url = encode_utf8(url) + if body: + body = encode_utf8(body) + func(method, url, body=body, headers=headers or {}) + return request_escaped + conn.request = request_wrapper(conn.request) + if proxy: + try: + # python 2.6 method + conn._set_tunnel(parsed.hostname, parsed.port) + except AttributeError: + # python 2.7 method + conn.set_tunnel(parsed.hostname, parsed.port) + return parsed, conn + + +def get_auth_1_0(url, user, key, snet): + parsed, conn = http_connection(url) + method = 'GET' + conn.request(method, parsed.path, '', + {'X-Auth-User': user, 'X-Auth-Key': key}) + resp = conn.getresponse() + body = resp.read() + http_log((url, method,), {}, resp, body) + url = resp.getheader('x-storage-url') + + # There is a side-effect on current Rackspace 1.0 server where a + # bad URL would get you that document page and a 200. We error out + # if we don't have a x-storage-url header and if we get a body. + if resp.status < 200 or resp.status >= 300 or (body and not url): + raise ClientException('Auth GET failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=parsed.path, http_status=resp.status, + http_reason=resp.reason) + if snet: + parsed = list(urlparse(url)) + # Second item in the list is the netloc + netloc = parsed[1] + parsed[1] = 'snet-' + netloc + url = urlunparse(parsed) + return url, resp.getheader('x-storage-token', + resp.getheader('x-auth-token')) + + +def get_keystoneclient_2_0(auth_url, user, key, os_options, **kwargs): + """ + Authenticate against a auth 2.0 server. + + We are using the keystoneclient library for our 2.0 authentication. + """ + + insecure = kwargs.get('insecure', False) + debug = logger.isEnabledFor(logging.DEBUG) and True or False + + try: + from keystoneclient.v2_0 import client as ksclient + from keystoneclient import exceptions + except ImportError: + sys.exit(''' +Auth version 2.0 requires python-keystoneclient, install it or use Auth +version 1.0 which requires ST_AUTH, ST_USER, and ST_KEY environment +variables to be set or overridden with -A, -U, or -K.''') + + try: + _ksclient = ksclient.Client(username=user, + password=key, + tenant_name=os_options.get('tenant_name'), + tenant_id=os_options.get('tenant_id'), + debug=debug, + cacert=kwargs.get('cacert'), + auth_url=auth_url, insecure=insecure) + except exceptions.Unauthorized: + raise ClientException('Unauthorised. Check username, password' + ' and tenant name/id') + except exceptions.AuthorizationFailure as err: + raise ClientException('Authorization Failure. 
%s' % err) + service_type = os_options.get('service_type') or 'object-store' + endpoint_type = os_options.get('endpoint_type') or 'publicURL' + try: + endpoint = _ksclient.service_catalog.url_for( + attr='region', + filter_value=os_options.get('region_name'), + service_type=service_type, + endpoint_type=endpoint_type) + except exceptions.EndpointNotFound: + raise ClientException('Endpoint for %s not found - ' + 'have you specified a region?' % service_type) + return (endpoint, _ksclient.auth_token) + + +def get_auth(auth_url, user, key, **kwargs): + """ + Get authentication/authorization credentials. + + The snet parameter is used for Rackspace's ServiceNet internal network + implementation. In this function, it simply adds *snet-* to the beginning + of the host name for the returned storage URL. With Rackspace Cloud Files, + use of this network path causes no bandwidth charges but requires the + client to be running on Rackspace's ServiceNet network. + """ + auth_version = kwargs.get('auth_version', '1') + os_options = kwargs.get('os_options', {}) + + storage_url, token = None, None + if auth_version in ['1.0', '1', 1]: + storage_url, token = get_auth_1_0(auth_url, + user, + key, + kwargs.get('snet')) + elif auth_version in ['2.0', '2', 2]: + # We are allowing to specify a token/storage-url to re-use + # without having to re-authenticate. + if (os_options.get('object_storage_url') and + os_options.get('auth_token')): + return (os_options.get('object_storage_url'), + os_options.get('auth_token')) + + # We are handling a special use case here when we were + # allowing specifying the account/tenant_name with the -U + # argument + if not kwargs.get('tenant_name') and ':' in user: + (os_options['tenant_name'], + user) = user.split(':') + + # We are allowing to have an tenant_name argument in get_auth + # directly without having os_options + if kwargs.get('tenant_name'): + os_options['tenant_name'] = kwargs['tenant_name'] + + if (not 'tenant_name' in os_options): + raise ClientException('No tenant specified') + + insecure = kwargs.get('insecure', False) + cacert = kwargs.get('cacert', None) + storage_url, token = get_keystoneclient_2_0(auth_url, user, + key, os_options, + cacert=cacert, + insecure=insecure) + else: + raise ClientException('Unknown auth_version %s specified.' + % auth_version) + + # Override storage url, if necessary + if os_options.get('object_storage_url'): + return os_options['object_storage_url'], token + else: + return storage_url, token + + +def store_response(resp, response_dict): + """ + store information about an operation into a dict + + :param resp: an http response object containing the response + headers + :param response_dict: a dict into which are placed the + status, reason and a dict of lower-cased headers + """ + if response_dict is not None: + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + + response_dict['status'] = resp.status + response_dict['reason'] = resp.reason + response_dict['headers'] = resp_headers + + +def get_account(url, token, marker=None, limit=None, prefix=None, + end_marker=None, http_conn=None, full_listing=False): + """ + Get a listing of containers for the account. 
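+
+    Example (assuming ``url`` and ``token`` came from :func:`get_auth`)::
+
+        headers, containers = get_account(url, token, full_listing=True)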
+ + :param url: storage URL + :param token: auth token + :param marker: marker query + :param limit: limit query + :param prefix: prefix query + :param end_marker: end_marker query + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param full_listing: if True, return a full listing, else returns a max + of 10000 listings + :returns: a tuple of (response headers, a list of containers) The response + headers will be a dict and all header names will be lowercase. + :raises ClientException: HTTP GET request failed + """ + if not http_conn: + http_conn = http_connection(url) + if full_listing: + rv = get_account(url, token, marker, limit, prefix, + end_marker, http_conn) + listing = rv[1] + while listing: + marker = listing[-1]['name'] + listing = \ + get_account(url, token, marker, limit, prefix, + end_marker, http_conn)[1] + if listing: + rv[1].extend(listing) + return rv + parsed, conn = http_conn + qs = 'format=json' + if marker: + qs += '&marker=%s' % quote(marker) + if limit: + qs += '&limit=%d' % limit + if prefix: + qs += '&prefix=%s' % quote(prefix) + if end_marker: + qs += '&end_marker=%s' % quote(end_marker) + full_path = '%s?%s' % (parsed.path, qs) + headers = {'X-Auth-Token': token} + method = 'GET' + conn.request(method, full_path, '', headers) + resp = conn.getresponse() + body = resp.read() + http_log(("%s?%s" % (url, qs), method,), {'headers': headers}, resp, body) + + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + if resp.status < 200 or resp.status >= 300: + raise ClientException('Account GET failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=parsed.path, http_query=qs, + http_status=resp.status, http_reason=resp.reason, + http_response_content=body) + if resp.status == 204: + body + return resp_headers, [] + return resp_headers, json_loads(body) + + +def head_account(url, token, http_conn=None): + """ + Get account stats. + + :param url: storage URL + :param token: auth token + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :returns: a dict containing the response's headers (all header names will + be lowercase) + :raises ClientException: HTTP HEAD request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + method = "HEAD" + headers = {'X-Auth-Token': token} + conn.request(method, parsed.path, '', headers) + resp = conn.getresponse() + body = resp.read() + http_log((url, method,), {'headers': headers}, resp, body) + if resp.status < 200 or resp.status >= 300: + raise ClientException('Account HEAD failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=parsed.path, http_status=resp.status, + http_reason=resp.reason, + http_response_content=body) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers + + +def post_account(url, token, headers, http_conn=None, response_dict=None): + """ + Update an account's metadata. 
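+
+    Example (the metadata header shown is a hypothetical value)::
+
+        post_account(url, token, {'X-Account-Meta-Color': 'blue'})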
+ + :param url: storage URL + :param token: auth token + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param response_dict: an optional dictionary into which to place + the response - status, reason and headers + :raises ClientException: HTTP POST request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + method = 'POST' + headers['X-Auth-Token'] = token + conn.request(method, parsed.path, '', headers) + resp = conn.getresponse() + body = resp.read() + http_log((url, method,), {'headers': headers}, resp, body) + + store_response(resp, response_dict) + + if resp.status < 200 or resp.status >= 300: + raise ClientException('Account POST failed', + http_scheme=parsed.scheme, + http_host=conn.host, + http_port=conn.port, + http_path=parsed.path, + http_status=resp.status, + http_reason=resp.reason, + http_response_content=body) + + +def get_container(url, token, container, marker=None, limit=None, + prefix=None, delimiter=None, end_marker=None, + path=None, http_conn=None, + full_listing=False): + """ + Get a listing of objects for the container. + + :param url: storage URL + :param token: auth token + :param container: container name to get a listing for + :param marker: marker query + :param limit: limit query + :param prefix: prefix query + :param delimiter: string to delimit the queries on + :param end_marker: marker query + :param path: path query (equivalent: "delimiter=/" and "prefix=path/") + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param full_listing: if True, return a full listing, else returns a max + of 10000 listings + :returns: a tuple of (response headers, a list of objects) The response + headers will be a dict and all header names will be lowercase. 
+ :raises ClientException: HTTP GET request failed + """ + if not http_conn: + http_conn = http_connection(url) + if full_listing: + rv = get_container(url, token, container, marker, limit, prefix, + delimiter, end_marker, path, http_conn) + listing = rv[1] + while listing: + if not delimiter: + marker = listing[-1]['name'] + else: + marker = listing[-1].get('name', listing[-1].get('subdir')) + listing = get_container(url, token, container, marker, limit, + prefix, delimiter, end_marker, path, + http_conn)[1] + if listing: + rv[1].extend(listing) + return rv + parsed, conn = http_conn + cont_path = '%s/%s' % (parsed.path, quote(container)) + qs = 'format=json' + if marker: + qs += '&marker=%s' % quote(marker) + if limit: + qs += '&limit=%d' % limit + if prefix: + qs += '&prefix=%s' % quote(prefix) + if delimiter: + qs += '&delimiter=%s' % quote(delimiter) + if end_marker: + qs += '&end_marker=%s' % quote(end_marker) + if path: + qs += '&path=%s' % quote(path) + headers = {'X-Auth-Token': token} + method = 'GET' + conn.request(method, '%s?%s' % (cont_path, qs), '', headers) + resp = conn.getresponse() + body = resp.read() + http_log(('%s?%s' % (url, qs), method,), {'headers': headers}, resp, body) + + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container GET failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=cont_path, + http_query=qs, http_status=resp.status, + http_reason=resp.reason, + http_response_content=body) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + if resp.status == 204: + return resp_headers, [] + return resp_headers, json_loads(body) + + +def head_container(url, token, container, http_conn=None, headers=None): + """ + Get container stats. 
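+
+    Example ('mycontainer' is a placeholder name)::
+
+        headers = head_container(url, token, 'mycontainer')
+        count = headers.get('x-container-object-count')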
+ + :param url: storage URL + :param token: auth token + :param container: container name to get stats for + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :returns: a dict containing the response's headers (all header names will + be lowercase) + :raises ClientException: HTTP HEAD request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed.path, quote(container)) + method = 'HEAD' + req_headers = {'X-Auth-Token': token} + if headers: + req_headers.update(headers) + conn.request(method, path, '', req_headers) + resp = conn.getresponse() + body = resp.read() + http_log(('%s%s' % (url.replace(parsed.path, ''), path), method,), + {'headers': req_headers}, resp, body) + + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container HEAD failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason, + http_response_content=body) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers + + +def put_container(url, token, container, headers=None, http_conn=None, + response_dict=None): + """ + Create a container + + :param url: storage URL + :param token: auth token + :param container: container name to create + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param response_dict: an optional dictionary into which to place + the response - status, reason and headers + :raises ClientException: HTTP PUT request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed.path, quote(container)) + method = 'PUT' + if not headers: + headers = {} + headers['X-Auth-Token'] = token + if not 'content-length' in (k.lower() for k in headers): + headers['Content-Length'] = '0' + conn.request(method, path, '', headers) + resp = conn.getresponse() + body = resp.read() + + store_response(resp, response_dict) + + http_log(('%s%s' % (url.replace(parsed.path, ''), path), method,), + {'headers': headers}, resp, body) + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container PUT failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason, + http_response_content=body) + + +def post_container(url, token, container, headers, http_conn=None, + response_dict=None): + """ + Update a container's metadata. 
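+
+    Example (hypothetical container name and metadata header)::
+
+        post_container(url, token, 'mycontainer',
+                       {'X-Container-Meta-Color': 'blue'})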
+ + :param url: storage URL + :param token: auth token + :param container: container name to update + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param response_dict: an optional dictionary into which to place + the response - status, reason and headers + :raises ClientException: HTTP POST request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed.path, quote(container)) + method = 'POST' + headers['X-Auth-Token'] = token + if not 'content-length' in (k.lower() for k in headers): + headers['Content-Length'] = '0' + conn.request(method, path, '', headers) + resp = conn.getresponse() + body = resp.read() + http_log(('%s%s' % (url.replace(parsed.path, ''), path), method,), + {'headers': headers}, resp, body) + + store_response(resp, response_dict) + + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container POST failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason, + http_response_content=body) + + +def delete_container(url, token, container, http_conn=None, + response_dict=None): + """ + Delete a container + + :param url: storage URL + :param token: auth token + :param container: container name to delete + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param response_dict: an optional dictionary into which to place + the response - status, reason and headers + :raises ClientException: HTTP DELETE request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed.path, quote(container)) + headers = {'X-Auth-Token': token} + method = 'DELETE' + conn.request(method, path, '', headers) + resp = conn.getresponse() + body = resp.read() + http_log(('%s%s' % (url.replace(parsed.path, ''), path), method,), + {'headers': headers}, resp, body) + + store_response(resp, response_dict) + + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container DELETE failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason, + http_response_content=body) + + +def get_object(url, token, container, name, http_conn=None, + resp_chunk_size=None, query_string=None, + response_dict=None, headers=None): + """ + Get an object + + :param url: storage URL + :param token: auth token + :param container: container name that the object is in + :param name: object name to get + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param resp_chunk_size: if defined, chunk size of data to read. NOTE: If + you specify a resp_chunk_size you must fully read + the object's contents before making another + request. + :param query_string: if set will be appended with '?' to generated path + :param response_dict: an optional dictionary into which to place + the response - status, reason and headers + :param headers: an optional dictionary with additional headers to include + in the request + :returns: a tuple of (response headers, the object's contents) The response + headers will be a dict and all header names will be lowercase. 
+ :raises ClientException: HTTP GET request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s/%s' % (parsed.path, quote(container), quote(name)) + if query_string: + path += '?' + query_string + method = 'GET' + headers = headers.copy() if headers else {} + headers['X-Auth-Token'] = token + conn.request(method, path, '', headers) + resp = conn.getresponse() + + parsed_response = {} + store_response(resp, parsed_response) + if response_dict is not None: + response_dict.update(parsed_response) + + if resp.status < 200 or resp.status >= 300: + body = resp.read() + http_log(('%s%s' % (url.replace(parsed.path, ''), path), method,), + {'headers': headers}, resp, body) + raise ClientException('Object GET failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=path, http_status=resp.status, + http_reason=resp.reason, + http_response_content=body) + if resp_chunk_size: + + def _object_body(): + buf = resp.read(resp_chunk_size) + while buf: + yield buf + buf = resp.read(resp_chunk_size) + object_body = _object_body() + else: + object_body = resp.read() + http_log(('%s%s' % (url.replace(parsed.path, ''), path), method,), + {'headers': headers}, resp, None) + + return parsed_response['headers'], object_body + + +def head_object(url, token, container, name, http_conn=None): + """ + Get object info + + :param url: storage URL + :param token: auth token + :param container: container name that the object is in + :param name: object name to get info for + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :returns: a dict containing the response's headers (all header names will + be lowercase) + :raises ClientException: HTTP HEAD request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s/%s' % (parsed.path, quote(container), quote(name)) + method = 'HEAD' + headers = {'X-Auth-Token': token} + conn.request(method, path, '', headers) + resp = conn.getresponse() + body = resp.read() + http_log(('%s%s' % (url.replace(parsed.path, ''), path), method,), + {'headers': headers}, resp, body) + if resp.status < 200 or resp.status >= 300: + raise ClientException('Object HEAD failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=path, http_status=resp.status, + http_reason=resp.reason, + http_response_content=body) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers + + +def put_object(url, token=None, container=None, name=None, contents=None, + content_length=None, etag=None, chunk_size=None, + content_type=None, headers=None, http_conn=None, proxy=None, + query_string=None, response_dict=None): + """ + Put an object + + :param url: storage URL + :param token: auth token; if None, no token will be sent + :param container: container name that the object is in; if None, the + container name is expected to be part of the url + :param name: object name to put; if None, the object name is expected to be + part of the url + :param contents: a string or a file like object to read object data from; + if None, a zero-byte put will be done + :param content_length: value to send as content-length header; also limits + the amount read from contents; if None, it will be + computed via the contents or chunked transfer + encoding will be used + :param etag: etag of contents; if None, no etag will be sent + :param 
chunk_size: chunk size of data to write; it defaults to 65536; + used only if the the contents object has a 'read' + method, eg. file-like objects, ignored otherwise + :param content_type: value to send as content-type header; if None, no + content-type will be set (remote end will likely try + to auto-detect it) + :param headers: additional headers to include in the request, if any + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param proxy: proxy to connect through, if any; None by default; str of the + format 'http://127.0.0.1:8888' to set one + :param query_string: if set will be appended with '?' to generated path + :param response_dict: an optional dictionary into which to place + the response - status, reason and headers + :returns: etag + :raises ClientException: HTTP PUT request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url, proxy=proxy) + path = parsed.path + if container: + path = '%s/%s' % (path.rstrip('/'), quote(container)) + if name: + path = '%s/%s' % (path.rstrip('/'), quote(name)) + if query_string: + path += '?' + query_string + if headers: + headers = dict(headers) + else: + headers = {} + if token: + headers['X-Auth-Token'] = token + if etag: + headers['ETag'] = etag.strip('"') + if content_length is not None: + headers['Content-Length'] = str(content_length) + else: + for n, v in headers.iteritems(): + if n.lower() == 'content-length': + content_length = int(v) + if content_type is not None: + headers['Content-Type'] = content_type + if not contents: + headers['Content-Length'] = '0' + if hasattr(contents, 'read'): + if chunk_size is None: + chunk_size = 65536 + conn.putrequest('PUT', path) + for header, value in headers.iteritems(): + conn.putheader(header, value) + if content_length is None: + conn.putheader('Transfer-Encoding', 'chunked') + conn.endheaders() + chunk = contents.read(chunk_size) + while chunk: + conn.send('%x\r\n%s\r\n' % (len(chunk), chunk)) + chunk = contents.read(chunk_size) + conn.send('0\r\n\r\n') + else: + conn.endheaders() + left = content_length + while left > 0: + size = chunk_size + if size > left: + size = left + chunk = contents.read(size) + conn.send(chunk) + left -= len(chunk) + else: + if chunk_size is not None: + warn_msg = '%s object has no \"read\" method, ignoring chunk_size'\ + % type(contents).__name__ + warnings.warn(warn_msg, stacklevel=2) + conn.request('PUT', path, contents, headers) + resp = conn.getresponse() + body = resp.read() + headers = {'X-Auth-Token': token} + http_log(('%s%s' % (url.replace(parsed.path, ''), path), 'PUT',), + {'headers': headers}, resp, body) + + store_response(resp, response_dict) + + if resp.status < 200 or resp.status >= 300: + raise ClientException('Object PUT failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=path, http_status=resp.status, + http_reason=resp.reason, + http_response_content=body) + + return resp.getheader('etag', '').strip('"') + + +def post_object(url, token, container, name, headers, http_conn=None, + response_dict=None): + """ + Update object metadata + + :param url: storage URL + :param token: auth token + :param container: container name that the object is in + :param name: name of the object to update + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param response_dict: an optional dictionary into which to place + the response - 
status, reason and headers + :raises ClientException: HTTP POST request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s/%s' % (parsed.path, quote(container), quote(name)) + headers['X-Auth-Token'] = token + conn.request('POST', path, '', headers) + resp = conn.getresponse() + body = resp.read() + http_log(('%s%s' % (url.replace(parsed.path, ''), path), 'POST',), + {'headers': headers}, resp, body) + + store_response(resp, response_dict) + + if resp.status < 200 or resp.status >= 300: + raise ClientException('Object POST failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=path, http_status=resp.status, + http_reason=resp.reason, + http_response_content=body) + + +def delete_object(url, token=None, container=None, name=None, http_conn=None, + headers=None, proxy=None, query_string=None, + response_dict=None): + """ + Delete object + + :param url: storage URL + :param token: auth token; if None, no token will be sent + :param container: container name that the object is in; if None, the + container name is expected to be part of the url + :param name: object name to delete; if None, the object name is expected to + be part of the url + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param headers: additional headers to include in the request + :param proxy: proxy to connect through, if any; None by default; str of the + format 'http://127.0.0.1:8888' to set one + :param query_string: if set will be appended with '?' to generated path + :param response_dict: an optional dictionary into which to place + the response - status, reason and headers + :raises ClientException: HTTP DELETE request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url, proxy=proxy) + path = parsed.path + if container: + path = '%s/%s' % (path.rstrip('/'), quote(container)) + if name: + path = '%s/%s' % (path.rstrip('/'), quote(name)) + if query_string: + path += '?' 
+ query_string + if headers: + headers = dict(headers) + else: + headers = {} + if token: + headers['X-Auth-Token'] = token + conn.request('DELETE', path, '', headers) + resp = conn.getresponse() + body = resp.read() + http_log(('%s%s' % (url.replace(parsed.path, ''), path), 'DELETE',), + {'headers': headers}, resp, body) + + store_response(resp, response_dict) + + if resp.status < 200 or resp.status >= 300: + raise ClientException('Object DELETE failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason, + http_response_content=body) + + +class Connection(object): + """Convenience class to make requests that will also retry the request""" + + def __init__(self, authurl=None, user=None, key=None, retries=5, + preauthurl=None, preauthtoken=None, snet=False, + starting_backoff=1, max_backoff=64, tenant_name=None, + os_options=None, auth_version="1", cacert=None, + insecure=False, ssl_compression=True): + """ + :param authurl: authentication URL + :param user: user name to authenticate as + :param key: key/password to authenticate with + :param retries: Number of times to retry the request before failing + :param preauthurl: storage URL (if you have already authenticated) + :param preauthtoken: authentication token (if you have already + authenticated) note authurl/user/key/tenant_name + are not required when specifying preauthtoken + :param snet: use SERVICENET internal network default is False + :param starting_backoff: initial delay between retries (seconds) + :param max_backoff: maximum delay between retries (seconds) + :param auth_version: OpenStack auth version, default is 1.0 + :param tenant_name: The tenant/account name, required when connecting + to a auth 2.0 system. + :param os_options: The OpenStack options which can have tenant_id, + auth_token, service_type, endpoint_type, + tenant_name, object_storage_url, region_name + :param insecure: Allow to access insecure keystone server. + The keystone's certificate will not be verified. + :param ssl_compression: Whether to enable compression at the SSL layer. + If set to 'False' and the pyOpenSSL library is + present an attempt to disable SSL compression + will be made. This may provide a performance + increase for https upload/download operations. 
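+
+        Example (the auth URL and credentials are placeholders)::
+
+            conn = Connection('http://127.0.0.1:8080/auth/v1.0',
+                              'test:tester', 'testing', retries=3)
+            conn.put_container('mycontainer')
+            conn.put_object('mycontainer', 'hello.txt', 'hello world')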
+ """ + self.authurl = authurl + self.user = user + self.key = key + self.retries = retries + self.http_conn = None + self.url = preauthurl + self.token = preauthtoken + self.attempts = 0 + self.snet = snet + self.starting_backoff = starting_backoff + self.max_backoff = max_backoff + self.auth_version = auth_version + self.os_options = os_options or {} + if tenant_name: + self.os_options['tenant_name'] = tenant_name + self.cacert = cacert + self.insecure = insecure + self.ssl_compression = ssl_compression + + def get_auth(self): + return get_auth(self.authurl, self.user, self.key, + snet=self.snet, + auth_version=self.auth_version, + os_options=self.os_options, + cacert=self.cacert, + insecure=self.insecure) + + def http_connection(self): + return http_connection(self.url, + ssl_compression=self.ssl_compression) + + def _add_response_dict(self, target_dict, kwargs): + if target_dict is not None: + response_dict = kwargs['response_dict'] + if 'response_dicts' in target_dict: + target_dict['response_dicts'].append(response_dict) + else: + target_dict['response_dicts'] = [response_dict] + target_dict.update(response_dict) + + def _retry(self, reset_func, func, *args, **kwargs): + self.attempts = 0 + retried_auth = False + backoff = self.starting_backoff + caller_response_dict = kwargs.pop('response_dict', None) + while self.attempts <= self.retries: + self.attempts += 1 + try: + if not self.url or not self.token: + self.url, self.token = self.get_auth() + self.http_conn = None + if not self.http_conn: + self.http_conn = self.http_connection() + kwargs['http_conn'] = self.http_conn + if caller_response_dict is not None: + kwargs['response_dict'] = {} + rv = func(self.url, self.token, *args, **kwargs) + self._add_response_dict(caller_response_dict, kwargs) + return rv + except (socket.error, HTTPException) as e: + self._add_response_dict(caller_response_dict, kwargs) + if self.attempts > self.retries: + logger.exception(e) + raise + self.http_conn = None + except ClientException as err: + self._add_response_dict(caller_response_dict, kwargs) + if self.attempts > self.retries: + logger.exception(err) + raise + if err.http_status == 401: + self.url = self.token = None + if retried_auth or not all((self.authurl, + self.user, + self.key)): + logger.exception(err) + raise + retried_auth = True + elif err.http_status == 408: + self.http_conn = None + elif 500 <= err.http_status <= 599: + pass + else: + logger.exception(err) + raise + sleep(backoff) + backoff = min(backoff * 2, self.max_backoff) + if reset_func: + reset_func(func, *args, **kwargs) + + def head_account(self): + """Wrapper for :func:`head_account`""" + return self._retry(None, head_account) + + def get_account(self, marker=None, limit=None, prefix=None, + end_marker=None, full_listing=False): + """Wrapper for :func:`get_account`""" + # TODO(unknown): With full_listing=True this will restart the entire + # listing with each retry. Need to make a better version that just + # retries where it left off. 
+ return self._retry(None, get_account, marker=marker, limit=limit, + prefix=prefix, end_marker=end_marker, + full_listing=full_listing) + + def post_account(self, headers, response_dict=None): + """Wrapper for :func:`post_account`""" + return self._retry(None, post_account, headers, + response_dict=response_dict) + + def head_container(self, container): + """Wrapper for :func:`head_container`""" + return self._retry(None, head_container, container) + + def get_container(self, container, marker=None, limit=None, prefix=None, + delimiter=None, end_marker=None, path=None, + full_listing=False): + """Wrapper for :func:`get_container`""" + # TODO(unknown): With full_listing=True this will restart the entire + # listing with each retry. Need to make a better version that just + # retries where it left off. + return self._retry(None, get_container, container, marker=marker, + limit=limit, prefix=prefix, delimiter=delimiter, + end_marker=end_marker, path=path, + full_listing=full_listing) + + def put_container(self, container, headers=None, response_dict=None): + """Wrapper for :func:`put_container`""" + return self._retry(None, put_container, container, headers=headers, + response_dict=response_dict) + + def post_container(self, container, headers, response_dict=None): + """Wrapper for :func:`post_container`""" + return self._retry(None, post_container, container, headers, + response_dict=response_dict) + + def delete_container(self, container, response_dict=None): + """Wrapper for :func:`delete_container`""" + return self._retry(None, delete_container, container, + response_dict=response_dict) + + def head_object(self, container, obj): + """Wrapper for :func:`head_object`""" + return self._retry(None, head_object, container, obj) + + def get_object(self, container, obj, resp_chunk_size=None, + query_string=None, response_dict=None, headers=None): + """Wrapper for :func:`get_object`""" + return self._retry(None, get_object, container, obj, + resp_chunk_size=resp_chunk_size, + query_string=query_string, + response_dict=response_dict, headers=headers) + + def put_object(self, container, obj, contents, content_length=None, + etag=None, chunk_size=None, content_type=None, + headers=None, query_string=None, response_dict=None): + """Wrapper for :func:`put_object`""" + + def _default_reset(*args, **kwargs): + raise ClientException('put_object(%r, %r, ...) failure and no ' + 'ability to reset contents for reupload.' 
+ % (container, obj)) + + if isinstance(contents, str): + # if it's a str then you can retry as much as you want + reset_func = None + else: + reset_func = _default_reset + tell = getattr(contents, 'tell', None) + seek = getattr(contents, 'seek', None) + if tell and seek: + orig_pos = tell() + reset_func = lambda *a, **k: seek(orig_pos) + elif not contents: + reset_func = lambda *a, **k: None + + return self._retry(reset_func, put_object, container, obj, contents, + content_length=content_length, etag=etag, + chunk_size=chunk_size, content_type=content_type, + headers=headers, query_string=query_string, + response_dict=response_dict) + + def post_object(self, container, obj, headers, response_dict=None): + """Wrapper for :func:`post_object`""" + return self._retry(None, post_object, container, obj, headers, + response_dict=response_dict) + + def delete_object(self, container, obj, query_string=None, + response_dict=None): + """Wrapper for :func:`delete_object`""" + return self._retry(None, delete_object, container, obj, + query_string=query_string, + response_dict=response_dict) diff --git a/awx/lib/site-packages/swiftclient/exceptions.py b/awx/lib/site-packages/swiftclient/exceptions.py new file mode 100644 index 0000000000..fe730e5c31 --- /dev/null +++ b/awx/lib/site-packages/swiftclient/exceptions.py @@ -0,0 +1,72 @@ +# Copyright (c) 2010-2013 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ + +class ClientException(Exception): + + def __init__(self, msg, http_scheme='', http_host='', http_port='', + http_path='', http_query='', http_status=0, http_reason='', + http_device='', http_response_content=''): + Exception.__init__(self, msg) + self.msg = msg + self.http_scheme = http_scheme + self.http_host = http_host + self.http_port = http_port + self.http_path = http_path + self.http_query = http_query + self.http_status = http_status + self.http_reason = http_reason + self.http_device = http_device + self.http_response_content = http_response_content + + def __str__(self): + a = self.msg + b = '' + if self.http_scheme: + b += '%s://' % self.http_scheme + if self.http_host: + b += self.http_host + if self.http_port: + b += ':%s' % self.http_port + if self.http_path: + b += self.http_path + if self.http_query: + b += '?%s' % self.http_query + if self.http_status: + if b: + b = '%s %s' % (b, self.http_status) + else: + b = str(self.http_status) + if self.http_reason: + if b: + b = '%s %s' % (b, self.http_reason) + else: + b = '- %s' % self.http_reason + if self.http_device: + if b: + b = '%s: device %s' % (b, self.http_device) + else: + b = 'device %s' % self.http_device + if self.http_response_content: + if len(self.http_response_content) <= 60: + b += ' %s' % self.http_response_content + else: + b += ' [first 60 chars of response] %s' \ + % self.http_response_content[:60] + return b and '%s: %s' % (a, b) or a + + +class InvalidHeadersException(Exception): + pass diff --git a/awx/lib/site-packages/swiftclient/https_connection.py b/awx/lib/site-packages/swiftclient/https_connection.py new file mode 100644 index 0000000000..2a2dc1f064 --- /dev/null +++ b/awx/lib/site-packages/swiftclient/https_connection.py @@ -0,0 +1,95 @@ +# Copyright (c) 2013 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +HTTPS/SSL related functionality +""" + +import socket + +from httplib import HTTPSConnection + +import OpenSSL + +try: + from eventlet.green.OpenSSL.SSL import GreenConnection + from eventlet.greenio import GreenSocket + from eventlet.patcher import is_monkey_patched + + def getsockopt(self, *args, **kwargs): + return self.fd.getsockopt(*args, **kwargs) + # The above is a workaround for an eventlet bug in getsockopt. + # TODO(mclaren): Workaround can be removed when this fix lands: + # https://bitbucket.org/eventlet/eventlet/commits/609f230 + GreenSocket.getsockopt = getsockopt +except ImportError: + def is_monkey_patched(*args): + return False + + +class HTTPSConnectionNoSSLComp(HTTPSConnection): + """ + Extended HTTPSConnection which uses the OpenSSL library + for disabling SSL compression. + Note: This functionality can eventually be replaced + with native Python 3.3 code. + """ + def __init__(self, host): + HTTPSConnection.__init__(self, host) + self.setcontext() + + def setcontext(self): + """ + Set up the OpenSSL context. + """ + self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) + # Disable SSL layer compression. 
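+        # (The literal 0x20000 below is the value of OpenSSL's
+        # SSL_OP_NO_COMPRESSION option; it is presumably hard-coded here
+        # because older pyOpenSSL releases do not expose the constant by
+        # name.)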
+ self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION + + def connect(self): + """ + Connect to an SSL port using the OpenSSL library and apply + per-connection parameters. + """ + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock = OpenSSLConnectionDelegator(self.context, sock) + self.sock.connect((self.host, self.port)) + + +class OpenSSLConnectionDelegator(object): + """ + An OpenSSL.SSL.Connection delegator. + + Supplies an additional 'makefile' method which httplib requires + and is not present in OpenSSL.SSL.Connection. + + Note: Since it is not possible to inherit from OpenSSL.SSL.Connection + a delegator must be used. + """ + def __init__(self, *args, **kwargs): + if is_monkey_patched('socket'): + # If we are running in a monkey patched environment + # use eventlet's GreenConnection -- it handles eventlet's + # non-blocking sockets correctly. + Connection = GreenConnection + else: + Connection = OpenSSL.SSL.Connection + self.connection = Connection(*args, **kwargs) + + def __getattr__(self, name): + return getattr(self.connection, name) + + def makefile(self, *args, **kwargs): + return socket._fileobject(self.connection, *args, **kwargs) diff --git a/awx/lib/site-packages/swiftclient/multithreading.py b/awx/lib/site-packages/swiftclient/multithreading.py new file mode 100644 index 0000000000..890a7899ad --- /dev/null +++ b/awx/lib/site-packages/swiftclient/multithreading.py @@ -0,0 +1,241 @@ +# Copyright (c) 2010-2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +from time import sleep +from Queue import Queue +from threading import Thread +from traceback import format_exception + +from swiftclient.exceptions import ClientException + + +class StopWorkerThreadSignal(object): + pass + + +class QueueFunctionThread(Thread): + """ + Calls ``func`` for each item in ``queue``; ``func`` is called with a + de-queued item as the first arg followed by ``*args`` and ``**kwargs``. + + Any exceptions raised by ``func`` are stored in :attr:`self.exc_infos`. + + If the optional kwarg ``store_results`` is specified, it must be a list and + each result of invoking ``func`` will be appended to that list. + + Putting a :class:`StopWorkerThreadSignal` instance into queue will cause + this thread to exit. + """ + + def __init__(self, queue, func, *args, **kwargs): + """ + :param queue: A :class:`Queue` object from which work jobs will be + pulled. + :param func: A callable which will be invoked with a dequeued item + followed by ``*args`` and ``**kwargs``. + :param \*args: Optional positional arguments for ``func``. + :param \*\*kwargs: Optional kwargs for func. If the kwarg + ``store_results`` is specified, its value must be a + list, and every result from invoking ``func`` will + be appended to the supplied list. The kwarg + ``store_results`` will not be passed into ``func``.
+ """ + Thread.__init__(self) + self.queue = queue + self.func = func + self.args = args + self.kwargs = kwargs + self.exc_infos = [] + self.store_results = kwargs.pop('store_results', None) + + def run(self): + while True: + item = self.queue.get() + if isinstance(item, StopWorkerThreadSignal): + break + try: + result = self.func(item, *self.args, **self.kwargs) + if self.store_results is not None: + self.store_results.append(result) + except Exception: + self.exc_infos.append(sys.exc_info()) + + +class QueueFunctionManager(object): + """ + A context manager to handle the life-cycle of a single :class:`Queue` + and a list of associated :class:`QueueFunctionThread` instances. + + This class is not usually instantiated directly. Instead, call the + :meth:`MultiThreadingManager.queue_manager` object method, + which will return an instance of this class. + + When entering the context, ``thread_count`` :class:`QueueFunctionThread` + instances are created and started. The input queue is returned. Inside + the context, any work item put into the queue will get worked on by one of + the :class:`QueueFunctionThread` instances. + + When the context is exited, all threads are sent a + :class:`StopWorkerThreadSignal` instance and then all threads are waited + upon. Finally, any exceptions from any of the threads are reported on via + the supplied ``thread_manager``'s :meth:`error` method. If an + ``error_counter`` list was supplied on instantiation, its first element is + incremented once for every exception which occurred. + """ + + def __init__(self, func, thread_count, thread_manager, thread_args=None, + thread_kwargs=None, error_counter=None, + connection_maker=None): + """ + :param func: The worker function which will be passed into each + :class:`QueueFunctionThread`'s constructor. + :param thread_count: The number of worker threads to run. + :param thread_manager: An instance of :class:`MultiThreadingManager`. + :param thread_args: Optional positional arguments to be passed into + each invocation of ``func`` after the de-queued + work item. + :param thread_kwargs: Optional keyword arguments to be passed into each + invocation of ``func``. If a list is supplied as + the ``store_results`` keyword argument, it will + be filled with every result of invoking ``func`` + in all threads. + :param error_counter: Optional list containing one integer. If + supplied, the list's first element will be + incremented once for each exception in any + thread. This happens only when exiting the + context. + :param connection_maker: Optional callable. If supplied, this callable + will be invoked once per created thread, and + the result will be passed into func after the + de-queued work item but before ``thread_args`` + and ``thread_kwargs``. This is used to ensure + each thread has its own connection to Swift. 
+ """ + self.func = func + self.thread_count = thread_count + self.thread_manager = thread_manager + self.error_counter = error_counter + self.connection_maker = connection_maker + self.queue = Queue(10000) + self.thread_list = [] + self.thread_args = thread_args if thread_args else () + self.thread_kwargs = thread_kwargs if thread_kwargs else {} + + def __enter__(self): + for _junk in xrange(self.thread_count): + if self.connection_maker: + thread_args = (self.connection_maker(),) + self.thread_args + else: + thread_args = self.thread_args + qf_thread = QueueFunctionThread(self.queue, self.func, + *thread_args, **self.thread_kwargs) + qf_thread.start() + self.thread_list.append(qf_thread) + return self.queue + + def __exit__(self, exc_type, exc_value, traceback): + for thread in [t for t in self.thread_list if t.isAlive()]: + self.queue.put(StopWorkerThreadSignal()) + + while any(map(QueueFunctionThread.is_alive, self.thread_list)): + sleep(0.05) + + for thread in self.thread_list: + for info in thread.exc_infos: + if self.error_counter: + self.error_counter[0] += 1 + if isinstance(info[1], ClientException): + self.thread_manager.error(str(info[1])) + else: + self.thread_manager.error(''.join(format_exception(*info))) + + +class MultiThreadingManager(object): + """ + One object to manage context for multi-threading. This should make + bin/swift less error-prone and allow us to test this code. + + This object is a context manager and returns itself into the context. When + entering the context, two printing threads are created (see below) and they + are waited on and cleaned up when exiting the context. + + A convenience method, :meth:`queue_manager`, is provided to create a + :class:`QueueFunctionManager` context manager (a thread-pool with an + associated input queue for work items). + + Also, thread-safe printing to two streams is provided. The + :meth:`print_msg` method will print to the supplied ``print_stream`` + (defaults to ``sys.stdout``) and the :meth:`error` method will print to the + supplied ``error_stream`` (defaults to ``sys.stderr``). Both of these + printing methods will format the given string with any supplied ``*args`` + (a la printf) and encode the result to utf8 if necessary. + + The attribute :attr:`self.error_count` is incremented once per error + message printed, so an application can tell if any worker threads + encountered exceptions or otherwise called :meth:`error` on this instance. + The swift command-line tool uses this to exit non-zero if any error strings + were printed. + """ + + def __init__(self, print_stream=sys.stdout, error_stream=sys.stderr): + """ + :param print_stream: The stream to which :meth:`print_msg` sends + formatted messages, encoded to utf8 if necessary. + :param error_stream: The stream to which :meth:`error` sends formatted + messages, encoded to utf8 if necessary. 
+ """ + self.print_stream = print_stream + self.printer = QueueFunctionManager(self._print, 1, self) + self.error_stream = error_stream + self.error_printer = QueueFunctionManager(self._print_error, 1, self) + self.error_count = 0 + + def __enter__(self): + self.printer.__enter__() + self.error_printer.__enter__() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.error_printer.__exit__(exc_type, exc_value, traceback) + self.printer.__exit__(exc_type, exc_value, traceback) + + def queue_manager(self, func, thread_count, *args, **kwargs): + connection_maker = kwargs.pop('connection_maker', None) + error_counter = kwargs.pop('error_counter', None) + return QueueFunctionManager(func, thread_count, self, thread_args=args, + thread_kwargs=kwargs, + connection_maker=connection_maker, + error_counter=error_counter) + + def print_msg(self, msg, *fmt_args): + if fmt_args: + msg = msg % fmt_args + self.printer.queue.put(msg) + + def error(self, msg, *fmt_args): + if fmt_args: + msg = msg % fmt_args + self.error_printer.queue.put(msg) + + def _print(self, item, stream=None): + if stream is None: + stream = self.print_stream + if isinstance(item, unicode): + item = item.encode('utf8') + print >>stream, item + + def _print_error(self, item): + self.error_count += 1 + return self._print(item, stream=self.error_stream) diff --git a/awx/lib/site-packages/swiftclient/utils.py b/awx/lib/site-packages/swiftclient/utils.py new file mode 100644 index 0000000000..33d89a5455 --- /dev/null +++ b/awx/lib/site-packages/swiftclient/utils.py @@ -0,0 +1,27 @@ +# Copyright (c) 2010-2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Miscellaneous utility functions for use with Swift.""" + +TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y')) + + +def config_true_value(value): + """ + Returns True if the value is either True or a string in TRUE_VALUES. + Returns False otherwise. + This function come from swift.common.utils.config_true_value() + """ + return value is True or \ + (isinstance(value, basestring) and value.lower() in TRUE_VALUES) diff --git a/awx/lib/site-packages/swiftclient/version.py b/awx/lib/site-packages/swiftclient/version.py new file mode 100644 index 0000000000..7bb5d18ed9 --- /dev/null +++ b/awx/lib/site-packages/swiftclient/version.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from pbr import version as pbr_version + +version_info = pbr_version.VersionInfo('python-swiftclient') diff --git a/awx/plugins/inventory/ec2.ini b/awx/plugins/inventory/ec2.ini new file mode 100644 index 0000000000..01a4982d62 --- /dev/null +++ b/awx/plugins/inventory/ec2.ini @@ -0,0 +1,54 @@ +# Ansible EC2 external inventory script settings +# + +[ec2] + +# to talk to a private eucalyptus instance uncomment these lines +# and edit eucalyptus_host to be the host name of your cloud controller +#eucalyptus = True +#eucalyptus_host = clc.cloud.domain.org + +# AWS regions to make calls to. Set this to 'all' to make requests to all regions +# in AWS and merge the results together. Alternatively, set this to a comma +# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' +regions = all +regions_exclude = us-gov-west-1 + +# When generating inventory, Ansible needs to know how to address a server. +# Each EC2 instance has a lot of variables associated with it. Here is the list: +# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance +# Below are 2 variables that are used as the address of a server: +# - destination_variable +# - vpc_destination_variable + +# This is the normal destination variable to use. If you are running Ansible +# from outside EC2, then 'public_dns_name' makes the most sense. If you are +# running Ansible from within EC2, then perhaps you want to use the internal +# address, and should set this to 'private_dns_name'. +destination_variable = public_dns_name + +# For servers inside a VPC, using DNS names may not make sense. When an instance +# has 'subnet_id' set, this variable is used. If the subnet is public, setting +# this to 'ip_address' will return the public IP address. For instances in a +# private subnet, this should be set to 'private_ip_address', and Ansible must +# be run from within EC2. +vpc_destination_variable = ip_address + +# To tag instances on EC2 with the resource records that point to them from +# Route53, uncomment and set 'route53' to True. +route53 = False + +# Additionally, you can specify the list of zones to exclude looking up in +# 'route53_excluded_zones' as a comma-separated list. +# route53_excluded_zones = samplezone1.com, samplezone2.com + +# API calls to EC2 are slow. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-ec2.cache +# - ansible-ec2.index +cache_path = /tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +cache_max_age = 300 diff --git a/awx/plugins/inventory/ec2.py b/awx/plugins/inventory/ec2.py new file mode 100755 index 0000000000..383dad95e0 --- /dev/null +++ b/awx/plugins/inventory/ec2.py @@ -0,0 +1,577 @@ +#!/usr/bin/env python + +''' +EC2 external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests to +AWS EC2 using the Boto library.
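+
+Example invocations (options as defined by the argument parser below):
+
+    ./ec2.py --list           # full inventory, grouped, as JSON
+    ./ec2.py --host HOST      # variables for a single instance
+    ./ec2.py --refresh-cache  # force fresh API calls, bypassing the cache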
+ +NOTE: This script assumes Ansible is being executed where the environment +variables needed for Boto have already been set: + export AWS_ACCESS_KEY_ID='AK123' + export AWS_SECRET_ACCESS_KEY='abc123' + +If you're using eucalyptus you need to set the above variables and +you need to define: + + export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus + +For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html + +When run against a specific host, this script returns the following variables: + - ec2_ami_launch_index + - ec2_architecture + - ec2_association + - ec2_attachTime + - ec2_attachment + - ec2_attachmentId + - ec2_client_token + - ec2_deleteOnTermination + - ec2_description + - ec2_deviceIndex + - ec2_dns_name + - ec2_eventsSet + - ec2_group_name + - ec2_hypervisor + - ec2_id + - ec2_image_id + - ec2_instanceState + - ec2_instance_type + - ec2_ipOwnerId + - ec2_ip_address + - ec2_item + - ec2_kernel + - ec2_key_name + - ec2_launch_time + - ec2_monitored + - ec2_monitoring + - ec2_networkInterfaceId + - ec2_ownerId + - ec2_persistent + - ec2_placement + - ec2_platform + - ec2_previous_state + - ec2_private_dns_name + - ec2_private_ip_address + - ec2_publicIp + - ec2_public_dns_name + - ec2_ramdisk + - ec2_reason + - ec2_region + - ec2_requester_id + - ec2_root_device_name + - ec2_root_device_type + - ec2_security_group_ids + - ec2_security_group_names + - ec2_shutdown_state + - ec2_sourceDestCheck + - ec2_spot_instance_request_id + - ec2_state + - ec2_state_code + - ec2_state_reason + - ec2_status + - ec2_subnet_id + - ec2_tenancy + - ec2_virtualization_type + - ec2_vpc_id + +These variables are pulled out of a boto.ec2.instance object. There is a lack of +consistency with variable spellings (camelCase and underscores) since this +just loops through all variables the object exposes. It is preferred to use the +ones with underscores when multiple exist. + +In addition, if an instance has AWS Tags associated with it, each tag is a new +variable named: + - ec2_tag_[Key] = [Value] + +Security groups are comma-separated in 'ec2_security_group_ids' and +'ec2_security_group_names'. +''' + +# (c) 2012, Peter Sankauskas +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ +###################################################################### + +import sys +import os +import argparse +import re +from time import time +import boto +from boto import ec2 +from boto import rds +from boto import route53 +import ConfigParser + +try: + import json +except ImportError: + import simplejson as json + + +class Ec2Inventory(object): + def __init__(self): + ''' Main execution path ''' + + # Inventory grouped by instance IDs, tags, security groups, regions, + # and availability zones + self.inventory = {} + + # Index of hostname (address) to instance ID + self.index = {} + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + # Cache + if self.args.refresh_cache: + self.do_api_calls_update_cache() + elif not self.is_cache_valid(): + self.do_api_calls_update_cache() + + # Data to print + if self.args.host: + data_to_print = self.get_host_info() + + elif self.args.list: + # Display list of instances for inventory + if len(self.inventory) == 0: + data_to_print = self.get_inventory_from_cache() + else: + data_to_print = self.json_format_dict(self.inventory, True) + + print data_to_print + + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if they are still valid ''' + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_index): + return True + + return False + + + def read_settings(self): + ''' Reads the settings from the ec2.ini file ''' + + config = ConfigParser.SafeConfigParser() + config.read(os.path.dirname(os.path.realpath(__file__)) + '/ec2.ini') + + # is eucalyptus? + self.eucalyptus_host = None + self.eucalyptus = False + if config.has_option('ec2', 'eucalyptus'): + self.eucalyptus = config.getboolean('ec2', 'eucalyptus') + if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): + self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') + + # Regions + self.regions = [] + configRegions = config.get('ec2', 'regions') + configRegions_exclude = config.get('ec2', 'regions_exclude') + if (configRegions == 'all'): + if self.eucalyptus_host: + self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) + else: + for regionInfo in ec2.regions(): + if regionInfo.name not in configRegions_exclude: + self.regions.append(regionInfo.name) + else: + self.regions = configRegions.split(",") + + # Destination addresses + self.destination_variable = config.get('ec2', 'destination_variable') + self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') + + # Route53 + self.route53_enabled = config.getboolean('ec2', 'route53') + self.route53_excluded_zones = [] + if config.has_option('ec2', 'route53_excluded_zones'): + self.route53_excluded_zones.extend( + config.get('ec2', 'route53_excluded_zones', '').split(',')) + + # Cache related + cache_path = config.get('ec2', 'cache_path') + self.cache_path_cache = cache_path + "/ansible-ec2.cache" + self.cache_path_index = cache_path + "/ansible-ec2.index" + self.cache_max_age = config.getint('ec2', 'cache_max_age') + + + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables
about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') + self.args = parser.parse_args() + + + def do_api_calls_update_cache(self): + ''' Do API calls to each region, and save data in cache files ''' + + if self.route53_enabled: + self.get_route53_records() + + for region in self.regions: + self.get_instances_by_region(region) + self.get_rds_instances_by_region(region) + + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + + def get_instances_by_region(self, region): + ''' Makes an AWS EC2 API call to get the list of instances in a + particular region ''' + + try: + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = ec2.connect_to_region(region) + + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + sys.exit(1) + + reservations = conn.get_all_instances() + for reservation in reservations: + for instance in reservation.instances: + self.add_instance(instance, region) + + except boto.exception.BotoServerError as e: + if not self.eucalyptus: + print "Looks like AWS is down again:" + print e + sys.exit(1) + + def get_rds_instances_by_region(self, region): + ''' Makes an AWS API call to get the list of RDS instances in a + particular region ''' + + try: + conn = rds.connect_to_region(region) + if conn: + instances = conn.get_all_dbinstances() + for instance in instances: + self.add_rds_instance(instance, region) + except boto.exception.BotoServerError as e: + print "Looks like AWS RDS is down: " + print e + sys.exit(1) + + def get_instance(self, region, instance_id): + ''' Gets details about a specific instance ''' + if self.eucalyptus: + conn = boto.connect_euca(self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = ec2.connect_to_region(region) + + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + sys.exit(1) + + reservations = conn.get_all_instances([instance_id]) + for reservation in reservations: + for instance in reservation.instances: + return instance + + + def add_instance(self, instance, region): + ''' Adds an instance to the inventory and index, as long as it is + addressable ''' + + # Only want running instances + if instance.state != 'running': + return + + # Select the best destination address + if instance.subnet_id: + dest = getattr(instance, self.vpc_destination_variable) + else: + dest = getattr(instance, self.destination_variable) + + if not dest: + # Skip instances we cannot address (e.g.
private VPC subnet) + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + self.inventory[instance.id] = [dest] + + # Inventory: Group by region + self.push(self.inventory, region, dest) + + # Inventory: Group by availability zone + self.push(self.inventory, instance.placement, dest) + + # Inventory: Group by instance type + self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest) + + # Inventory: Group by key pair + if instance.key_name: + self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest) + + # Inventory: Group by security group + try: + for group in instance.groups: + key = self.to_safe("security_group_" + group.name) + self.push(self.inventory, key, dest) + except AttributeError: + print 'Package boto seems a bit older.' + print 'Please upgrade boto >= 2.3.0.' + sys.exit(1) + + # Inventory: Group by tag keys + for k, v in instance.tags.iteritems(): + key = self.to_safe("tag_" + k + "=" + v) + self.push(self.inventory, key, dest) + + # Inventory: Group by Route53 domain names if enabled + if self.route53_enabled: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + self.push(self.inventory, name, dest) + + + def add_rds_instance(self, instance, region): + ''' Adds an RDS instance to the inventory and index, as long as it is + addressable ''' + + # Only want available instances + if instance.status != 'available': + return + + # Select the best destination address + #if instance.subnet_id: + #dest = getattr(instance, self.vpc_destination_variable) + #else: + #dest = getattr(instance, self.destination_variable) + dest = instance.endpoint[0] + + if not dest: + # Skip instances we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + self.inventory[instance.id] = [dest] + + # Inventory: Group by region + self.push(self.inventory, region, dest) + + # Inventory: Group by availability zone + self.push(self.inventory, instance.availability_zone, dest) + + # Inventory: Group by instance type + self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest) + + # Inventory: Group by security group + try: + if instance.security_group: + key = self.to_safe("security_group_" + instance.security_group.name) + self.push(self.inventory, key, dest) + except AttributeError: + print 'Package boto seems a bit older.' + print 'Please upgrade boto >= 2.3.0.' + sys.exit(1) + + # Inventory: Group by engine + self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) + + # Inventory: Group by parameter group + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) + + + def get_route53_records(self): + ''' Get and store the map of resource records to domain names that + point to them. 
''' + + r53_conn = route53.Route53Connection() + all_zones = r53_conn.get_zones() + + route53_zones = [ zone for zone in all_zones if zone.name[:-1] + not in self.route53_excluded_zones ] + + self.route53_records = {} + + for zone in route53_zones: + rrsets = r53_conn.get_all_rrsets(zone.id) + + for record_set in rrsets: + record_name = record_set.name + + if record_name.endswith('.'): + record_name = record_name[:-1] + + for resource in record_set.resource_records: + self.route53_records.setdefault(resource, set()) + self.route53_records[resource].add(record_name) + + + def get_instance_route53_names(self, instance): + ''' Check if an instance is referenced in the records we have from + Route53. If it is, return the list of domain names pointing to said + instance. If nothing points to it, return an empty list. ''' + + instance_attributes = [ 'public_dns_name', 'private_dns_name', + 'ip_address', 'private_ip_address' ] + + name_list = set() + + for attrib in instance_attributes: + try: + value = getattr(instance, attrib) + except AttributeError: + continue + + if value in self.route53_records: + name_list.update(self.route53_records[value]) + + return list(name_list) + + + def get_host_info(self): + ''' Get variables about a specific host ''' + + if len(self.index) == 0: + # Need to load index from cache + self.load_index_from_cache() + + if self.args.host not in self.index: + # try updating the cache + self.do_api_calls_update_cache() + if self.args.host not in self.index: + # host might not exist anymore + return self.json_format_dict({}, True) + + (region, instance_id) = self.index[self.args.host] + + instance = self.get_instance(region, instance_id) + instance_vars = {} + for key in vars(instance): + value = getattr(instance, key) + key = self.to_safe('ec2_' + key) + + # Handle complex types + if type(value) in [int, bool]: + instance_vars[key] = value + elif type(value) in [str, unicode]: + instance_vars[key] = value.strip() + elif type(value) == type(None): + instance_vars[key] = '' + elif key == 'ec2_region': + instance_vars[key] = value.name + elif key == 'ec2_tags': + for k, v in value.iteritems(): + key = self.to_safe('ec2_tag_' + k) + instance_vars[key] = v + elif key == 'ec2_groups': + group_ids = [] + group_names = [] + for group in value: + group_ids.append(group.id) + group_names.append(group.name) + instance_vars["ec2_security_group_ids"] = ','.join(group_ids) + instance_vars["ec2_security_group_names"] = ','.join(group_names) + else: + pass + # TODO Product codes if someone finds them useful + #print key + #print type(value) + #print value + + return self.json_format_dict(instance_vars, True) + + + def push(self, my_dict, key, element): + ''' Pushes an element onto a list that may not have been defined in + the dict ''' + + if key in my_dict: + my_dict[key].append(element) + else: + my_dict[key] = [element] + + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + cache = open(self.cache_path_cache, 'r') + json_inventory = cache.read() + return json_inventory + + + def load_index_from_cache(self): + ''' Reads the index from the cache file and sets self.index ''' + + cache = open(self.cache_path_index, 'r') + json_index = cache.read() + self.index = json.loads(json_index) + + + def write_to_cache(self, data, filename): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + + def
to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be + used as Ansible groups ''' + + return re.sub("[^A-Za-z0-9\-]", "_", word) + + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +Ec2Inventory() + diff --git a/awx/plugins/inventory/rax.py b/awx/plugins/inventory/rax.py new file mode 100755 index 0000000000..abfc90dd51 --- /dev/null +++ b/awx/plugins/inventory/rax.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python + +# (c) 2013, Jesse Keating +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +DOCUMENTATION = ''' +--- +inventory: rax +short_description: Rackspace Public Cloud external inventory script +description: + - Generates inventory that Ansible can understand by making API requests to the Rackspace Public Cloud API + - | + When run against a specific host, this script returns the following variables: + rax_os-ext-sts_task_state + rax_addresses + rax_links + rax_image + rax_os-ext-sts_vm_state + rax_flavor + rax_id + rax_rax-bandwidth_bandwidth + rax_user_id + rax_os-dcf_diskconfig + rax_accessipv4 + rax_accessipv6 + rax_progress + rax_os-ext-sts_power_state + rax_metadata + rax_status + rax_updated + rax_hostid + rax_name + rax_created + rax_tenant_id + rax__loaded + + where some items can have nested structures. + - Credentials are set in a credentials file +version_added: None +options: + creds_file: + description: + - File to find the Rackspace Public Cloud credentials in + required: true + default: null + region_name: + description: + - Region name to use in request + required: false + default: DFW +author: Jesse Keating +notes: + - Two environment variables need to be set, RAX_CREDS_FILE and RAX_REGION. + - RAX_CREDS_FILE points to a credentials file appropriate for pyrax + - RAX_REGION defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +requirements: [ "pyrax" ] +examples: + - description: List server instances + code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list + - description: List server instance properties + code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --host +''' + +import sys +import re +import os +import argparse + +try: + import json +except ImportError: + import simplejson as json + +try: + import pyrax +except ImportError: + print('pyrax required for this module') + sys.exit(1) + +# Set up the parser +parser = argparse.ArgumentParser(description='List active instances', + epilog='List by itself will list all the active \ + instances.
Listing a specific instance will show \ + all the details about the instance.') + +parser.add_argument('--list', action='store_true', default=True, + help='List active servers') +parser.add_argument('--host', + help='List details about the specific host (IP address)') + +args = parser.parse_args() + +# Set up the auth +try: + creds_file = os.environ['RAX_CREDS_FILE'] + region = os.environ['RAX_REGION'] +except KeyError, e: + sys.stderr.write('Unable to load %s\n' % e.message) + sys.exit(1) + +pyrax.set_setting('identity_type', 'rackspace') + +try: + pyrax.set_credential_file(os.path.expanduser(creds_file), + region=region) +except Exception, e: + sys.stderr.write("%s: %s\n" % (e, e.message)) + sys.exit(1) + +# Dispatch based on the requested mode +if not args.host: + groups = {} + + # Loop over the servers + for server in pyrax.cloudservers.list(): + # Determine the group (defaulting to 'undefined') + try: + group = server.metadata['group'] + except KeyError: + group = 'undefined' + + # Create the group if it doesn't exist and add the server + groups.setdefault(group, []).append(server.accessIPv4) + + # Return server list + print(json.dumps(groups)) + sys.exit(0) + +# Get the details for the requested instance +results = {} +# This should be only one, but loop anyway +for server in pyrax.cloudservers.list(): + if server.accessIPv4 == args.host: + for key in [key for key in vars(server) if + key not in ('manager', '_info')]: + # Extract value + value = getattr(server, key) + + # Generate sanitized key + key = 'rax_' + re.sub("[^A-Za-z0-9\-]", "_", key).lower() + results[key] = value + +print(json.dumps(results)) +sys.exit(0) diff --git a/fix_virtualenv_setuptools.py b/fix_virtualenv_setuptools.py new file mode 100755 index 0000000000..98f719d5cf --- /dev/null +++ b/fix_virtualenv_setuptools.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +''' +Fix setuptools (in virtualenv) after upgrading to distribute >= 0.7.
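+
+The upgrade can leave stale setuptools-0.6* egg files or directories behind
+in site-packages, where they may shadow the newer setuptools; this script
+simply locates them with a glob and deletes them.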
+''' + +from distutils.sysconfig import get_python_lib +import glob +import os +import shutil + +for f in glob.glob(os.path.join(get_python_lib(), 'setuptools-0.6*.egg*')): + print 'removing', f + if os.path.isdir(f): + shutil.rmtree(f) + else: + os.remove(f) diff --git a/requirements/Babel-1.3.tar.gz b/requirements/Babel-1.3.tar.gz new file mode 100644 index 0000000000..c9500ab22c Binary files /dev/null and b/requirements/Babel-1.3.tar.gz differ diff --git a/requirements/amqp-1.0.13.tar.gz b/requirements/amqp-1.0.13.tar.gz deleted file mode 100644 index 30ce0cdc2a..0000000000 Binary files a/requirements/amqp-1.0.13.tar.gz and /dev/null differ diff --git a/requirements/amqp-1.2.1.tar.gz b/requirements/amqp-1.2.1.tar.gz new file mode 100644 index 0000000000..5d6556cdbd Binary files /dev/null and b/requirements/amqp-1.2.1.tar.gz differ diff --git a/requirements/boto-2.13.3.tar.gz b/requirements/boto-2.13.3.tar.gz new file mode 100644 index 0000000000..7abf178ee3 Binary files /dev/null and b/requirements/boto-2.13.3.tar.gz differ diff --git a/requirements/celery-3.0.22.tar.gz b/requirements/celery-3.0.22.tar.gz deleted file mode 100644 index 862d907ac9..0000000000 Binary files a/requirements/celery-3.0.22.tar.gz and /dev/null differ diff --git a/requirements/celery-3.0.23.tar.gz b/requirements/celery-3.0.23.tar.gz new file mode 100644 index 0000000000..35be88497e Binary files /dev/null and b/requirements/celery-3.0.23.tar.gz differ diff --git a/requirements/d2to1-0.2.11.tar.gz b/requirements/d2to1-0.2.11.tar.gz new file mode 100644 index 0000000000..86b4605ccb Binary files /dev/null and b/requirements/d2to1-0.2.11.tar.gz differ diff --git a/requirements/dev.txt b/requirements/dev.txt index 90f4e8b139..43d220e7d9 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -6,7 +6,9 @@ # Packages used for both development and production: Django>=1.4 -# The following packages are now bundled with AWX (awx/lib/site-packages): +# The following packages and their dependencies are bundled with AWX +# (in awx/lib/site-packages): + #boto #django-auth-ldap #django-celery #django-extensions @@ -15,6 +17,7 @@ Django>=1.4 #djangorestframework>=2.3.0,<2.4.0 #Markdown #pexpect + #pyrax #python-dateutil #requests #South>=0.8,<2.0 diff --git a/requirements/dev_local.txt b/requirements/dev_local.txt index a8cae5330d..ba823d938b 100644 --- a/requirements/dev_local.txt +++ b/requirements/dev_local.txt @@ -1,45 +1,87 @@ # PIP requirements for AWX development/build environment (using only local # packages). Install using "pip --no-index -r dev_local.txt". 
-distribute-0.6.45.tar.gz - +distribute-0.7.3.zip +setuptools-1.1.6.tar.gz Django-1.5.4.tar.gz -# The following packages are now bundled with AWX (awx/lib/site-packages): - # Needed by python-dateutil, django-extensions: - #six-1.3.0.tar.gz +# The following packages are bundled with AWX (in awx/lib/site-packages): + # For Python2.6 support: + #importlib-1.0.2.tar.bz2 + #ordereddict-1.1.tar.gz + # Needed by python-dateutil, django-extensions, python-novaclient: + #six-1.4.1.tar.gz # Needed by kombu: - #amqp-1.0.13.tar.gz + #amqp-1.2.1.tar.gz #anyjson-0.3.3.tar.gz # Needed by celery: #billiard-2.7.3.32.tar.gz #kombu-2.5.14.tar.gz #python-dateutil-2.1.tar.gz # Needed by django-celery: - #celery-3.0.22.tar.gz - #pytz-2013b.tar.gz + #celery-3.0.23.tar.gz + # Needed by django-celery, babel: + #pytz-2013d.tar.bz2 + # Needed by pbr: + #pip-1.4.1.tar.gz + # Needed by python-novaclient: + #Babel-1.3.tar.gz + #iso8601-0.1.4.tar.gz + #prettytable-0.7.2.tar.bz2 + #requests-2.0.0.tar.gz + #simplejson-3.3.0.tar.gz + # Needed by python-novaclient, python-swiftclient: + #pbr-0.5.21.tar.gz + # Needed by rackspace-novaclient, os-diskconfig-python-novaclient-ext, + # os-networksv2-python-novaclient-ext, + # rax-default-network-flags-python-novaclient-ext, + # rax-scheduled-images-python-novaclient-ext: + #python-novaclient-2.15.0.tar.gz + # Needed by rackspace-novaclient: + #rackspace-auth-openstack-1.0.tar.gz + #os_diskconfig_python_novaclient_ext-0.1.1.tar.gz + #os_networksv2_python_novaclient_ext-0.21.tar.gz + #rax_default_network_flags_python_novaclient_ext-0.1.3.tar.gz + #rax_scheduled_images_python_novaclient_ext-0.2.1.tar.gz + # Needed by distribute: + #setuptools-1.1.6.tar.gz + # Needed by d2to1: + #distribute-0.7.3.tar.gz + # Needed by python-swiftclient: + #d2to1-0.2.11.tar.gz + # Needed by pyrax: + #httplib2-0.8.tar.gz + #keyring-3.0.5.zip + #mock-1.0.1.tar.gz + #python-swiftclient-1.6.0.tar.gz + #rackspace-novaclient-1.3.tar.gz # Remaining dev/prod packages: + #boto-2.13.3.tar.gz #django-auth-ldap-1.1.4.tar.gz - #django-celery-3.0.21.tar.gz - #django-extensions-1.2.0.tar.gz + #django-celery-3.0.23.tar.gz + #django-extensions-1.2.2.tar.gz #django-jsonfield-0.9.10.tar.gz #django-taggit-0.10.tar.gz - #djangorestframework-2.3.7.tar.gz + #djangorestframework-2.3.8.tar.gz #Markdown-2.3.1.tar.gz #pexpect-2.4.tar.gz - #requests-1.2.3.tar.gz + #pyrax-1.5.0.tar.gz #South-0.8.2.tar.gz -# Remaining dev-only packages: +# Dev-only packages: django-debug-toolbar-0.9.4.tar.gz django-devserver-0.6.2.tar.gz +# Needed for Python2.6 support: unittest2-0.5.1.tar.gz +# Needed by pylint: logilab-common-0.60.0.tar.gz astroid-1.0.0.tar.gz +# Needed by django-jenkins: coverage-3.6.tar.gz pylint-1.0.0.tar.gz +# Remaining dev-only packages: django-jenkins-0.14.1.tar.gz -ipython-1.0.0.tar.gz +ipython-1.1.0.tar.gz # You may also need to install the following extra packages using the OS # package manager, or pip if you're running inside a virtualenv. 
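The requirement files above repeatedly note that certain packages are bundled in awx/lib/site-packages rather than installed. For that to work, the bundled directory has to take precedence on sys.path over any system-wide copies. The exact bootstrap code is not part of this diff, so the following is only a sketch of the general technique (BASE_DIR is an assumed name for the AWX source root):

```python
import os
import sys

# Hypothetical bootstrap: prepend the bundled third-party packages
# (awx/lib/site-packages) to sys.path so they win over any system-wide
# installations of the same libraries.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
SITE_PACKAGES = os.path.join(BASE_DIR, 'awx', 'lib', 'site-packages')

if SITE_PACKAGES not in sys.path:
    sys.path.insert(0, SITE_PACKAGES)
```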
diff --git a/requirements/distribute-0.7.3.zip b/requirements/distribute-0.7.3.zip new file mode 100644 index 0000000000..acd0663b5e Binary files /dev/null and b/requirements/distribute-0.7.3.zip differ diff --git a/requirements/django-celery-3.0.21.tar.gz b/requirements/django-celery-3.0.21.tar.gz deleted file mode 100644 index d91540ba97..0000000000 Binary files a/requirements/django-celery-3.0.21.tar.gz and /dev/null differ diff --git a/requirements/django-celery-3.0.23.tar.gz b/requirements/django-celery-3.0.23.tar.gz new file mode 100644 index 0000000000..98c4fafa1f Binary files /dev/null and b/requirements/django-celery-3.0.23.tar.gz differ diff --git a/requirements/django-extensions-1.2.0.tar.gz b/requirements/django-extensions-1.2.0.tar.gz deleted file mode 100644 index 7600cbb1f3..0000000000 Binary files a/requirements/django-extensions-1.2.0.tar.gz and /dev/null differ diff --git a/requirements/django-extensions-1.2.2.tar.gz b/requirements/django-extensions-1.2.2.tar.gz new file mode 100644 index 0000000000..1e8b875dd4 Binary files /dev/null and b/requirements/django-extensions-1.2.2.tar.gz differ diff --git a/requirements/djangorestframework-2.3.7.tar.gz b/requirements/djangorestframework-2.3.7.tar.gz deleted file mode 100644 index 4d708b5ce8..0000000000 Binary files a/requirements/djangorestframework-2.3.7.tar.gz and /dev/null differ diff --git a/requirements/djangorestframework-2.3.8.tar.gz b/requirements/djangorestframework-2.3.8.tar.gz new file mode 100644 index 0000000000..8868c89a1f Binary files /dev/null and b/requirements/djangorestframework-2.3.8.tar.gz differ diff --git a/requirements/httplib2-0.8.tar.gz b/requirements/httplib2-0.8.tar.gz new file mode 100644 index 0000000000..1ab951212e Binary files /dev/null and b/requirements/httplib2-0.8.tar.gz differ diff --git a/requirements/importlib-1.0.2.tar.bz2 b/requirements/importlib-1.0.2.tar.bz2 new file mode 100644 index 0000000000..77afb41324 Binary files /dev/null and b/requirements/importlib-1.0.2.tar.bz2 differ diff --git a/requirements/ipython-1.0.0.tar.gz b/requirements/ipython-1.1.0.tar.gz similarity index 53% rename from requirements/ipython-1.0.0.tar.gz rename to requirements/ipython-1.1.0.tar.gz index b2a4250838..30d23a7d30 100644 Binary files a/requirements/ipython-1.0.0.tar.gz and b/requirements/ipython-1.1.0.tar.gz differ diff --git a/requirements/iso8601-0.1.4.tar.gz b/requirements/iso8601-0.1.4.tar.gz new file mode 100644 index 0000000000..59bc203d1a Binary files /dev/null and b/requirements/iso8601-0.1.4.tar.gz differ diff --git a/requirements/keyring-3.0.5.zip b/requirements/keyring-3.0.5.zip new file mode 100644 index 0000000000..48ba40658f Binary files /dev/null and b/requirements/keyring-3.0.5.zip differ diff --git a/requirements/mock-1.0.1.tar.gz b/requirements/mock-1.0.1.tar.gz new file mode 100644 index 0000000000..4fdea77c71 Binary files /dev/null and b/requirements/mock-1.0.1.tar.gz differ diff --git a/requirements/ordereddict-1.1.tar.gz b/requirements/ordereddict-1.1.tar.gz new file mode 100644 index 0000000000..2955c19034 Binary files /dev/null and b/requirements/ordereddict-1.1.tar.gz differ diff --git a/requirements/os_diskconfig_python_novaclient_ext-0.1.1.tar.gz b/requirements/os_diskconfig_python_novaclient_ext-0.1.1.tar.gz new file mode 100644 index 0000000000..2536121df9 Binary files /dev/null and b/requirements/os_diskconfig_python_novaclient_ext-0.1.1.tar.gz differ diff --git a/requirements/os_networksv2_python_novaclient_ext-0.21.tar.gz 
b/requirements/os_networksv2_python_novaclient_ext-0.21.tar.gz new file mode 100644 index 0000000000..3d8030afdb Binary files /dev/null and b/requirements/os_networksv2_python_novaclient_ext-0.21.tar.gz differ diff --git a/requirements/pbr-0.5.21.tar.gz b/requirements/pbr-0.5.21.tar.gz new file mode 100644 index 0000000000..6163f577d5 Binary files /dev/null and b/requirements/pbr-0.5.21.tar.gz differ diff --git a/requirements/pip-1.4.1.tar.gz b/requirements/pip-1.4.1.tar.gz new file mode 100644 index 0000000000..f56454f75a Binary files /dev/null and b/requirements/pip-1.4.1.tar.gz differ diff --git a/requirements/prettytable-0.7.2.tar.bz2 b/requirements/prettytable-0.7.2.tar.bz2 new file mode 100644 index 0000000000..651c6cc3e0 Binary files /dev/null and b/requirements/prettytable-0.7.2.tar.bz2 differ diff --git a/requirements/prod.txt b/requirements/prod.txt index 11635fbaba..d1d64efb1a 100644 --- a/requirements/prod.txt +++ b/requirements/prod.txt @@ -3,7 +3,9 @@ Django>=1.4 -# The following packages are now bundled with AWX (awx/lib/site-packages): +# The following packages and their dependencies are bundled with AWX +# (in awx/lib/site-packages): + #boto #django-auth-ldap #django-celery #django-extensions @@ -12,6 +14,7 @@ Django>=1.4 #djangorestframework>=2.3.0,<2.4.0 #Markdown #pexpect + #pyrax #python-dateutil #requests #South>=0.8,<2.0 diff --git a/requirements/prod_local.txt b/requirements/prod_local.txt index f6ad22c556..58f467eb26 100644 --- a/requirements/prod_local.txt +++ b/requirements/prod_local.txt @@ -1,31 +1,69 @@ # PIP requirements for AWX production environment (using only local packages). # Install using "pip --no-index -r prod_local.txt". -Django-1.5.2.tar.gz +Django-1.5.4.tar.gz -# The following packages are now bundled with AWX (awx/lib/site-packages): - # Needed by python-dateutil, django-extensions: - #six-1.3.0.tar.gz +# The following packages are bundled with AWX (in awx/lib/site-packages): + # For Python2.6 support: + #importlib-1.0.2.tar.bz2 + #ordereddict-1.1.tar.gz + # Needed by python-dateutil, django-extensions, python-novaclient: + #six-1.4.1.tar.gz # Needed by kombu: - #amqp-1.0.13.tar.gz + #amqp-1.2.1.tar.gz #anyjson-0.3.3.tar.gz # Needed by celery: #billiard-2.7.3.32.tar.gz #kombu-2.5.14.tar.gz #python-dateutil-2.1.tar.gz # Needed by django-celery: - #celery-3.0.22.tar.gz - #pytz-2013b.tar.gz + #celery-3.0.23.tar.gz + # Needed by django-celery, babel: + #pytz-2013d.tar.bz2 + # Needed by pbr: + #pip-1.4.1.tar.gz + # Needed by python-novaclient: + #Babel-1.3.tar.gz + #iso8601-0.1.4.tar.gz + #prettytable-0.7.2.tar.bz2 + #requests-2.0.0.tar.gz + #simplejson-3.3.0.tar.gz + # Needed by python-novaclient, python-swiftclient: + #pbr-0.5.21.tar.gz + # Needed by rackspace-novaclient, os-diskconfig-python-novaclient-ext, + # os-networksv2-python-novaclient-ext, + # rax-default-network-flags-python-novaclient-ext, + # rax-scheduled-images-python-novaclient-ext: + #python-novaclient-2.15.0.tar.gz + # Needed by rackspace-novaclient: + #rackspace-auth-openstack-1.0.tar.gz + #os_diskconfig_python_novaclient_ext-0.1.1.tar.gz + #os_networksv2_python_novaclient_ext-0.21.tar.gz + #rax_default_network_flags_python_novaclient_ext-0.1.3.tar.gz + #rax_scheduled_images_python_novaclient_ext-0.2.1.tar.gz + # Needed by distribute: + #setuptools-1.1.6.tar.gz + # Needed by d2to1: + #distribute-0.7.3.tar.gz + # Needed by python-swiftclient: + #d2to1-0.2.11.tar.gz + # Needed by pyrax: + #httplib2-0.8.tar.gz + #keyring-3.0.5.zip + #mock-1.0.1.tar.gz + 
#python-swiftclient-1.6.0.tar.gz + #rackspace-novaclient-1.3.tar.gz # Remaining dev/prod packages: + #boto-2.13.3.tar.gz #django-auth-ldap-1.1.4.tar.gz - #django-celery-3.0.21.tar.gz - #django-extensions-1.2.0.tar.gz + #django-celery-3.0.23.tar.gz + #django-extensions-1.2.2.tar.gz #django-jsonfield-0.9.10.tar.gz #django-taggit-0.10.tar.gz - #djangorestframework-2.3.7.tar.gz + #djangorestframework-2.3.8.tar.gz #Markdown-2.3.1.tar.gz #pexpect-2.4.tar.gz - #requests-1.2.3.tar.gz + #pyrax-1.5.0.tar.gz #South-0.8.2.tar.gz # You may also need to install the following extra packages using the OS diff --git a/requirements/pyrax-1.5.0.tar.gz b/requirements/pyrax-1.5.0.tar.gz new file mode 100644 index 0000000000..5b778ba89a Binary files /dev/null and b/requirements/pyrax-1.5.0.tar.gz differ diff --git a/requirements/python-novaclient-2.15.0.tar.gz b/requirements/python-novaclient-2.15.0.tar.gz new file mode 100644 index 0000000000..0e51361285 Binary files /dev/null and b/requirements/python-novaclient-2.15.0.tar.gz differ diff --git a/requirements/python-swiftclient-1.6.0.tar.gz b/requirements/python-swiftclient-1.6.0.tar.gz new file mode 100644 index 0000000000..8f9c0ef99a Binary files /dev/null and b/requirements/python-swiftclient-1.6.0.tar.gz differ diff --git a/requirements/pytz-2013b.tar.gz b/requirements/pytz-2013b.tar.gz deleted file mode 100644 index d6eaef7762..0000000000 Binary files a/requirements/pytz-2013b.tar.gz and /dev/null differ diff --git a/requirements/pytz-2013d.tar.bz2 b/requirements/pytz-2013d.tar.bz2 new file mode 100644 index 0000000000..53748f3204 Binary files /dev/null and b/requirements/pytz-2013d.tar.bz2 differ diff --git a/requirements/rackspace-auth-openstack-1.0.tar.gz b/requirements/rackspace-auth-openstack-1.0.tar.gz new file mode 100644 index 0000000000..e267933241 Binary files /dev/null and b/requirements/rackspace-auth-openstack-1.0.tar.gz differ diff --git a/requirements/rackspace-novaclient-1.3.tar.gz b/requirements/rackspace-novaclient-1.3.tar.gz new file mode 100644 index 0000000000..e9e90dd33c Binary files /dev/null and b/requirements/rackspace-novaclient-1.3.tar.gz differ diff --git a/requirements/rax_default_network_flags_python_novaclient_ext-0.1.3.tar.gz b/requirements/rax_default_network_flags_python_novaclient_ext-0.1.3.tar.gz new file mode 100644 index 0000000000..a32057cd0f Binary files /dev/null and b/requirements/rax_default_network_flags_python_novaclient_ext-0.1.3.tar.gz differ diff --git a/requirements/rax_scheduled_images_python_novaclient_ext-0.2.1.tar.gz b/requirements/rax_scheduled_images_python_novaclient_ext-0.2.1.tar.gz new file mode 100644 index 0000000000..46acba28bc Binary files /dev/null and b/requirements/rax_scheduled_images_python_novaclient_ext-0.2.1.tar.gz differ diff --git a/requirements/requests-1.2.3.tar.gz b/requirements/requests-1.2.3.tar.gz deleted file mode 100644 index cf59fbae87..0000000000 Binary files a/requirements/requests-1.2.3.tar.gz and /dev/null differ diff --git a/requirements/requests-2.0.0.tar.gz b/requirements/requests-2.0.0.tar.gz new file mode 100644 index 0000000000..15a21d836f Binary files /dev/null and b/requirements/requests-2.0.0.tar.gz differ diff --git a/requirements/setuptools-1.1.6.tar.gz b/requirements/setuptools-1.1.6.tar.gz new file mode 100644 index 0000000000..18c311c284 Binary files /dev/null and b/requirements/setuptools-1.1.6.tar.gz differ diff --git a/requirements/simplejson-3.3.0.tar.gz b/requirements/simplejson-3.3.0.tar.gz new file mode 100644 index 0000000000..bfeb025e40 Binary files 
/dev/null and b/requirements/simplejson-3.3.0.tar.gz differ diff --git a/requirements/six-1.3.0.tar.gz b/requirements/six-1.3.0.tar.gz deleted file mode 100644 index 98d658cf87..0000000000 Binary files a/requirements/six-1.3.0.tar.gz and /dev/null differ diff --git a/requirements/six-1.4.1.tar.gz b/requirements/six-1.4.1.tar.gz new file mode 100644 index 0000000000..addecf221d Binary files /dev/null and b/requirements/six-1.4.1.tar.gz differ
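For context, the two inventory plugins added in this change (ec2.py and rax.py) follow Ansible's external-inventory contract: the script prints JSON on stdout, with --list emitting a mapping of group names to host addresses and --host emitting a dict of variables for one host. A minimal consumer sketch in the same Python 2 style as the scripts above (the ./ec2.py path and configured boto credentials are assumptions):

```python
import json
import subprocess

# Run the EC2 inventory script and parse its --list output: a JSON dict
# mapping group names (regions, zones, tags, ...) to lists of addresses.
proc = subprocess.Popen(['./ec2.py', '--list'], stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
inventory = json.loads(stdout)

for group, hosts in sorted(inventory.items()):
    print '%s: %d host(s)' % (group, len(hosts))
```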