diff --git a/awx/api/serializers.py b/awx/api/serializers.py index c104e1c0c3..d1360c911a 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -302,6 +302,24 @@ class BaseSerializer(serializers.ModelSerializer): else: return obj.active + def get_validation_exclusions(self, instance=None): + # Override base class method to continue to use model validation for + # fields (including optional ones), appears this was broken by DRF + # 2.3.13 update. + cls = self.opts.model + opts = get_concrete_model(cls)._meta + exclusions = [field.name for field in opts.fields + opts.many_to_many] + for field_name, field in self.fields.items(): + field_name = field.source or field_name + if field_name not in exclusions: + continue + if field.read_only: + continue + if isinstance(field, serializers.Serializer): + continue + exclusions.remove(field_name) + return exclusions + class UnifiedJobTemplateSerializer(BaseSerializer): diff --git a/awx/lib/site-packages/README b/awx/lib/site-packages/README index 254e363882..d3f904b1b4 100644 --- a/awx/lib/site-packages/README +++ b/awx/lib/site-packages/README @@ -1,54 +1,54 @@ -Local versions of third-party packages required by AWX. Package names and +Local versions of third-party packages required by Tower. Package names and versions are listed below, along with notes on which files are included. -amqp==1.3.3 (amqp/*) +amqp==1.4.4 (amqp/*) anyjson==0.3.3 (anyjson/*) argparse==1.2.1 (argparse.py, needed for Python 2.6 support) Babel==1.3 (babel/*, excluded bin/pybabel) -billiard==3.3.0.13 (billiard/*, funtests/*, excluded _billiard.so) -boto==2.21.2 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin, +billiard==3.3.0.16 (billiard/*, funtests/*, excluded _billiard.so) +boto==2.27.0 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin, bin/cq, bin/cwutil, bin/dynamodb_dump, bin/dynamodb_load, bin/elbadmin, bin/fetch_file, bin/glacier, bin/instance_events, bin/kill_instance, bin/launch_instance, bin/list_instances, bin/lss3, bin/mturk, bin/pyami_sendmail, bin/route53, bin/s3put, bin/sdbadmin, bin/taskadmin) -celery==3.1.7 (celery/*, excluded bin/celery*) +celery==3.1.10 (celery/*, excluded bin/celery*) d2to1==0.2.11 (d2to1/*) distribute==0.7.3 (no files) -django-auth-ldap==1.1.7 (django_auth_ldap/*) -django-celery==3.1.1 (djcelery/*) -django-extensions==1.2.5 (django_extensions/*) +django-auth-ldap==1.1.8 (django_auth_ldap/*) +django-celery==3.1.10 (djcelery/*) +django-extensions==1.3.3 (django_extensions/*) django-jsonfield==0.9.12 (jsonfield/*, minor fix in jsonfield/fields.py) django-polymorphic==0.5.3 (polymorphic/*) django-split-settings==0.1.1 (split_settings/*) django-taggit==0.11.2 (taggit/*) -djangorestframework==2.3.10 (rest_framework/*) +djangorestframework==2.3.13 (rest_framework/*) httplib2==0.8 (httplib2/*) -importlib==1.0.2 (importlib/*, needed for Python 2.6 support) -iso8601==0.1.8 (iso8601/*) -keyring==3.3 (keyring/*, excluded bin/keyring) -kombu==3.0.8 (kombu/*) -Markdown==2.3.1 (markdown/*, excluded bin/markdown_py) +importlib==1.0.3 (importlib/*, needed for Python 2.6 support) +iso8601==0.1.10 (iso8601/*) +keyring==3.7 (keyring/*, excluded bin/keyring) +kombu==3.0.14 (kombu/*) +Markdown==2.4 (markdown/*, excluded bin/markdown_py) mock==1.0.1 (mock.py) ordereddict==1.1 (ordereddict.py, needed for Python 2.6 support) -os-diskconfig-python-novaclient-ext==0.1.1 (os_diskconfig_python_novaclient_ext/*) +os-diskconfig-python-novaclient-ext==0.1.2 (os_diskconfig_python_novaclient_ext/*) 
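For context on the get_validation_exclusions() override above: Django's Model.full_clean() skips every field named in its exclude argument, and the hunk's comment suggests the DRF 2.3.13 update left optional fields in that list, so their model-level validators stopped running. A minimal sketch of the underlying mechanism, using only stock Django API (the helper name is illustrative):

    from django.core.exceptions import ValidationError

    def run_model_validation(instance, exclusions):
        # Fields in `exclusions` are skipped by full_clean(); the
        # override removes serializer-managed fields from the list so
        # their model validators run again.
        try:
            instance.full_clean(exclude=exclusions)
        except ValidationError as exc:
            return exc.message_dict
        return {}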
os-networksv2-python-novaclient-ext==0.21 (os_networksv2_python_novaclient_ext.py) -os-virtual-interfacesv2-python-novaclient-ext==0.14 (os_virtual_interfacesv2_python_novaclient_ext.py) -pbr==0.5.23 (pbr/*) -pexpect==3.0 (pexpect/*, excluded pxssh.py, fdpexpect.py, FSM.py, screen.py, +os-virtual-interfacesv2-python-novaclient-ext==0.15 (os_virtual_interfacesv2_python_novaclient_ext.py) +pbr==0.8.0 (pbr/*) +pexpect==3.1 (pexpect/*, excluded pxssh.py, fdpexpect.py, FSM.py, screen.py, ANSI.py) -pip==1.5 (pip/*, excluded bin/pip*) +pip==1.5.4 (pip/*, excluded bin/pip*) prettytable==0.7.2 (prettytable.py) -pyrax==1.6.2 (pyrax/*) +pyrax==1.7.2 (pyrax/*) python-dateutil==2.2 (dateutil/*) -python-novaclient==2.15.0 (novaclient/*, excluded bin/nova) -python-swiftclient==1.8.0 (swiftclient/*, excluded bin/swift) -pytz==2013.8 (pytz/*) -rackspace-auth-openstack==1.2 (rackspace_auth_openstack/*) +python-novaclient==2.17.0 (novaclient/*, excluded bin/nova) +python-swiftclient==2.0.3 (swiftclient/*, excluded bin/swift) +pytz==2014.2 (pytz/*) +rackspace-auth-openstack==1.3 (rackspace_auth_openstack/*) rackspace-novaclient==1.4 (no files) -rax-default-network-flags-python-novaclient-ext==0.1.3 (rax_default_network_flags_python_novaclient_ext/*) +rax-default-network-flags-python-novaclient-ext==0.2.3 (rax_default_network_flags_python_novaclient_ext/*) rax-scheduled-images-python-novaclient-ext==0.2.1 (rax_scheduled_images_python_novaclient_ext/*) -requests==2.1.0 (requests/*) -setuptools==2.0.2 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*) -simplejson==3.3.1 (simplejson/*, excluded simplejson/_speedups.so) -six==1.4.1 (six.py) +requests==2.2.1 (requests/*) +setuptools==2.2 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*) +simplejson==3.3.3 (simplejson/*, excluded simplejson/_speedups.so) +six==1.6.1 (six.py) South==0.8.4 (south/*) diff --git a/awx/lib/site-packages/amqp/__init__.py b/awx/lib/site-packages/amqp/__init__.py index 210942bb0e..04882736fb 100644 --- a/awx/lib/site-packages/amqp/__init__.py +++ b/awx/lib/site-packages/amqp/__init__.py @@ -16,7 +16,7 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 from __future__ import absolute_import -VERSION = (1, 3, 3) +VERSION = (1, 4, 4) __version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:]) __author__ = 'Barry Pederson' __maintainer__ = 'Ask Solem' diff --git a/awx/lib/site-packages/amqp/abstract_channel.py b/awx/lib/site-packages/amqp/abstract_channel.py index 5e37bf971c..28cfe136e5 100644 --- a/awx/lib/site-packages/amqp/abstract_channel.py +++ b/awx/lib/site-packages/amqp/abstract_channel.py @@ -19,12 +19,6 @@ from __future__ import absolute_import from .exceptions import AMQPNotImplementedError, RecoverableConnectionError from .serialization import AMQPWriter -try: - bytes -except NameError: - # Python 2.5 and lower - bytes = str - __all__ = ['AbstractChannel'] diff --git a/awx/lib/site-packages/amqp/channel.py b/awx/lib/site-packages/amqp/channel.py index ea59f0c1b0..05eb09a206 100644 --- a/awx/lib/site-packages/amqp/channel.py +++ b/awx/lib/site-packages/amqp/channel.py @@ -31,6 +31,15 @@ __all__ = ['Channel'] AMQP_LOGGER = logging.getLogger('amqp') +EXCHANGE_AUTODELETE_DEPRECATED = """\ +The auto_delete flag for exchanges has been deprecated and will be removed +from py-amqp v1.5.0.\ +""" + + +class VDeprecationWarning(DeprecationWarning): + pass + class Channel(AbstractChannel): """Work with channels @@ -604,8 
+613,7 @@ class Channel(AbstractChannel): self._send_method((40, 10), args) if auto_delete: - warn(DeprecationWarning( - 'auto_delete exchanges has been deprecated')) + warn(VDeprecationWarning(EXCHANGE_AUTODELETE_DEPRECATED)) if not nowait: return self.wait(allowed_methods=[ diff --git a/awx/lib/site-packages/amqp/connection.py b/awx/lib/site-packages/amqp/connection.py index 9474ab2a18..c93d91fedc 100644 --- a/awx/lib/site-packages/amqp/connection.py +++ b/awx/lib/site-packages/amqp/connection.py @@ -34,7 +34,7 @@ from .exceptions import ( ConnectionForced, ConnectionError, error_for_code, RecoverableConnectionError, RecoverableChannelError, ) -from .five import items, range, values +from .five import items, range, values, monotonic from .method_framing import MethodReader, MethodWriter from .serialization import AMQPWriter from .transport import create_transport @@ -80,9 +80,26 @@ class Connection(AbstractChannel): """ Channel = Channel + #: Final heartbeat interval value (in float seconds) after negotiation + heartbeat = None + + #: Original heartbeat interval value proposed by client. + client_heartbeat = None + + #: Original heartbeat interval proposed by server. + server_heartbeat = None + + #: Time of last heartbeat sent (in monotonic time, if available). + last_heartbeat_sent = 0 + + #: Time of last heartbeat received (in monotonic time, if available). + last_heartbeat_received = 0 + + #: Number of bytes sent to socket at the last heartbeat check. prev_sent = None + + #: Number of bytes received from socket at the last heartbeat check. prev_recv = None - missed_heartbeats = 0 def __init__(self, host='localhost', userid='guest', password='guest', login_method='AMQPLAIN', login_response=None, @@ -125,7 +142,7 @@ class Connection(AbstractChannel): # Properties set in the Tune method self.channel_max = channel_max self.frame_max = frame_max - self.heartbeat = heartbeat + self.client_heartbeat = heartbeat self.confirm_publish = confirm_publish @@ -840,10 +857,22 @@ class Connection(AbstractChannel): want a heartbeat. """ + client_heartbeat = self.client_heartbeat or 0 self.channel_max = args.read_short() or self.channel_max self.frame_max = args.read_long() or self.frame_max self.method_writer.frame_max = self.frame_max - heartbeat = args.read_short() # noqa + self.server_heartbeat = args.read_short() or 0 + + # negotiate the heartbeat interval to the smaller of the + # specified values + if self.server_heartbeat == 0 or client_heartbeat == 0: + self.heartbeat = max(self.server_heartbeat, client_heartbeat) + else: + self.heartbeat = min(self.server_heartbeat, client_heartbeat) + + # Ignore server heartbeat if client_heartbeat is disabled + if not self.client_heartbeat: + self.heartbeat = 0 self._x_tune_ok(self.channel_max, self.frame_max, self.heartbeat) @@ -851,28 +880,34 @@ class Connection(AbstractChannel): self.transport.write_frame(8, 0, bytes()) def heartbeat_tick(self, rate=2): - """Verify that hartbeats are sent and received. - - :keyword rate: Rate is how often the tick is called - compared to the actual heartbeat value. E.g. if - the heartbeat is set to 3 seconds, and the tick - is called every 3 / 2 seconds, then the rate is 2. + """Send heartbeat packets, if necessary, and fail if none have been + received recently. This should be called frequently, on the order of + once per second. 
+ :keyword rate: Ignored """ + if not self.heartbeat: + return + + # treat actual data exchange in either direction as a heartbeat sent_now = self.method_writer.bytes_sent recv_now = self.method_reader.bytes_recv - - if self.prev_sent is not None and self.prev_sent == sent_now: - self.send_heartbeat() - - if self.prev_recv is not None and self.prev_recv == recv_now: - self.missed_heartbeats += 1 - else: - self.missed_heartbeats = 0 - + if self.prev_sent is None or self.prev_sent != sent_now: + self.last_heartbeat_sent = monotonic() + if self.prev_recv is None or self.prev_recv != recv_now: + self.last_heartbeat_received = monotonic() self.prev_sent, self.prev_recv = sent_now, recv_now - if self.missed_heartbeats >= rate: + # send a heartbeat if it's time to do so + if monotonic() > self.last_heartbeat_sent + self.heartbeat: + self.send_heartbeat() + self.last_heartbeat_sent = monotonic() + + # if we've missed two intervals' heartbeats, fail; this gives the + # server enough time to send heartbeats a little late + if (self.last_heartbeat_received and + self.last_heartbeat_received + 2 * + self.heartbeat < monotonic()): raise ConnectionForced('Too many heartbeats missed') def _x_tune_ok(self, channel_max, frame_max, heartbeat): diff --git a/awx/lib/site-packages/amqp/five.py b/awx/lib/site-packages/amqp/five.py index 25b83fc08e..5157df59a5 100644 --- a/awx/lib/site-packages/amqp/five.py +++ b/awx/lib/site-packages/amqp/five.py @@ -131,3 +131,58 @@ def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): return Type(Class.__name__, Class.__bases__, attrs) return _clone_with_metaclass + +############## time.monotonic ################################################ + +if sys.version_info < (3, 3): + + import platform + SYSTEM = platform.system() + + if SYSTEM == 'Darwin': + import ctypes + from ctypes.util import find_library + libSystem = ctypes.CDLL('libSystem.dylib') + CoreServices = ctypes.CDLL(find_library('CoreServices'), + use_errno=True) + mach_absolute_time = libSystem.mach_absolute_time + mach_absolute_time.restype = ctypes.c_uint64 + absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds + absolute_to_nanoseconds.restype = ctypes.c_uint64 + absolute_to_nanoseconds.argtypes = [ctypes.c_uint64] + + def _monotonic(): + return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9 + + elif SYSTEM == 'Linux': + # from stackoverflow: + # questions/1205722/how-do-i-get-monotonic-time-durations-in-python + import ctypes + import os + + CLOCK_MONOTONIC = 1 # see + + class timespec(ctypes.Structure): + _fields_ = [ + ('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long), + ] + + librt = ctypes.CDLL('librt.so.1', use_errno=True) + clock_gettime = librt.clock_gettime + clock_gettime.argtypes = [ + ctypes.c_int, ctypes.POINTER(timespec), + ] + + def _monotonic(): # noqa + t = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0: + errno_ = ctypes.get_errno() + raise OSError(errno_, os.strerror(errno_)) + return t.tv_sec + t.tv_nsec * 1e-9 + else: + from time import time as _monotonic +try: + from time import monotonic +except ImportError: + monotonic = _monotonic # noqa diff --git a/awx/lib/site-packages/amqp/method_framing.py b/awx/lib/site-packages/amqp/method_framing.py index 85fbfba5dd..b454524727 100644 --- a/awx/lib/site-packages/amqp/method_framing.py +++ b/awx/lib/site-packages/amqp/method_framing.py @@ -19,12 +19,6 @@ from __future__ import absolute_import from collections import defaultdict, deque from struct import pack, unpack -try: - bytes 
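The connection.py changes above replace the missed-heartbeat counter with timestamp bookkeeping (via the time.monotonic backport added to five.py) and negotiate the interval during tuning: the smaller of the client and server proposals wins unless one side proposes 0, and a client value of 0 disables heartbeats entirely. The negotiation rule as a standalone sketch (function name illustrative):

    def negotiate_heartbeat(client, server):
        # Client 0 disables heartbeats outright.
        if not client:
            return 0
        # If the server proposes 0, fall back to the client's value;
        # otherwise take the smaller of the two intervals.
        return client if server == 0 else min(client, server)

    assert negotiate_heartbeat(0, 60) == 0
    assert negotiate_heartbeat(30, 0) == 30
    assert negotiate_heartbeat(30, 10) == 10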
-except NameError: - # Python 2.5 and lower - bytes = str - from .basic_message import Message from .exceptions import AMQPError, UnexpectedFrame from .five import range, string diff --git a/awx/lib/site-packages/amqp/serialization.py b/awx/lib/site-packages/amqp/serialization.py index 6a747029e3..528d0b7fe1 100644 --- a/awx/lib/site-packages/amqp/serialization.py +++ b/awx/lib/site-packages/amqp/serialization.py @@ -25,6 +25,7 @@ import sys from datetime import datetime from decimal import Decimal +from io import BytesIO from struct import pack, unpack from time import mktime @@ -39,19 +40,6 @@ if IS_PY3K: else: byte = chr -try: - from io import BytesIO -except ImportError: # Py2.5 - try: - from cStringIO import StringIO as BytesIO # noqa - except ImportError: - from StringIO import StringIO as BytesIO # noqa - -try: - bytes -except NameError: - # Python 2.5 and lower - bytes = str ILLEGAL_TABLE_TYPE_WITH_KEY = """\ Table type {0!r} for key {1!r} not handled by amqp. [value: {2!r}] @@ -174,6 +162,8 @@ class AMQPReader(object): val = self.read_bit() elif ftype == 100: val = self.read_float() + elif ftype == 86: # 'V' + val = None else: raise FrameSyntaxError( 'Unknown value in table: {0!r} ({1!r})'.format( @@ -357,6 +347,8 @@ class AMQPWriter(object): elif isinstance(v, (list, tuple)): self.write(b'A') self.write_array(v) + elif v is None: + self.write(b'V') else: err = (ILLEGAL_TABLE_TYPE_WITH_KEY.format(type(v), k, v) if k else ILLEGAL_TABLE_TYPE.format(type(v), v)) diff --git a/awx/lib/site-packages/amqp/transport.py b/awx/lib/site-packages/amqp/transport.py index 975ced16b7..8acdd4afc6 100644 --- a/awx/lib/site-packages/amqp/transport.py +++ b/awx/lib/site-packages/amqp/transport.py @@ -1,9 +1,3 @@ -""" -Read/Write AMQP frames over network transports. 
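serialization.py above adds the AMQP 'V' (void) field type on both the read and write paths, so None values can round-trip through frame tables instead of hitting the illegal-table-type error. A quick round-trip sketch, assuming the vendored classes behave as the hunk shows (with AMQPWriter.getvalue() returning the encoded bytes):

    from amqp.serialization import AMQPReader, AMQPWriter

    w = AMQPWriter()
    w.write_table({'x-extension-arg': None})   # None is encoded as 'V'
    r = AMQPReader(w.getvalue())
    assert r.read_table() == {'x-extension-arg': None}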
- -2009-01-14 Barry Pederson - -""" # Copyright (C) 2009 Barry Pederson # # This library is free software; you can redistribute it and/or @@ -24,6 +18,7 @@ from __future__ import absolute_import import errno import re import socket +import ssl # Jython does not have this attribute try: @@ -31,27 +26,18 @@ try: except ImportError: # pragma: no cover from socket import IPPROTO_TCP as SOL_TCP # noqa -# -# See if Python 2.6+ SSL support is available -# try: - import ssl - HAVE_PY26_SSL = True -except: - HAVE_PY26_SSL = False - -try: - bytes -except: - # Python 2.5 and lower - bytes = str + from ssl import SSLError +except ImportError: + class SSLError(Exception): # noqa + pass from struct import pack, unpack from .exceptions import UnexpectedFrame from .utils import get_errno, set_cloexec -_UNAVAIL = errno.EAGAIN, errno.EINTR +_UNAVAIL = errno.EAGAIN, errno.EINTR, errno.ENOENT AMQP_PORT = 5672 @@ -166,6 +152,10 @@ class _AbstractTransport(object): except socket.timeout: raise except (OSError, IOError, socket.error) as exc: + # Don't disconnect for ssl read time outs + # http://bugs.python.org/issue10272 + if isinstance(exc, SSLError) and 'timed out' in str(exc): + raise socket.timeout() if get_errno(exc) not in _UNAVAIL: self.connected = False raise @@ -200,22 +190,17 @@ class SSLTransport(_AbstractTransport): super(SSLTransport, self).__init__(host, connect_timeout) def _setup_transport(self): - """Wrap the socket in an SSL object, either the - new Python 2.6 version, or the older Python 2.5 and - lower version.""" - if HAVE_PY26_SSL: - if hasattr(self, 'sslopts'): - self.sock = ssl.wrap_socket(self.sock, **self.sslopts) - else: - self.sock = ssl.wrap_socket(self.sock) - self.sock.do_handshake() + """Wrap the socket in an SSL object.""" + if hasattr(self, 'sslopts'): + self.sock = ssl.wrap_socket(self.sock, **self.sslopts) else: - self.sock = socket.ssl(self.sock) + self.sock = ssl.wrap_socket(self.sock) + self.sock.do_handshake() self._quick_recv = self.sock.read def _shutdown_transport(self): """Unwrap a Python 2.6 SSL socket, so we can call shutdown()""" - if HAVE_PY26_SSL and self.sock is not None: + if self.sock is not None: try: unwrap = self.sock.unwrap except AttributeError: @@ -232,7 +217,7 @@ class SSLTransport(_AbstractTransport): try: while len(rbuf) < n: try: - s = recv(131072) # see note above + s = recv(n - len(rbuf)) # see note above except socket.error as exc: # ssl.sock.read may cause ENOENT if the # operation couldn't be performed (Issue celery#1414). 
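Both _read() loops above now request only the bytes still missing (recv(n - len(rbuf))) instead of a fixed 128 KB, and SSL read timeouts are re-raised as socket.timeout rather than marking the connection dead. The exact-read loop as a standalone sketch (helper name illustrative):

    import socket

    def read_exact(recv, n):
        # Accumulate exactly n bytes, asking the peer only for the
        # remainder on each pass -- the shape of both recv hunks.
        buf = b''
        while len(buf) < n:
            chunk = recv(n - len(buf))
            if not chunk:
                raise socket.error('connection closed mid-frame')
            buf += chunk
        return buf

    # e.g. frame_header = read_exact(sock.recv, 7)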
@@ -275,7 +260,7 @@ class TCPTransport(_AbstractTransport): try: while len(rbuf) < n: try: - s = recv(131072) + s = recv(n - len(rbuf)) except socket.error as exc: if not initial and exc.errno in _errnos: continue diff --git a/awx/lib/site-packages/amqp/utils.py b/awx/lib/site-packages/amqp/utils.py index 994030b351..900d2aace4 100644 --- a/awx/lib/site-packages/amqp/utils.py +++ b/awx/lib/site-packages/amqp/utils.py @@ -11,7 +11,8 @@ except ImportError: class promise(object): if not hasattr(sys, 'pypy_version_info'): __slots__ = tuple( - 'fun args kwargs value ready failed on_success on_error'.split() + 'fun args kwargs value ready failed ' + ' on_success on_error calls'.split() ) def __init__(self, fun, args=(), kwargs=(), @@ -24,6 +25,7 @@ class promise(object): self.on_success = on_success self.on_error = on_error self.value = None + self.calls = 0 def __repr__(self): return '<$: {0.fun.__name__}(*{0.args!r}, **{0.kwargs!r})'.format( @@ -43,6 +45,7 @@ class promise(object): self.on_success(self.value) finally: self.ready = True + self.calls += 1 def then(self, callback=None, on_error=None): self.on_success = callback diff --git a/awx/lib/site-packages/billiard/__init__.py b/awx/lib/site-packages/billiard/__init__.py index b966c36fc6..3a15224653 100644 --- a/awx/lib/site-packages/billiard/__init__.py +++ b/awx/lib/site-packages/billiard/__init__.py @@ -19,7 +19,7 @@ from __future__ import absolute_import -VERSION = (3, 3, 0, 13) +VERSION = (3, 3, 0, 16) __version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:]) __author__ = 'R Oudkerk / Python Software Foundation' __author_email__ = 'python-dev@python.org' diff --git a/awx/lib/site-packages/billiard/_connection3.py b/awx/lib/site-packages/billiard/_connection3.py deleted file mode 100644 index f327f9244d..0000000000 --- a/awx/lib/site-packages/billiard/_connection3.py +++ /dev/null @@ -1,964 +0,0 @@ -# -# A higher level module for using sockets (or Windows named pipes) -# -# multiprocessing/connection.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# -from __future__ import absolute_import - -__all__ = ['Client', 'Listener', 'Pipe', 'wait'] - -import io -import os -import sys -import select -import socket -import struct -import errno -import tempfile -import itertools - -import _multiprocessing -from .compat import setblocking -from .exceptions import AuthenticationError, BufferTooShort -from .five import monotonic -from .util import get_temp_dir, Finalize, sub_debug -from .reduction import ForkingPickler -try: - import _winapi - from _winapi import ( - WAIT_OBJECT_0, - WAIT_TIMEOUT, - INFINITE, - ) - # if we got here, we seem to be running on Windows. Handle probably - # missing WAIT_ABANDONED_0 constant: - try: - from _winapi import WAIT_ABANDONED_0 - except ImportError: - WAIT_ABANDONED_0 = 128 # _winapi seems to be not exporting - # this constant, fallback solution until - # exported in _winapi -except ImportError: - if sys.platform == 'win32': - raise - _winapi = None - -# -# -# - -BUFSIZE = 8192 -# A very generous timeout when it comes to local connections... -CONNECTION_TIMEOUT = 20. 
- -_mmap_counter = itertools.count() - -default_family = 'AF_INET' -families = ['AF_INET'] - -if hasattr(socket, 'AF_UNIX'): - default_family = 'AF_UNIX' - families += ['AF_UNIX'] - -if sys.platform == 'win32': - default_family = 'AF_PIPE' - families += ['AF_PIPE'] - - -def _init_timeout(timeout=CONNECTION_TIMEOUT): - return monotonic() + timeout - - -def _check_timeout(t): - return monotonic() > t - - -def arbitrary_address(family): - ''' - Return an arbitrary free address for the given family - ''' - if family == 'AF_INET': - return ('localhost', 0) - elif family == 'AF_UNIX': - return tempfile.mktemp(prefix='listener-', dir=get_temp_dir()) - elif family == 'AF_PIPE': - return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % - (os.getpid(), next(_mmap_counter))) - else: - raise ValueError('unrecognized family') - - -def _validate_family(family): - ''' - Checks if the family is valid for the current environment. - ''' - if sys.platform != 'win32' and family == 'AF_PIPE': - raise ValueError('Family %s is not recognized.' % family) - - if sys.platform == 'win32' and family == 'AF_UNIX': - # double check - if not hasattr(socket, family): - raise ValueError('Family %s is not recognized.' % family) - - -def address_type(address): - ''' - Return the types of the address - - This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' - ''' - if type(address) == tuple: - return 'AF_INET' - elif type(address) is str and address.startswith('\\\\'): - return 'AF_PIPE' - elif type(address) is str: - return 'AF_UNIX' - else: - raise ValueError('address type of %r unrecognized' % address) - -# -# Connection classes -# - - -class _ConnectionBase: - _handle = None - - def __init__(self, handle, readable=True, writable=True): - handle = handle.__index__() - if handle < 0: - raise ValueError("invalid handle") - if not readable and not writable: - raise ValueError( - "at least one of `readable` and `writable` must be True") - self._handle = handle - self._readable = readable - self._writable = writable - - # XXX should we use util.Finalize instead of a __del__? - - def __del__(self): - if self._handle is not None: - self._close() - - def _check_closed(self): - if self._handle is None: - raise OSError("handle is closed") - - def _check_readable(self): - if not self._readable: - raise OSError("connection is write-only") - - def _check_writable(self): - if not self._writable: - raise OSError("connection is read-only") - - def _bad_message_length(self): - if self._writable: - self._readable = False - else: - self.close() - raise OSError("bad message length") - - @property - def closed(self): - """True if the connection is closed""" - return self._handle is None - - @property - def readable(self): - """True if the connection is readable""" - return self._readable - - @property - def writable(self): - """True if the connection is writable""" - return self._writable - - def fileno(self): - """File descriptor or handle of the connection""" - self._check_closed() - return self._handle - - def close(self): - """Close the connection""" - if self._handle is not None: - try: - self._close() - finally: - self._handle = None - - def send_bytes(self, buf, offset=0, size=None): - """Send the bytes data from a bytes-like object""" - self._check_closed() - self._check_writable() - m = memoryview(buf) - # HACK for byte-indexing of non-bytewise buffers (e.g. 
array.array) - if m.itemsize > 1: - m = memoryview(bytes(m)) - n = len(m) - if offset < 0: - raise ValueError("offset is negative") - if n < offset: - raise ValueError("buffer length < offset") - if size is None: - size = n - offset - elif size < 0: - raise ValueError("size is negative") - elif offset + size > n: - raise ValueError("buffer length < offset + size") - self._send_bytes(m[offset:offset + size]) - - def send(self, obj): - """Send a (picklable) object""" - self._check_closed() - self._check_writable() - self._send_bytes(ForkingPickler.dumps(obj)) - - def recv_bytes(self, maxlength=None): - """ - Receive bytes data as a bytes object. - """ - self._check_closed() - self._check_readable() - if maxlength is not None and maxlength < 0: - raise ValueError("negative maxlength") - buf = self._recv_bytes(maxlength) - if buf is None: - self._bad_message_length() - return buf.getvalue() - - def recv_bytes_into(self, buf, offset=0): - """ - Receive bytes data into a writeable buffer-like object. - Return the number of bytes read. - """ - self._check_closed() - self._check_readable() - with memoryview(buf) as m: - # Get bytesize of arbitrary buffer - itemsize = m.itemsize - bytesize = itemsize * len(m) - if offset < 0: - raise ValueError("negative offset") - elif offset > bytesize: - raise ValueError("offset too large") - result = self._recv_bytes() - size = result.tell() - if bytesize < offset + size: - raise BufferTooShort(result.getvalue()) - # Message can fit in dest - result.seek(0) - result.readinto( - m[offset // itemsize:(offset + size) // itemsize] - ) - return size - - def recv_payload(self): - return self._recv_bytes().getbuffer() - - def recv(self): - """Receive a (picklable) object""" - self._check_closed() - self._check_readable() - buf = self._recv_bytes() - return ForkingPickler.loads(buf.getbuffer()) - - def poll(self, timeout=0.0): - """Whether there is any input available to be read""" - self._check_closed() - self._check_readable() - return self._poll(timeout) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - self.close() - - -if _winapi: - - class PipeConnection(_ConnectionBase): - """ - Connection class based on a Windows named pipe. - Overlapped I/O is used, so the handles must have been created - with FILE_FLAG_OVERLAPPED. 
- """ - _got_empty_message = False - - def _close(self, _CloseHandle=_winapi.CloseHandle): - _CloseHandle(self._handle) - - def _send_bytes(self, buf): - ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) - try: - if err == _winapi.ERROR_IO_PENDING: - waitres = _winapi.WaitForMultipleObjects( - [ov.event], False, INFINITE) - assert waitres == WAIT_OBJECT_0 - except: - ov.cancel() - raise - finally: - nwritten, err = ov.GetOverlappedResult(True) - assert err == 0 - assert nwritten == len(buf) - - def _recv_bytes(self, maxsize=None): - if self._got_empty_message: - self._got_empty_message = False - return io.BytesIO() - else: - bsize = 128 if maxsize is None else min(maxsize, 128) - try: - ov, err = _winapi.ReadFile(self._handle, bsize, - overlapped=True) - try: - if err == _winapi.ERROR_IO_PENDING: - waitres = _winapi.WaitForMultipleObjects( - [ov.event], False, INFINITE) - assert waitres == WAIT_OBJECT_0 - except: - ov.cancel() - raise - finally: - nread, err = ov.GetOverlappedResult(True) - if err == 0: - f = io.BytesIO() - f.write(ov.getbuffer()) - return f - elif err == _winapi.ERROR_MORE_DATA: - return self._get_more_data(ov, maxsize) - except OSError as e: - if e.winerror == _winapi.ERROR_BROKEN_PIPE: - raise EOFError - else: - raise - raise RuntimeError( - "shouldn't get here; expected KeyboardInterrupt" - ) - - def _poll(self, timeout): - if (self._got_empty_message or - _winapi.PeekNamedPipe(self._handle)[0] != 0): - return True - return bool(wait([self], timeout)) - - def _get_more_data(self, ov, maxsize): - buf = ov.getbuffer() - f = io.BytesIO() - f.write(buf) - left = _winapi.PeekNamedPipe(self._handle)[1] - assert left > 0 - if maxsize is not None and len(buf) + left > maxsize: - self._bad_message_length() - ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) - rbytes, err = ov.GetOverlappedResult(True) - assert err == 0 - assert rbytes == left - f.write(ov.getbuffer()) - return f - - -class Connection(_ConnectionBase): - """ - Connection class based on an arbitrary file descriptor (Unix only), or - a socket handle (Windows). - """ - - if _winapi: - def _close(self, _close=_multiprocessing.closesocket): - _close(self._handle) - _write = _multiprocessing.send - _read = _multiprocessing.recv - else: - def _close(self, _close=os.close): # noqa - _close(self._handle) - _write = os.write - _read = os.read - - def send_offset(self, buf, offset, write=_write): - return write(self._handle, buf[offset:]) - - def _send(self, buf, write=_write): - remaining = len(buf) - while True: - try: - n = write(self._handle, buf) - except OSError as exc: - if exc.errno == errno.EINTR: - continue - raise - remaining -= n - if remaining == 0: - break - buf = buf[n:] - - def setblocking(self, blocking): - setblocking(self._handle, blocking) - - def _recv(self, size, read=_read): - buf = io.BytesIO() - handle = self._handle - remaining = size - while remaining > 0: - try: - chunk = read(handle, remaining) - except OSError as exc: - if exc.errno == errno.EINTR: - continue - raise - n = len(chunk) - if n == 0: - if remaining == size: - raise EOFError - else: - raise OSError("got end of file during message") - buf.write(chunk) - remaining -= n - return buf - - def _send_bytes(self, buf): - # For wire compatibility with 3.2 and lower - n = len(buf) - self._send(struct.pack("!i", n)) - # The condition is necessary to avoid "broken pipe" errors - # when sending a 0-length buffer if the other end closed the pipe. 
- if n > 0: - self._send(buf) - - def _recv_bytes(self, maxsize=None): - buf = self._recv(4) - size, = struct.unpack("!i", buf.getvalue()) - if maxsize is not None and size > maxsize: - return None - return self._recv(size) - - def _poll(self, timeout): - r = wait([self], timeout) - return bool(r) - - -# -# Public functions -# - -class Listener(object): - ''' - Returns a listener object. - - This is a wrapper for a bound socket which is 'listening' for - connections, or for a Windows named pipe. - ''' - def __init__(self, address=None, family=None, backlog=1, authkey=None): - family = (family or (address and address_type(address)) - or default_family) - address = address or arbitrary_address(family) - - _validate_family(family) - if family == 'AF_PIPE': - self._listener = PipeListener(address, backlog) - else: - self._listener = SocketListener(address, family, backlog) - - if authkey is not None and not isinstance(authkey, bytes): - raise TypeError('authkey should be a byte string') - - self._authkey = authkey - - def accept(self): - ''' - Accept a connection on the bound socket or named pipe of `self`. - - Returns a `Connection` object. - ''' - if self._listener is None: - raise OSError('listener is closed') - c = self._listener.accept() - if self._authkey: - deliver_challenge(c, self._authkey) - answer_challenge(c, self._authkey) - return c - - def close(self): - ''' - Close the bound socket or named pipe of `self`. - ''' - if self._listener is not None: - self._listener.close() - self._listener = None - - address = property(lambda self: self._listener._address) - last_accepted = property(lambda self: self._listener._last_accepted) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - self.close() - - -def Client(address, family=None, authkey=None): - ''' - Returns a connection to the address of a `Listener` - ''' - family = family or address_type(address) - _validate_family(family) - if family == 'AF_PIPE': - c = PipeClient(address) - else: - c = SocketClient(address) - - if authkey is not None and not isinstance(authkey, bytes): - raise TypeError('authkey should be a byte string') - - if authkey is not None: - answer_challenge(c, authkey) - deliver_challenge(c, authkey) - - return c - - -if sys.platform != 'win32': - - def Pipe(duplex=True, rnonblock=False, wnonblock=False): - ''' - Returns pair of connection objects at either end of a pipe - ''' - if duplex: - s1, s2 = socket.socketpair() - s1.setblocking(not rnonblock) - s2.setblocking(not wnonblock) - c1 = Connection(s1.detach()) - c2 = Connection(s2.detach()) - else: - fd1, fd2 = os.pipe() - if rnonblock: - setblocking(fd1, 0) - if wnonblock: - setblocking(fd2, 0) - c1 = Connection(fd1, writable=False) - c2 = Connection(fd2, readable=False) - - return c1, c2 - -else: - from billiard.forking import duplicate - - def Pipe(duplex=True, rnonblock=False, wnonblock=False): # noqa - ''' - Returns pair of connection objects at either end of a pipe - ''' - address = arbitrary_address('AF_PIPE') - if duplex: - openmode = _winapi.PIPE_ACCESS_DUPLEX - access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE - obsize, ibsize = BUFSIZE, BUFSIZE - else: - openmode = _winapi.PIPE_ACCESS_INBOUND - access = _winapi.GENERIC_WRITE - obsize, ibsize = 0, BUFSIZE - - h1 = _winapi.CreateNamedPipe( - address, openmode | _winapi.FILE_FLAG_OVERLAPPED | - _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, - _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | - _winapi.PIPE_WAIT, - 1, obsize, ibsize, 
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL - ) - h2 = _winapi.CreateFile( - address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, - _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL - ) - _winapi.SetNamedPipeHandleState( - h2, _winapi.PIPE_READMODE_MESSAGE, None, None - ) - - overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) - _, err = overlapped.GetOverlappedResult(True) - assert err == 0 - - c1 = PipeConnection(duplicate(h1, inheritable=True), writable=duplex) - c2 = PipeConnection(duplicate(h2, inheritable=True), readable=duplex) - _winapi.CloseHandle(h1) - _winapi.CloseHandle(h2) - return c1, c2 - -# -# Definitions for connections based on sockets -# - - -class SocketListener(object): - ''' - Representation of a socket which is bound to an address and listening - ''' - def __init__(self, address, family, backlog=1): - self._socket = socket.socket(getattr(socket, family)) - try: - # SO_REUSEADDR has different semantics on Windows (issue #2550). - if os.name == 'posix': - self._socket.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, 1) - self._socket.setblocking(True) - self._socket.bind(address) - self._socket.listen(backlog) - self._address = self._socket.getsockname() - except OSError: - self._socket.close() - raise - self._family = family - self._last_accepted = None - - if family == 'AF_UNIX': - self._unlink = Finalize( - self, os.unlink, args=(address, ), exitpriority=0 - ) - else: - self._unlink = None - - def accept(self): - while True: - try: - s, self._last_accepted = self._socket.accept() - except OSError as exc: - if exc.errno == errno.EINTR: - continue - raise - else: - break - s.setblocking(True) - return Connection(s.detach()) - - def close(self): - self._socket.close() - if self._unlink is not None: - self._unlink() - - -def SocketClient(address): - ''' - Return a connection object connected to the socket given by `address` - ''' - family = address_type(address) - with socket.socket(getattr(socket, family)) as s: - s.setblocking(True) - s.connect(address) - return Connection(s.detach()) - -# -# Definitions for connections based on named pipes -# - -if sys.platform == 'win32': - - class PipeListener(object): - ''' - Representation of a named pipe - ''' - def __init__(self, address, backlog=None): - self._address = address - self._handle_queue = [self._new_handle(first=True)] - - self._last_accepted = None - sub_debug('listener created with address=%r', self._address) - self.close = Finalize( - self, PipeListener._finalize_pipe_listener, - args=(self._handle_queue, self._address), exitpriority=0 - ) - - def _new_handle(self, first=False): - flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED - if first: - flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE - return _winapi.CreateNamedPipe( - self._address, flags, - _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | - _winapi.PIPE_WAIT, - _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, - _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL - ) - - def accept(self): - self._handle_queue.append(self._new_handle()) - handle = self._handle_queue.pop(0) - try: - ov = _winapi.ConnectNamedPipe(handle, overlapped=True) - except OSError as e: - if e.winerror != _winapi.ERROR_NO_DATA: - raise - # ERROR_NO_DATA can occur if a client has already connected, - # written data and then disconnected -- see Issue 14725. 
- else: - try: - _winapi.WaitForMultipleObjects([ov.event], False, INFINITE) - except: - ov.cancel() - _winapi.CloseHandle(handle) - raise - finally: - _, err = ov.GetOverlappedResult(True) - assert err == 0 - return PipeConnection(handle) - - @staticmethod - def _finalize_pipe_listener(queue, address): - sub_debug('closing listener with address=%r', address) - for handle in queue: - _winapi.CloseHandle(handle) - - def PipeClient(address, - errors=(_winapi.ERROR_SEM_TIMEOUT, - _winapi.ERROR_PIPE_BUSY)): - ''' - Return a connection object connected to the pipe given by `address` - ''' - t = _init_timeout() - while 1: - try: - _winapi.WaitNamedPipe(address, 1000) - h = _winapi.CreateFile( - address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, - 0, _winapi.NULL, _winapi.OPEN_EXISTING, - _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL - ) - except OSError as e: - if e.winerror not in errors or _check_timeout(t): - raise - else: - break - else: - raise - - _winapi.SetNamedPipeHandleState( - h, _winapi.PIPE_READMODE_MESSAGE, None, None - ) - return PipeConnection(h) - -# -# Authentication stuff -# - -MESSAGE_LENGTH = 20 - -CHALLENGE = b'#CHALLENGE#' -WELCOME = b'#WELCOME#' -FAILURE = b'#FAILURE#' - - -def deliver_challenge(connection, authkey): - import hmac - assert isinstance(authkey, bytes) - message = os.urandom(MESSAGE_LENGTH) - connection.send_bytes(CHALLENGE + message) - digest = hmac.new(authkey, message).digest() - response = connection.recv_bytes(256) # reject large message - if response == digest: - connection.send_bytes(WELCOME) - else: - connection.send_bytes(FAILURE) - raise AuthenticationError('digest received was wrong') - - -def answer_challenge(connection, authkey): - import hmac - assert isinstance(authkey, bytes) - message = connection.recv_bytes(256) # reject large message - assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message - message = message[len(CHALLENGE):] - digest = hmac.new(authkey, message).digest() - connection.send_bytes(digest) - response = connection.recv_bytes(256) # reject large message - if response != WELCOME: - raise AuthenticationError('digest sent was rejected') - -# -# Support for using xmlrpclib for serialization -# - - -class ConnectionWrapper(object): - - def __init__(self, conn, dumps, loads): - self._conn = conn - self._dumps = dumps - self._loads = loads - for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): - obj = getattr(conn, attr) - setattr(self, attr, obj) - - def send(self, obj): - s = self._dumps(obj) - self._conn.send_bytes(s) - - def recv(self): - s = self._conn.recv_bytes() - return self._loads(s) - - -def _xml_dumps(obj): - return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') # noqa - - -def _xml_loads(s): - (obj,), method = xmlrpclib.loads(s.decode('utf-8')) # noqa - return obj - - -class XmlListener(Listener): - def accept(self): - global xmlrpclib - import xmlrpc.client as xmlrpclib # noqa - obj = Listener.accept(self) - return ConnectionWrapper(obj, _xml_dumps, _xml_loads) - - -def XmlClient(*args, **kwds): - global xmlrpclib - import xmlrpc.client as xmlrpclib # noqa - return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) - -# -# Wait -# - -if sys.platform == 'win32': - - def _exhaustive_wait(handles, timeout): - # Return ALL handles which are currently signalled. (Only - # returning the first signalled might create starvation issues.) 
- L = list(handles) - ready = [] - while L: - res = _winapi.WaitForMultipleObjects(L, False, timeout) - if res == WAIT_TIMEOUT: - break - elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): - res -= WAIT_OBJECT_0 - elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): - res -= WAIT_ABANDONED_0 - else: - raise RuntimeError('Should not get here') - ready.append(L[res]) - L = L[res+1:] - timeout = 0 - return ready - - _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} - - def wait(object_list, timeout=None): - ''' - Wait till an object in object_list is ready/readable. - - Returns list of those objects in object_list which are ready/readable. - ''' - if timeout is None: - timeout = INFINITE - elif timeout < 0: - timeout = 0 - else: - timeout = int(timeout * 1000 + 0.5) - - object_list = list(object_list) - waithandle_to_obj = {} - ov_list = [] - ready_objects = set() - ready_handles = set() - - try: - for o in object_list: - try: - fileno = getattr(o, 'fileno') - except AttributeError: - waithandle_to_obj[o.__index__()] = o - else: - # start an overlapped read of length zero - try: - ov, err = _winapi.ReadFile(fileno(), 0, True) - except OSError as e: - err = e.winerror - if err not in _ready_errors: - raise - if err == _winapi.ERROR_IO_PENDING: - ov_list.append(ov) - waithandle_to_obj[ov.event] = o - else: - # If o.fileno() is an overlapped pipe handle and - # err == 0 then there is a zero length message - # in the pipe, but it HAS NOT been consumed. - ready_objects.add(o) - timeout = 0 - - ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) - finally: - # request that overlapped reads stop - for ov in ov_list: - ov.cancel() - - # wait for all overlapped reads to stop - for ov in ov_list: - try: - _, err = ov.GetOverlappedResult(True) - except OSError as e: - err = e.winerror - if err not in _ready_errors: - raise - if err != _winapi.ERROR_OPERATION_ABORTED: - o = waithandle_to_obj[ov.event] - ready_objects.add(o) - if err == 0: - # If o.fileno() is an overlapped pipe handle then - # a zero length message HAS been consumed. - if hasattr(o, '_got_empty_message'): - o._got_empty_message = True - - ready_objects.update(waithandle_to_obj[h] for h in ready_handles) - return [o for o in object_list if o in ready_objects] - -else: - - if hasattr(select, 'poll'): - def _poll(fds, timeout): - if timeout is not None: - timeout = int(timeout * 1000) # timeout is in milliseconds - fd_map = {} - pollster = select.poll() - for fd in fds: - pollster.register(fd, select.POLLIN) - if hasattr(fd, 'fileno'): - fd_map[fd.fileno()] = fd - else: - fd_map[fd] = fd - ls = [] - for fd, event in pollster.poll(timeout): - if event & select.POLLNVAL: - raise ValueError('invalid file descriptor %i' % fd) - ls.append(fd_map[fd]) - return ls - else: - def _poll(fds, timeout): # noqa - return select.select(fds, [], [], timeout)[0] - - def wait(object_list, timeout=None): # noqa - ''' - Wait till an object in object_list is ready/readable. - - Returns list of those objects in object_list which are ready/readable. 
- ''' - if timeout is not None: - if timeout <= 0: - return _poll(object_list, 0) - else: - deadline = monotonic() + timeout - while True: - try: - return _poll(object_list, timeout) - except OSError as e: - if e.errno != errno.EINTR: - raise - if timeout is not None: - timeout = deadline - monotonic() diff --git a/awx/lib/site-packages/billiard/_reduction3.py b/awx/lib/site-packages/billiard/_reduction3.py deleted file mode 100644 index c6d9bd272d..0000000000 --- a/awx/lib/site-packages/billiard/_reduction3.py +++ /dev/null @@ -1,249 +0,0 @@ -# -# Module which deals with pickling of objects. -# -# multiprocessing/reduction.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# -from __future__ import absolute_import - -import copyreg -import functools -import io -import os -import pickle -import socket -import sys - -__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] - - -HAVE_SEND_HANDLE = (sys.platform == 'win32' or - (hasattr(socket, 'CMSG_LEN') and - hasattr(socket, 'SCM_RIGHTS') and - hasattr(socket.socket, 'sendmsg'))) - -# -# Pickler subclass -# - - -class ForkingPickler(pickle.Pickler): - '''Pickler subclass used by multiprocessing.''' - _extra_reducers = {} - _copyreg_dispatch_table = copyreg.dispatch_table - - def __init__(self, *args): - super().__init__(*args) - self.dispatch_table = self._copyreg_dispatch_table.copy() - self.dispatch_table.update(self._extra_reducers) - - @classmethod - def register(cls, type, reduce): - '''Register a reduce function for a type.''' - cls._extra_reducers[type] = reduce - - @classmethod - def dumps(cls, obj, protocol=None): - buf = io.BytesIO() - cls(buf, protocol).dump(obj) - return buf.getbuffer() - - loads = pickle.loads - -register = ForkingPickler.register - - -def dump(obj, file, protocol=None): - '''Replacement for pickle.dump() using ForkingPickler.''' - ForkingPickler(file, protocol).dump(obj) - -# -# Platform specific definitions -# - -if sys.platform == 'win32': - # Windows - __all__ += ['DupHandle', 'duplicate', 'steal_handle'] - import _winapi - - def duplicate(handle, target_process=None, inheritable=False): - '''Duplicate a handle. (target_process is a handle not a pid!)''' - if target_process is None: - target_process = _winapi.GetCurrentProcess() - return _winapi.DuplicateHandle( - _winapi.GetCurrentProcess(), handle, target_process, - 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) - - def steal_handle(source_pid, handle): - '''Steal a handle from process identified by source_pid.''' - source_process_handle = _winapi.OpenProcess( - _winapi.PROCESS_DUP_HANDLE, False, source_pid) - try: - return _winapi.DuplicateHandle( - source_process_handle, handle, - _winapi.GetCurrentProcess(), 0, False, - _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) - finally: - _winapi.CloseHandle(source_process_handle) - - def send_handle(conn, handle, destination_pid): - '''Send a handle over a local connection.''' - dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) - conn.send(dh) - - def recv_handle(conn): - '''Receive a handle over a local connection.''' - return conn.recv().detach() - - class DupHandle(object): - '''Picklable wrapper for a handle.''' - def __init__(self, handle, access, pid=None): - if pid is None: - # We just duplicate the handle in the current process and - # let the receiving process steal the handle. 
- pid = os.getpid() - proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) - try: - self._handle = _winapi.DuplicateHandle( - _winapi.GetCurrentProcess(), - handle, proc, access, False, 0) - finally: - _winapi.CloseHandle(proc) - self._access = access - self._pid = pid - - def detach(self): - '''Get the handle. This should only be called once.''' - # retrieve handle from process which currently owns it - if self._pid == os.getpid(): - # The handle has already been duplicated for this process. - return self._handle - # We must steal the handle from the process whose pid is self._pid. - proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, - self._pid) - try: - return _winapi.DuplicateHandle( - proc, self._handle, _winapi.GetCurrentProcess(), - self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) - finally: - _winapi.CloseHandle(proc) - -else: - # Unix - __all__ += ['DupFd', 'sendfds', 'recvfds'] - import array - - # On MacOSX we should acknowledge receipt of fds -- see Issue14669 - ACKNOWLEDGE = sys.platform == 'darwin' - - def sendfds(sock, fds): - '''Send an array of fds over an AF_UNIX socket.''' - fds = array.array('i', fds) - msg = bytes([len(fds) % 256]) - sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) - if ACKNOWLEDGE and sock.recv(1) != b'A': - raise RuntimeError('did not receive acknowledgement of fd') - - def recvfds(sock, size): - '''Receive an array of fds over an AF_UNIX socket.''' - a = array.array('i') - bytes_size = a.itemsize * size - msg, ancdata, flags, addr = sock.recvmsg( - 1, socket.CMSG_LEN(bytes_size), - ) - if not msg and not ancdata: - raise EOFError - try: - if ACKNOWLEDGE: - sock.send(b'A') - if len(ancdata) != 1: - raise RuntimeError( - 'received %d items of ancdata' % len(ancdata), - ) - cmsg_level, cmsg_type, cmsg_data = ancdata[0] - if (cmsg_level == socket.SOL_SOCKET and - cmsg_type == socket.SCM_RIGHTS): - if len(cmsg_data) % a.itemsize != 0: - raise ValueError - a.frombytes(cmsg_data) - assert len(a) % 256 == msg[0] - return list(a) - except (ValueError, IndexError): - pass - raise RuntimeError('Invalid data received') - - def send_handle(conn, handle, destination_pid): # noqa - '''Send a handle over a local connection.''' - fd = conn.fileno() - with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: - sendfds(s, [handle]) - - def recv_handle(conn): # noqa - '''Receive a handle over a local connection.''' - fd = conn.fileno() - with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: - return recvfds(s, 1)[0] - - def DupFd(fd): - '''Return a wrapper for an fd.''' - from .forking import Popen - return Popen.duplicate_for_child(fd) - -# -# Try making some callable types picklable -# - - -def _reduce_method(m): - if m.__self__ is None: - return getattr, (m.__class__, m.__func__.__name__) - else: - return getattr, (m.__self__, m.__func__.__name__) - - -class _C: - def f(self): - pass -register(type(_C().f), _reduce_method) - - -def _reduce_method_descriptor(m): - return getattr, (m.__objclass__, m.__name__) -register(type(list.append), _reduce_method_descriptor) -register(type(int.__add__), _reduce_method_descriptor) - - -def _reduce_partial(p): - return _rebuild_partial, (p.func, p.args, p.keywords or {}) - - -def _rebuild_partial(func, args, keywords): - return functools.partial(func, *args, **keywords) -register(functools.partial, _reduce_partial) - -# -# Make sockets picklable -# - -if sys.platform == 'win32': - - def _reduce_socket(s): - from .resource_sharer import DupSocket - return 
_rebuild_socket, (DupSocket(s),) - - def _rebuild_socket(ds): - return ds.detach() - register(socket.socket, _reduce_socket) - -else: - - def _reduce_socket(s): # noqa - df = DupFd(s.fileno()) - return _rebuild_socket, (df, s.family, s.type, s.proto) - - def _rebuild_socket(df, family, type, proto): # noqa - fd = df.detach() - return socket.socket(family, type, proto, fileno=fd) - register(socket.socket, _reduce_socket) diff --git a/awx/lib/site-packages/billiard/compat.py b/awx/lib/site-packages/billiard/compat.py index 1300e33734..aac4b7c6fe 100644 --- a/awx/lib/site-packages/billiard/compat.py +++ b/awx/lib/site-packages/billiard/compat.py @@ -4,7 +4,7 @@ import errno import os import sys -from .five import builtins, range +from .five import range if sys.platform == 'win32': try: @@ -45,10 +45,9 @@ else: # non-posix platform if sys.version_info[0] == 3: bytes = bytes else: - try: - _bytes = builtins.bytes - except AttributeError: - _bytes = str + _bytes = bytes + + # the 'bytes' alias in Python2 does not support an encoding argument. class bytes(_bytes): # noqa diff --git a/awx/lib/site-packages/billiard/connection.py b/awx/lib/site-packages/billiard/connection.py index dda3ee5cc5..bd2e36eb14 100644 --- a/awx/lib/site-packages/billiard/connection.py +++ b/awx/lib/site-packages/billiard/connection.py @@ -5,9 +5,9 @@ import sys is_pypy = hasattr(sys, 'pypy_version_info') if sys.version_info[0] == 3: - from . import _connection3 as connection + from .py3 import connection else: - from . import _connection as connection # noqa + from .py2 import connection # noqa if is_pypy: diff --git a/awx/lib/site-packages/billiard/forking.py b/awx/lib/site-packages/billiard/forking.py index 4557359cab..57fc9795e4 100644 --- a/awx/lib/site-packages/billiard/forking.py +++ b/awx/lib/site-packages/billiard/forking.py @@ -433,6 +433,8 @@ def main(): os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__ except KeyError: pass + except AttributeError: + pass loglevel = os.environ.get("_MP_FORK_LOGLEVEL_") logfile = os.environ.get("_MP_FORK_LOGFILE_") or None format = os.environ.get("_MP_FORK_LOGFORMAT_") diff --git a/awx/lib/site-packages/billiard/pool.py b/awx/lib/site-packages/billiard/pool.py index 4b71d51427..468865c1b7 100644 --- a/awx/lib/site-packages/billiard/pool.py +++ b/awx/lib/site-packages/billiard/pool.py @@ -1096,7 +1096,8 @@ class Pool(object): if popen is None or exitcode is not None: # worker exited debug('Supervisor: cleaning up worker %d', i) - worker.join() + if popen is not None: + worker.join() debug('Supervisor: worked %d joined', i) cleaned[worker.pid] = worker exitcodes[worker.pid] = exitcode @@ -1591,7 +1592,8 @@ class Pool(object): if p.is_alive(): # worker has not yet exited debug('cleaning up worker %d', p.pid) - p.join() + if p._popen is not None: + p.join() debug('pool workers joined') @property diff --git a/awx/lib/site-packages/billiard/py2/__init__.py b/awx/lib/site-packages/billiard/py2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/billiard/_connection.py b/awx/lib/site-packages/billiard/py2/connection.py similarity index 96% rename from awx/lib/site-packages/billiard/_connection.py rename to awx/lib/site-packages/billiard/py2/connection.py index 1bd185d640..d2b865e23e 100644 --- a/awx/lib/site-packages/billiard/_connection.py +++ b/awx/lib/site-packages/billiard/py2/connection.py @@ -19,14 +19,14 @@ import time import tempfile import itertools -from . import AuthenticationError -from . 
import reduction -from ._ext import _billiard, win32 -from .compat import get_errno, bytes, setblocking -from .five import monotonic -from .forking import duplicate, close -from .reduction import ForkingPickler -from .util import get_temp_dir, Finalize, sub_debug, debug +from .. import AuthenticationError +from .. import reduction +from .._ext import _billiard, win32 +from ..compat import get_errno, setblocking, bytes as cbytes +from ..five import monotonic +from ..forking import duplicate, close +from ..reduction import ForkingPickler +from ..util import get_temp_dir, Finalize, sub_debug, debug try: WindowsError = WindowsError # noqa @@ -406,9 +406,9 @@ if sys.platform == 'win32': MESSAGE_LENGTH = 20 -CHALLENGE = bytes('#CHALLENGE#', 'ascii') -WELCOME = bytes('#WELCOME#', 'ascii') -FAILURE = bytes('#FAILURE#', 'ascii') +CHALLENGE = cbytes('#CHALLENGE#', 'ascii') +WELCOME = cbytes('#WELCOME#', 'ascii') +FAILURE = cbytes('#FAILURE#', 'ascii') def deliver_challenge(connection, authkey): diff --git a/awx/lib/site-packages/billiard/_reduction.py b/awx/lib/site-packages/billiard/py2/reduction.py similarity index 94% rename from awx/lib/site-packages/billiard/_reduction.py rename to awx/lib/site-packages/billiard/py2/reduction.py index 630923412f..b6cde35c86 100644 --- a/awx/lib/site-packages/billiard/_reduction.py +++ b/awx/lib/site-packages/billiard/py2/reduction.py @@ -19,9 +19,9 @@ import threading from pickle import Pickler -from . import current_process -from ._ext import _billiard, win32 -from .util import register_after_fork, debug, sub_debug +from .. import current_process +from .._ext import _billiard, win32 +from ..util import register_after_fork, debug, sub_debug is_win32 = sys.platform == 'win32' is_pypy = hasattr(sys, 'pypy_version_info') @@ -92,7 +92,7 @@ if sys.platform == 'win32': import _subprocess # noqa def send_handle(conn, handle, destination_pid): - from .forking import duplicate + from ..forking import duplicate process_handle = win32.OpenProcess( win32.PROCESS_ALL_ACCESS, False, destination_pid ) @@ -136,7 +136,7 @@ def _get_listener(): _lock.acquire() try: if _listener is None: - from .connection import Listener + from ..connection import Listener debug('starting listener and thread for sending handles') _listener = Listener(authkey=current_process().authkey) t = threading.Thread(target=_serve) @@ -149,7 +149,7 @@ def _get_listener(): def _serve(): - from .util import is_exiting, sub_warning + from ..util import is_exiting, sub_warning while 1: try: @@ -170,7 +170,7 @@ def _serve(): def reduce_handle(handle): - from .forking import Popen, duplicate + from ..forking import Popen, duplicate if Popen.thread_is_spawning(): return (None, Popen.duplicate_for_child(handle), True) dup_handle = duplicate(handle) @@ -180,7 +180,7 @@ def reduce_handle(handle): def rebuild_handle(pickled_data): - from .connection import Client + from ..connection import Client address, handle, inherited = pickled_data if inherited: return handle diff --git a/awx/lib/site-packages/billiard/reduction.py b/awx/lib/site-packages/billiard/reduction.py index 20d942dd4e..c334b3e90d 100644 --- a/awx/lib/site-packages/billiard/reduction.py +++ b/awx/lib/site-packages/billiard/reduction.py @@ -3,8 +3,8 @@ from __future__ import absolute_import import sys if sys.version_info[0] == 3: - from . import _reduction3 as reduction + from .py3 import reduction else: - from . 
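The two pool.py guards above share one idea: a worker whose _popen handle is still None was never started, and joining an unstarted multiprocessing/billiard process raises. A sketch of the check as a helper (name illustrative):

    def safe_join(worker):
        # Only join workers that were actually started; an unstarted
        # Process has no Popen handle and cannot be joined.
        if getattr(worker, '_popen', None) is not None:
            worker.join()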
import _reduction as reduction # noqa + from .py2 import reduction # noqa sys.modules[__name__] = reduction diff --git a/awx/lib/site-packages/boto/__init__.py b/awx/lib/site-packages/boto/__init__.py index 1b3c6517d8..35539123f2 100644 --- a/awx/lib/site-packages/boto/__init__.py +++ b/awx/lib/site-packages/boto/__init__.py @@ -37,7 +37,7 @@ import logging.config import urlparse from boto.exception import InvalidUriError -__version__ = '2.21.2' +__version__ = '2.27.0' Version = __version__ # for backware compatibility # http://bugs.python.org/issue7980 @@ -58,6 +58,7 @@ TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}') GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)' r'#(?P<generation>[0-9]+)$') VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$') +ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json') def init_logging(): @@ -195,6 +196,11 @@ def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection` :return: A connection to Amazon's Auto Scaling Service + + :type use_block_device_types bool + :param use_block_device_types: Specifies whether to return described Launch Configs with block device mappings containing + block device types, or a list of old style block device mappings (deprecated). This defaults to false for compatability + with the old incorrect style. """ from boto.ec2.autoscale import AutoScaleConnection return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, @@ -311,6 +317,25 @@ def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) +def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.rds2.layer1.RDSConnection` + :return: A connection to RDS + """ + from boto.rds2.layer1 import RDSConnection + return RDSConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string diff --git a/awx/lib/site-packages/boto/auth.py b/awx/lib/site-packages/boto/auth.py index 0f8b10cef7..62446eb37d 100644 --- a/awx/lib/site-packages/boto/auth.py +++ b/awx/lib/site-packages/boto/auth.py @@ -36,6 +36,7 @@ import copy import datetime from email.utils import formatdate import hmac +import os import sys import time import urllib @@ -220,7 +221,6 @@ class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys): Select the headers from the request that need to be included in the StringToSign.
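For reference, the GENERATION_RE/VERSION_RE patterns above split a versioned storage URI into its versionless part plus a trailing generation number or version id. A minimal sketch of how the named groups are consumed (the URI below is a made-up example, not from the diff):

    import re

    GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
                               r'#(?P<generation>[0-9]+)$')

    m = GENERATION_RE.match('gs://example-bucket/key#1234567890')
    if m:
        print(m.group('versionless_uri_str'))  # gs://example-bucket/key
        print(m.group('generation'))           # 1234567890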
""" - headers_to_sign = {} headers_to_sign = {'Host': self.host} for name, value in http_request.headers.items(): lname = name.lower() @@ -329,7 +329,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys): parameter_names = sorted(http_request.params.keys()) pairs = [] for pname in parameter_names: - pval = str(http_request.params[pname]).encode('utf-8') + pval = boto.utils.get_utf8_value(http_request.params[pname]) pairs.append(urllib.quote(pname, safe='') + '=' + urllib.quote(pval, safe='-_~')) return '&'.join(pairs) @@ -341,7 +341,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys): return "" l = [] for param in sorted(http_request.params): - value = str(http_request.params[param]) + value = boto.utils.get_utf8_value(http_request.params[param]) l.append('%s=%s' % (urllib.quote(param, safe='-_.~'), urllib.quote(value, safe='-_.~'))) return '&'.join(l) @@ -358,9 +358,11 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys): for header in headers_to_sign: c_name = header.lower().strip() raw_value = headers_to_sign[header] - c_value = ' '.join(raw_value.strip().split()) + if '"' in raw_value: + c_value = raw_value.strip() + else: + c_value = ' '.join(raw_value.strip().split()) canonical.append('%s:%s' % (c_name, c_value)) - return '\n'.join(sorted(canonical)) def signed_headers(self, headers_to_sign): @@ -498,7 +500,10 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys): # Safe to modify req.path here since # the signature will use req.auth_path. req.path = req.path.split('?')[0] - req.path = req.path + '?' + qs + + if qs: + # Don't insert the '?' unless there's actually a query string + req.path = req.path + '?' + qs canonical_request = self.canonical_request(req) boto.log.debug('CanonicalRequest:\n%s' % canonical_request) string_to_sign = self.string_to_sign(req, canonical_request) @@ -534,7 +539,10 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler): # S3 does **NOT** do path normalization that SigV4 typically does. # Urlencode the path, **NOT** ``auth_path`` (because vhosting). path = urlparse.urlparse(http_request.path) - encoded = urllib.quote(path.path) + # Because some quoting may have already been applied, let's back it out. + unquoted = urllib.unquote(path.path) + # Requote, this time addressing all characters. + encoded = urllib.quote(unquoted) return encoded def host_header(self, host, http_request): @@ -889,6 +897,12 @@ def get_auth_handler(host, config, provider, requested_capability=None): def detect_potential_sigv4(func): def _wrapper(self): + if os.environ.get('EC2_USE_SIGV4', False): + return ['hmac-v4'] + + if boto.config.get('ec2', 'use-sigv4', False): + return ['hmac-v4'] + if hasattr(self, 'region'): if getattr(self.region, 'endpoint', ''): if '.cn-' in self.region.endpoint: @@ -900,6 +914,12 @@ def detect_potential_sigv4(func): def detect_potential_s3sigv4(func): def _wrapper(self): + if os.environ.get('S3_USE_SIGV4', False): + return ['hmac-v4-s3'] + + if boto.config.get('s3', 'use-sigv4', False): + return ['hmac-v4-s3'] + if hasattr(self, 'host'): if '.cn-' in self.host: return ['hmac-v4-s3'] diff --git a/awx/lib/site-packages/boto/beanstalk/__init__.py b/awx/lib/site-packages/boto/beanstalk/__init__.py index 904d855edf..c3928bcd1b 100644 --- a/awx/lib/site-packages/boto/beanstalk/__init__.py +++ b/awx/lib/site-packages/boto/beanstalk/__init__.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,31 +31,10 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ import boto.beanstalk.layer1 - return [RegionInfo(name='us-east-1', - endpoint='elasticbeanstalk.us-east-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='us-west-1', - endpoint='elasticbeanstalk.us-west-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='us-west-2', - endpoint='elasticbeanstalk.us-west-2.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='ap-northeast-1', - endpoint='elasticbeanstalk.ap-northeast-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='ap-southeast-1', - endpoint='elasticbeanstalk.ap-southeast-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='ap-southeast-2', - endpoint='elasticbeanstalk.ap-southeast-2.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='eu-west-1', - endpoint='elasticbeanstalk.eu-west-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - RegionInfo(name='sa-east-1', - endpoint='elasticbeanstalk.sa-east-1.amazonaws.com', - connection_cls=boto.beanstalk.layer1.Layer1), - ] + return get_regions( + 'elasticbeanstalk', + connection_cls=boto.beanstalk.layer1.Layer1 + ) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/beanstalk/layer1.py b/awx/lib/site-packages/boto/beanstalk/layer1.py index f70a6b28b6..5963f50e9c 100644 --- a/awx/lib/site-packages/boto/beanstalk/layer1.py +++ b/awx/lib/site-packages/boto/beanstalk/layer1.py @@ -40,7 +40,7 @@ class Layer1(AWSQueryConnection): proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - api_version=None, security_token=None): + api_version=None, security_token=None, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) @@ -51,7 +51,7 @@ class Layer1(AWSQueryConnection): proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, - security_token) + security_token, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] diff --git a/awx/lib/site-packages/boto/cloudformation/__init__.py b/awx/lib/site-packages/boto/cloudformation/__init__.py index cf6679f944..84047e2b0b 100644 --- a/awx/lib/site-packages/boto/cloudformation/__init__.py +++ b/awx/lib/site-packages/boto/cloudformation/__init__.py @@ -21,19 +21,9 @@ # IN THE SOFTWARE. 
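Both service packages now derive their region lists from the bundled endpoints.json (via get_regions/load_regions) instead of hard-coded RegionInfo tables; the public API is unchanged. A short sketch of the call pattern (credentials still come from the usual environment or boto config):

    import boto.beanstalk
    import boto.cloudformation

    # Regions are resolved from endpoints.json, so newly published regions
    # show up without editing each service's __init__.py.
    for region in boto.beanstalk.regions():
        print('%s => %s' % (region.name, region.endpoint))

    cfn = boto.cloudformation.connect_to_region('us-east-1')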
from connection import CloudFormationConnection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions -RegionData = { - 'us-east-1': 'cloudformation.us-east-1.amazonaws.com', - 'us-west-1': 'cloudformation.us-west-1.amazonaws.com', - 'us-west-2': 'cloudformation.us-west-2.amazonaws.com', - 'sa-east-1': 'cloudformation.sa-east-1.amazonaws.com', - 'eu-west-1': 'cloudformation.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'cloudformation.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'cloudformation.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'cloudformation.ap-southeast-2.amazonaws.com', - 'cn-north-1': 'cloudformation.cn-north-1.amazonaws.com.cn', -} +RegionData = load_regions().get('cloudformation') def regions(): @@ -43,13 +33,10 @@ def regions(): :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=CloudFormationConnection) - regions.append(region) - return regions + return get_regions( + 'cloudformation', + connection_cls=CloudFormationConnection + ) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/cloudformation/connection.py b/awx/lib/site-packages/boto/cloudformation/connection.py index 5e6325d523..40ff8b63bd 100644 --- a/awx/lib/site-packages/boto/cloudformation/connection.py +++ b/awx/lib/site-packages/boto/cloudformation/connection.py @@ -1,4 +1,5 @@ # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -29,9 +30,28 @@ from boto.compat import json class CloudFormationConnection(AWSQueryConnection): - """ - A Connection to the CloudFormation Service. + AWS CloudFormation + AWS CloudFormation enables you to create and manage AWS + infrastructure deployments predictably and repeatedly. AWS + CloudFormation helps you leverage AWS products such as Amazon EC2, + EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable, + highly scalable, cost effective applications without worrying + about creating and configuring the underlying AWS infrastructure. + + With AWS CloudFormation, you declare all of your resources and + dependencies in a template file. The template defines a collection + of resources as a single unit called a stack. AWS CloudFormation + creates and deletes all member resources of the stack together and + manages all dependencies between the resources for you. + + For more information about this product, go to the `CloudFormation + Product Page`_. + + Amazon CloudFormation makes use of other AWS products. If you need + additional technical information about a specific AWS product, you + can find the product's technical documentation at + `http://aws.amazon.com/documentation/`_. 
""" APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15') DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1') @@ -52,7 +72,8 @@ class CloudFormationConnection(AWSQueryConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - converter=None, security_token=None, validate_certs=True): + converter=None, security_token=None, validate_certs=True, + profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, CloudFormationConnection) @@ -64,7 +85,8 @@ class CloudFormationConnection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -74,50 +96,117 @@ class CloudFormationConnection(AWSQueryConnection): return {True: "true", False: "false"}[v] def _build_create_or_update_params(self, stack_name, template_body, - template_url, parameters, - notification_arns, disable_rollback, - timeout_in_minutes, capabilities, tags): + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, on_failure, stack_policy_body, + stack_policy_url, tags, stack_policy_during_update_body=None, + stack_policy_during_update_url=None): """ Helper that creates JSON parameters needed by a Stack Create or Stack Update call. :type stack_name: string - :param stack_name: The name of the Stack, must be unique amoung running - Stacks + :param stack_name: + The name associated with the stack. The name must be unique within your + AWS account. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. :type template_body: string - :param template_body: The template body (JSON string) + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. :type template_url: string - :param template_url: An S3 URL of a stored template JSON document. If - both the template_body and template_url are - specified, the template_body takes precedence + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to the `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. - :type parameters: list of tuples - :param parameters: A list of (key, value) pairs for template input - parameters. + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. - :type notification_arns: list of strings - :param notification_arns: A list of SNS topics to send Stack event - notifications to. + :type disable_rollback: boolean + :param disable_rollback: Set to `True` to disable rollback of the stack + if stack creation failed. You can specify either `DisableRollback` + or `OnFailure`, but not both. 
+ Default: `False` - :type disable_rollback: bool - :param disable_rollback: Indicates whether or not to rollback on - failure. + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. - :type timeout_in_minutes: int - :param timeout_in_minutes: Maximum amount of time to let the Stack - spend creating itself. If this timeout is exceeded, - the Stack will enter the CREATE_FAILED state. + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). :type capabilities: list - :param capabilities: The list of capabilities you want to allow in - the stack. Currently, the only valid capability is - 'CAPABILITY_IAM'. + :param capabilities: The list of capabilities that you want to allow in + the stack. If your template contains certain resources, you must + specify the CAPABILITY_IAM value for this parameter; otherwise, + this action returns an InsufficientCapabilities error. The + following resources require you to specify the capabilities + parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_, + `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_, + `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and + `AWS::IAM::UserToGroupAddition`_. - :type tags: dict - :param tags: A dictionary of (key, value) pairs of tags to - associate with this stack. + :type on_failure: string + :param on_failure: Determines what action will be taken if stack + creation fails. This must be one of: DO_NOTHING, ROLLBACK, or + DELETE. You can specify either `OnFailure` or `DisableRollback`, + but not both. + Default: `ROLLBACK` + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + If you pass `StackPolicyBody` and `StackPolicyURL`, only + `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. If you pass + `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is + used. + + :type tags: list + :param tags: A set of user-defined `Tags` to associate with this stack, + represented by key/value pairs. Tags defined for the stack are + propagated to EC2 resources that are created as part of the stack. + A maximum number of 10 tags can be specified. + + :type stack_policy_during_update_body: string + :param stack_policy_during_update_body: Structure containing the + temporary overriding stack policy body. If you pass + `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`, + only `StackPolicyDuringUpdateBody` is used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that associated with the stack + will be used. + + :type stack_policy_during_update_url: string + :param stack_policy_during_update_url: Location of a file containing + the temporary overriding stack policy. 
The URL must point to a + policy (max size: 16KB) located in an S3 bucket in the same region + as the stack. If you pass `StackPolicyDuringUpdateBody` and + `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is + used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. :rtype: dict :return: JSON parameters represented as a Python dict. @@ -131,7 +220,7 @@ class CloudFormationConnection(AWSQueryConnection): if template_body and template_url: boto.log.warning("If both TemplateBody and TemplateURL are" " specified, only TemplateBody will be honored by the API") - if len(parameters) > 0: + if parameters and len(parameters) > 0: for i, (key, value) in enumerate(parameters): params['Parameters.member.%d.ParameterKey' % (i + 1)] = key params['Parameters.member.%d.ParameterValue' % (i + 1)] = value @@ -142,107 +231,224 @@ class CloudFormationConnection(AWSQueryConnection): for i, (key, value) in enumerate(tags.items()): params['Tags.member.%d.Key' % (i + 1)] = key params['Tags.member.%d.Value' % (i + 1)] = value - if len(notification_arns) > 0: + if notification_arns and len(notification_arns) > 0: self.build_list_params(params, notification_arns, "NotificationARNs.member") if timeout_in_minutes: params['TimeoutInMinutes'] = int(timeout_in_minutes) + if disable_rollback is not None: + params['DisableRollback'] = str( + disable_rollback).lower() + if on_failure is not None: + params['OnFailure'] = on_failure + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + if stack_policy_during_update_body is not None: + params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body + if stack_policy_during_update_url is not None: + params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url return params - def create_stack(self, stack_name, template_body=None, template_url=None, - parameters=[], notification_arns=[], disable_rollback=False, - timeout_in_minutes=None, capabilities=None, tags=None): + def _do_request(self, call, params, path, method): """ - Creates a CloudFormation Stack as specified by the template. + Do a request via ``self.make_request`` and parse the JSON response. - :type stack_name: string - :param stack_name: The name of the Stack, must be unique amoung running - Stacks + :type call: string + :param call: Call name, e.g. ``CreateStack`` - :type template_body: string - :param template_body: The template body (JSON string) + :type params: dict + :param params: Dictionary of call parameters - :type template_url: string - :param template_url: An S3 URL of a stored template JSON document. If - both the template_body and template_url are - specified, the template_body takes precedence + :type path: string + :param path: Server path - :type parameters: list of tuples - :param parameters: A list of (key, value) pairs for template input - parameters. + :type method: string + :param method: HTTP method to use - :type notification_arns: list of strings - :param notification_arns: A list of SNS topics to send Stack event - notifications to. - - :type disable_rollback: bool - :param disable_rollback: Indicates whether or not to rollback on - failure. - - :type timeout_in_minutes: int - :param timeout_in_minutes: Maximum amount of time to let the Stack - spend creating itself. 
If this timeout is exceeded, - the Stack will enter the CREATE_FAILED state. - - :type capabilities: list - :param capabilities: The list of capabilities you want to allow in - the stack. Currently, the only valid capability is - 'CAPABILITY_IAM'. - - :type tags: dict - :param tags: A dictionary of (key, value) pairs of tags to - associate with this stack. - - :rtype: string - :return: The unique Stack ID. + :rtype: dict + :return: Parsed JSON response data """ - params = self._build_create_or_update_params(stack_name, - template_body, template_url, parameters, notification_arns, - disable_rollback, timeout_in_minutes, capabilities, tags) - response = self.make_request('CreateStack', params, '/', 'POST') + response = self.make_request(call, params, path, method) body = response.read() if response.status == 200: body = json.loads(body) - return body['CreateStackResponse']['CreateStackResult']['StackId'] + return body else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + raise self.ResponseError(response.status, response.reason, body=body) + + def create_stack(self, stack_name, template_body=None, template_url=None, + parameters=None, notification_arns=None, disable_rollback=None, + timeout_in_minutes=None, capabilities=None, tags=None, + on_failure=None, stack_policy_body=None, stack_policy_url=None): + """ + Creates a stack as specified in the template. After the call + completes successfully, the stack creation starts. You can + check the status of the stack via the DescribeStacks API. + Currently, the limit for stacks is 20 stacks per account per + region. + + :type stack_name: string + :param stack_name: + The name associated with the stack. The name must be unique within your + AWS account. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to the `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. + + :type disable_rollback: boolean + :param disable_rollback: Set to `True` to disable rollback of the stack + if stack creation failed. You can specify either `DisableRollback` + or `OnFailure`, but not both. + Default: `False` + + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. + + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). 
+ + :type capabilities: list + :param capabilities: The list of capabilities that you want to allow in + the stack. If your template contains certain resources, you must + specify the CAPABILITY_IAM value for this parameter; otherwise, + this action returns an InsufficientCapabilities error. The + following resources require you to specify the capabilities + parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_, + `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_, + `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and + `AWS::IAM::UserToGroupAddition`_. + + :type on_failure: string + :param on_failure: Determines what action will be taken if stack + creation fails. This must be one of: DO_NOTHING, ROLLBACK, or + DELETE. You can specify either `OnFailure` or `DisableRollback`, + but not both. + Default: `ROLLBACK` + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + If you pass `StackPolicyBody` and `StackPolicyURL`, only + `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. If you pass + `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is + used. + + :type tags: dict + :param tags: A set of user-defined `Tags` to associate with this stack, + represented by key/value pairs. Tags defined for the stack are + propagated to EC2 resources that are created as part of the stack. + A maximum number of 10 tags can be specified. + """ + params = self._build_create_or_update_params(stack_name, template_body, + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, on_failure, stack_policy_body, + stack_policy_url, tags) + body = self._do_request('CreateStack', params, '/', 'POST') + return body['CreateStackResponse']['CreateStackResult']['StackId'] def update_stack(self, stack_name, template_body=None, template_url=None, - parameters=[], notification_arns=[], disable_rollback=False, - timeout_in_minutes=None, capabilities=None, tags=None): + parameters=None, notification_arns=None, disable_rollback=False, + timeout_in_minutes=None, capabilities=None, tags=None, + stack_policy_during_update_body=None, + stack_policy_during_update_url=None, + stack_policy_body=None, stack_policy_url=None): """ - Updates a CloudFormation Stack as specified by the template. + Updates a stack as specified in the template. After the call + completes successfully, the stack update starts. You can check + the status of the stack via the DescribeStacks action. + + + + **Note: **You cannot update `AWS::S3::Bucket`_ resources, for + example, to add or modify tags. + + + + To get a copy of the template for an existing stack, you can + use the GetTemplate action. + + Tags that were associated with this stack during creation time + will still be associated with the stack after an `UpdateStack` + operation. + + For more information about creating an update template, + updating a stack, and monitoring the progress of the update, + see `Updating a Stack`_. :type stack_name: string - :param stack_name: The name of the Stack, must be unique amoung running - Stacks. + :param stack_name: + The name or stack ID of the stack to update. 
+ + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. :type template_body: string - :param template_body: The template body (JSON string) + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. :type template_url: string - :param template_url: An S3 URL of a stored template JSON document. If - both the template_body and template_url are - specified, the template_body takes precedence. + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. - :type parameters: list of tuples - :param parameters: A list of (key, value) pairs for template input - parameters. + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. - :type notification_arns: list of strings - :param notification_arns: A list of SNS topics to send Stack event - notifications to. + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). :type disable_rollback: bool :param disable_rollback: Indicates whether or not to rollback on failure. - :type timeout_in_minutes: int - :param timeout_in_minutes: Maximum amount of time to let the Stack - spend creating itself. If this timeout is exceeded, - the Stack will enter the CREATE_FAILED state + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. :type capabilities: list :param capabilities: The list of capabilities you want to allow in @@ -250,38 +456,86 @@ class CloudFormationConnection(AWSQueryConnection): 'CAPABILITY_IAM'. :type tags: dict - :param tags: A dictionary of (key, value) pairs of tags to - associate with this stack. + :param tags: A set of user-defined `Tags` to associate with this stack, + represented by key/value pairs. Tags defined for the stack are + propagated to EC2 resources that are created as part of the stack. + A maximum number of 10 tags can be specified. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type stack_policy_during_update_body: string + :param stack_policy_during_update_body: Structure containing the + temporary overriding stack policy body. If you pass + `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`, + only `StackPolicyDuringUpdateBody` is used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. 
If you do not specify a + stack policy, the current policy that associated with the stack + will be used. + + :type stack_policy_during_update_url: string + :param stack_policy_during_update_url: Location of a file containing + the temporary overriding stack policy. The URL must point to a + policy (max size: 16KB) located in an S3 bucket in the same region + as the stack. If you pass `StackPolicyDuringUpdateBody` and + `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is + used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. :rtype: string :return: The unique Stack ID. """ - params = self._build_create_or_update_params(stack_name, - template_body, template_url, parameters, notification_arns, - disable_rollback, timeout_in_minutes, capabilities, tags) - response = self.make_request('UpdateStack', params, '/', 'POST') - body = response.read() - if response.status == 200: - body = json.loads(body) - return body['UpdateStackResponse']['UpdateStackResult']['StackId'] - else: - boto.log.error('%s %s' % (response.status, response.reason)) - boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + params = self._build_create_or_update_params(stack_name, template_body, + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, None, stack_policy_body, + stack_policy_url, tags, stack_policy_during_update_body, + stack_policy_during_update_url) + body = self._do_request('UpdateStack', params, '/', 'POST') + return body['UpdateStackResponse']['UpdateStackResult']['StackId'] def delete_stack(self, stack_name_or_id): + """ + Deletes a specified stack. Once the call completes + successfully, stack deletion starts. Deleted stacks do not + show up in the DescribeStacks API if the deletion has been + completed successfully. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + + """ params = {'ContentType': "JSON", 'StackName': stack_name_or_id} - # TODO: change this to get_status ? - response = self.make_request('DeleteStack', params, '/', 'GET') - body = response.read() - if response.status == 200: - return json.loads(body) - else: - boto.log.error('%s %s' % (response.status, response.reason)) - boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + return self._do_request('DeleteStack', params, '/', 'GET') def describe_stack_events(self, stack_name_or_id=None, next_token=None): + """ + Returns all stack related events for a specified stack. For + more information about a stack's event history, go to + `Stacks`_ in the AWS CloudFormation User Guide. + Events are returned, even if the stack never existed or has + been successfully deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + events, if there is one. + Default: There is no default value. 
+ + """ params = {} if stack_name_or_id: params['StackName'] = stack_name_or_id @@ -291,21 +545,82 @@ class CloudFormationConnection(AWSQueryConnection): StackEvent)]) def describe_stack_resource(self, stack_name_or_id, logical_resource_id): + """ + Returns a description of the specified resource in the + specified stack. + + For deleted stacks, DescribeStackResource returns resource + information for up to 90 days after the stack has been + deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + """ params = {'ContentType': "JSON", 'StackName': stack_name_or_id, 'LogicalResourceId': logical_resource_id} - response = self.make_request('DescribeStackResource', params, - '/', 'GET') - body = response.read() - if response.status == 200: - return json.loads(body) - else: - boto.log.error('%s %s' % (response.status, response.reason)) - boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + return self._do_request('DescribeStackResource', params, '/', 'GET') def describe_stack_resources(self, stack_name_or_id=None, logical_resource_id=None, physical_resource_id=None): + """ + Returns AWS resource descriptions for running and deleted + stacks. If `StackName` is specified, all the associated + resources that are part of the stack are returned. If + `PhysicalResourceId` is specified, the associated resources of + the stack that the resource belongs to are returned. + Only the first 100 resources will be returned. If your stack + has more resources than this, you should use + `ListStackResources` instead. + For deleted stacks, `DescribeStackResources` returns resource + information for up to 90 days after the stack has been + deleted. + + You must specify either `StackName` or `PhysicalResourceId`, + but not both. In addition, you can specify `LogicalResourceId` + to filter the returned result. For more information about + resources, the `LogicalResourceId` and `PhysicalResourceId`, + go to the `AWS CloudFormation User Guide`_. + A `ValidationError` is returned if you specify both + `StackName` and `PhysicalResourceId` in the same request. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Required: Conditional. If you do not specify `StackName`, you must + specify `PhysicalResourceId`. + + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + :type physical_resource_id: string + :param physical_resource_id: The name or unique identifier that + corresponds to a physical instance ID of a resource supported by + AWS CloudFormation. + For example, for an Amazon Elastic Compute Cloud (EC2) instance, + `PhysicalResourceId` corresponds to the `InstanceId`. You can pass + the EC2 `InstanceId` to `DescribeStackResources` to find which + stack the instance belongs to and what other resources are part of + the stack. + + Required: Conditional. If you do not specify `PhysicalResourceId`, you + must specify `StackName`. + + Default: There is no default value. 
+ + """ params = {} if stack_name_or_id: params['StackName'] = stack_name_or_id @@ -316,35 +631,110 @@ class CloudFormationConnection(AWSQueryConnection): return self.get_list('DescribeStackResources', params, [('member', StackResource)]) - def describe_stacks(self, stack_name_or_id=None): + def describe_stacks(self, stack_name_or_id=None, next_token=None): + """ + Returns the description for the specified stack; if no stack + name was specified, then it returns the description for all + the stacks created. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stacks, if there is one. + + """ params = {} if stack_name_or_id: params['StackName'] = stack_name_or_id + if next_token is not None: + params['NextToken'] = next_token return self.get_list('DescribeStacks', params, [('member', Stack)]) def get_template(self, stack_name_or_id): + """ + Returns the template body for a specified stack. You can get + the template for running or deleted stacks. + + For deleted stacks, GetTemplate returns the template for up to + 90 days after the stack has been deleted. + If the template does not exist, a `ValidationError` is + returned. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack, which are not always interchangeable: + + + Running stacks: You can specify either the stack's name or its unique + stack ID. + + Deleted stacks: You must specify the unique stack ID. + + + Default: There is no default value. + + """ params = {'ContentType': "JSON", 'StackName': stack_name_or_id} - response = self.make_request('GetTemplate', params, '/', 'GET') - body = response.read() - if response.status == 200: - return json.loads(body) - else: - boto.log.error('%s %s' % (response.status, response.reason)) - boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + return self._do_request('GetTemplate', params, '/', 'GET') def list_stack_resources(self, stack_name_or_id, next_token=None): + """ + Returns descriptions of all resources of the specified stack. + + For deleted stacks, ListStackResources returns resource + information for up to 90 days after the stack has been + deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack, which are not always interchangeable: + + + Running stacks: You can specify either the stack's name or its unique + stack ID. + + Deleted stacks: You must specify the unique stack ID. + + + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stack resource summaries, if there is one. + Default: There is no default value. + + """ params = {'StackName': stack_name_or_id} if next_token: params['NextToken'] = next_token return self.get_list('ListStackResources', params, [('member', StackResourceSummary)]) - def list_stacks(self, stack_status_filters=[], next_token=None): + def list_stacks(self, stack_status_filters=None, next_token=None): + """ + Returns the summary information for stacks whose status + matches the specified StackStatusFilter. Summary information + for stacks that have been deleted is kept for 90 days after + the stack is deleted. 
If no StackStatusFilter is specified, + summary information for all stacks is returned (including + existing stacks and stacks that have been deleted). + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stacks, if there is one. + Default: There is no default value. + + :type stack_status_filter: list + :param stack_status_filter: Stack status to use as a filter. Specify + one or more stack status codes to list only stacks with the + specified status codes. For a complete list of stack status codes, + see the `StackStatus` parameter of the Stack data type. + + """ params = {} if next_token: params['NextToken'] = next_token - if len(stack_status_filters) > 0: + if stack_status_filters and len(stack_status_filters) > 0: self.build_list_params(params, stack_status_filters, "StackStatusFilter.member") @@ -352,6 +742,25 @@ class CloudFormationConnection(AWSQueryConnection): [('member', StackSummary)]) def validate_template(self, template_body=None, template_url=None): + """ + Validates a specified template. + + :type template_body: string + :param template_body: String containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + """ params = {} if template_body: params['TemplateBody'] = template_body @@ -364,7 +773,116 @@ class CloudFormationConnection(AWSQueryConnection): verb="POST") def cancel_update_stack(self, stack_name_or_id=None): + """ + Cancels an update on the specified stack. If the call + completes successfully, the stack will roll back the update + and revert to the previous stack configuration. + Only stacks that are in the UPDATE_IN_PROGRESS state can be + canceled. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated with + the stack. + + """ params = {} if stack_name_or_id: params['StackName'] = stack_name_or_id return self.get_status('CancelUpdateStack', params) + + def estimate_template_cost(self, template_body=None, template_url=None, + parameters=None): + """ + Returns the estimated monthly cost of a template. The return + value is an AWS Simple Monthly Calculator URL with a query + string that describes the resources required to run the + template. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. 
+ + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the template. + + :rtype: string + :returns: URL to pre-filled cost calculator + """ + params = {'ContentType': "JSON"} + if template_body is not None: + params['TemplateBody'] = template_body + if template_url is not None: + params['TemplateURL'] = template_url + if parameters and len(parameters) > 0: + for i, (key, value) in enumerate(parameters): + params['Parameters.member.%d.ParameterKey' % (i + 1)] = key + params['Parameters.member.%d.ParameterValue' % (i + 1)] = value + + response = self._do_request('EstimateTemplateCost', params, '/', 'POST') + return response['EstimateTemplateCostResponse']\ + ['EstimateTemplateCostResult']\ + ['Url'] + + def get_stack_policy(self, stack_name_or_id): + """ + Returns the stack policy for a specified stack. If a stack + doesn't have a policy, a null value is returned. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or stack ID that is associated with + the stack whose policy you want to get. + + :rtype: string + :return: The policy JSON document + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id, } + response = self._do_request('GetStackPolicy', params, '/', 'POST') + return response['GetStackPolicyResponse']\ + ['GetStackPolicyResult']\ + ['StackPolicyBody'] + + def set_stack_policy(self, stack_name_or_id, stack_policy_body=None, + stack_policy_url=None): + """ + Sets a stack policy for a specified stack. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or stack ID that you want to + associate a policy with. + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + You must pass `StackPolicyBody` or `StackPolicyURL`. If both are + passed, only `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. You must pass + `StackPolicyBody` or `StackPolicyURL`. If both are passed, only + `StackPolicyBody` is used. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id, } + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + + response = self._do_request('SetStackPolicy', params, '/', 'POST') + return response['SetStackPolicyResponse']\ + ['SetStackPolicyResult'] diff --git a/awx/lib/site-packages/boto/cloudformation/layer1.py b/awx/lib/site-packages/boto/cloudformation/layer1.py new file mode 100644 index 0000000000..8efb8ca62b --- /dev/null +++ b/awx/lib/site-packages/boto/cloudformation/layer1.py @@ -0,0 +1,773 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +try: + import json +except ImportError: + import simplejson as json + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudformation import exceptions + + +class CloudFormationConnection(AWSQueryConnection): + """ + AWS CloudFormation + AWS CloudFormation enables you to create and manage AWS + infrastructure deployments predictably and repeatedly. AWS + CloudFormation helps you leverage AWS products such as Amazon EC2, + EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable, + highly scalable, cost effective applications without worrying + about creating and configuring the underlying AWS infrastructure. + + With AWS CloudFormation, you declare all of your resources and + dependencies in a template file. The template defines a collection + of resources as a single unit called a stack. AWS CloudFormation + creates and deletes all member resources of the stack together and + manages all dependencies between the resources for you. + + For more information about this product, go to the `CloudFormation + Product Page`_. + + Amazon CloudFormation makes use of other AWS products. If you need + additional technical information about a specific AWS product, you + can find the product's technical documentation at + `http://aws.amazon.com/documentation/`_. + """ + APIVersion = "2010-05-15" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudformation.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "AlreadyExistsException": exceptions.AlreadyExistsException, + "InsufficientCapabilitiesException": exceptions.InsufficientCapabilitiesException, + "LimitExceededException": exceptions.LimitExceededException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(CloudFormationConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def cancel_update_stack(self, stack_name): + """ + Cancels an update on the specified stack. If the call + completes successfully, the stack will roll back the update + and revert to the previous stack configuration. + Only stacks that are in the UPDATE_IN_PROGRESS state can be + canceled. 
+ + :type stack_name: string + :param stack_name: The name or the unique identifier associated with + the stack. + + """ + params = {'StackName': stack_name, } + return self._make_request( + action='CancelUpdateStack', + verb='POST', + path='/', params=params) + + def create_stack(self, stack_name, template_body=None, template_url=None, + parameters=None, disable_rollback=None, + timeout_in_minutes=None, notification_arns=None, + capabilities=None, on_failure=None, + stack_policy_body=None, stack_policy_url=None, + tags=None): + """ + Creates a stack as specified in the template. After the call + completes successfully, the stack creation starts. You can + check the status of the stack via the DescribeStacks API. + Currently, the limit for stacks is 20 stacks per account per + region. + + :type stack_name: string + :param stack_name: + The name associated with the stack. The name must be unique within your + AWS account. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to the `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of `Parameter` structures that specify input + parameters for the stack. + + :type disable_rollback: boolean + :param disable_rollback: Set to `True` to disable rollback of the stack + if stack creation failed. You can specify either `DisableRollback` + or `OnFailure`, but not both. + Default: `False` + + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. + + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). + + :type capabilities: list + :param capabilities: The list of capabilities that you want to allow in + the stack. If your template contains certain resources, you must + specify the CAPABILITY_IAM value for this parameter; otherwise, + this action returns an InsufficientCapabilities error. The + following resources require you to specify the capabilities + parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_, + `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_, + `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and + `AWS::IAM::UserToGroupAddition`_. + + :type on_failure: string + :param on_failure: Determines what action will be taken if stack + creation fails. This must be one of: DO_NOTHING, ROLLBACK, or + DELETE. You can specify either `OnFailure` or `DisableRollback`, + but not both. 
+ Default: `ROLLBACK` + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + If you pass `StackPolicyBody` and `StackPolicyURL`, only + `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. If you pass + `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is + used. + + :type tags: list + :param tags: A set of user-defined `Tags` to associate with this stack, + represented by key/value pairs. Tags defined for the stack are + propagated to EC2 resources that are created as part of the stack. + A maximum number of 10 tags can be specified. + + """ + params = {'StackName': stack_name, } + if template_body is not None: + params['TemplateBody'] = template_body + if template_url is not None: + params['TemplateURL'] = template_url + if parameters is not None: + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterKey', 'ParameterValue')) + if disable_rollback is not None: + params['DisableRollback'] = str( + disable_rollback).lower() + if timeout_in_minutes is not None: + params['TimeoutInMinutes'] = timeout_in_minutes + if notification_arns is not None: + self.build_list_params(params, + notification_arns, + 'NotificationARNs.member') + if capabilities is not None: + self.build_list_params(params, + capabilities, + 'Capabilities.member') + if on_failure is not None: + params['OnFailure'] = on_failure + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateStack', + verb='POST', + path='/', params=params) + + def delete_stack(self, stack_name): + """ + Deletes a specified stack. Once the call completes + successfully, stack deletion starts. Deleted stacks do not + show up in the DescribeStacks API if the deletion has been + completed successfully. + + :type stack_name: string + :param stack_name: The name or the unique identifier associated with + the stack. + + """ + params = {'StackName': stack_name, } + return self._make_request( + action='DeleteStack', + verb='POST', + path='/', params=params) + + def describe_stack_events(self, stack_name=None, next_token=None): + """ + Returns all stack related events for a specified stack. For + more information about a stack's event history, go to + `Stacks`_ in the AWS CloudFormation User Guide. + Events are returned, even if the stack never existed or has + been successfully deleted. + + :type stack_name: string + :param stack_name: The name or the unique identifier associated with + the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + events, if there is one. + Default: There is no default value. 
+ + """ + params = {} + if stack_name is not None: + params['StackName'] = stack_name + if next_token is not None: + params['NextToken'] = next_token + return self._make_request( + action='DescribeStackEvents', + verb='POST', + path='/', params=params) + + def describe_stack_resource(self, stack_name, logical_resource_id): + """ + Returns a description of the specified resource in the + specified stack. + + For deleted stacks, DescribeStackResource returns resource + information for up to 90 days after the stack has been + deleted. + + :type stack_name: string + :param stack_name: The name or the unique identifier associated with + the stack. + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + """ + params = { + 'StackName': stack_name, + 'LogicalResourceId': logical_resource_id, + } + return self._make_request( + action='DescribeStackResource', + verb='POST', + path='/', params=params) + + def describe_stack_resources(self, stack_name=None, + logical_resource_id=None, + physical_resource_id=None): + """ + Returns AWS resource descriptions for running and deleted + stacks. If `StackName` is specified, all the associated + resources that are part of the stack are returned. If + `PhysicalResourceId` is specified, the associated resources of + the stack that the resource belongs to are returned. + Only the first 100 resources will be returned. If your stack + has more resources than this, you should use + `ListStackResources` instead. + For deleted stacks, `DescribeStackResources` returns resource + information for up to 90 days after the stack has been + deleted. + + You must specify either `StackName` or `PhysicalResourceId`, + but not both. In addition, you can specify `LogicalResourceId` + to filter the returned result. For more information about + resources, the `LogicalResourceId` and `PhysicalResourceId`, + go to the `AWS CloudFormation User Guide`_. + A `ValidationError` is returned if you specify both + `StackName` and `PhysicalResourceId` in the same request. + + :type stack_name: string + :param stack_name: The name or the unique identifier associated with + the stack. + Required: Conditional. If you do not specify `StackName`, you must + specify `PhysicalResourceId`. + + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + :type physical_resource_id: string + :param physical_resource_id: The name or unique identifier that + corresponds to a physical instance ID of a resource supported by + AWS CloudFormation. + For example, for an Amazon Elastic Compute Cloud (EC2) instance, + `PhysicalResourceId` corresponds to the `InstanceId`. You can pass + the EC2 `InstanceId` to `DescribeStackResources` to find which + stack the instance belongs to and what other resources are part of + the stack. + + Required: Conditional. If you do not specify `PhysicalResourceId`, you + must specify `StackName`. + + Default: There is no default value. 
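The conditional lookup described in the docstring above is worth spelling out: passing an EC2 InstanceId as the physical resource ID finds the owning stack, and supplying it together with stack_name yields a ValidationError. A hedged sketch, reusing the conn object from the earlier example with a placeholder instance ID:

    # Which stack owns this instance? Specify physical_resource_id OR
    # stack_name, never both.
    resp = conn.describe_stack_resources(physical_resource_id='i-12345678')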
+ + """ + params = {} + if stack_name is not None: + params['StackName'] = stack_name + if logical_resource_id is not None: + params['LogicalResourceId'] = logical_resource_id + if physical_resource_id is not None: + params['PhysicalResourceId'] = physical_resource_id + return self._make_request( + action='DescribeStackResources', + verb='POST', + path='/', params=params) + + def describe_stacks(self, stack_name=None, next_token=None): + """ + Returns the description for the specified stack; if no stack + name was specified, then it returns the description for all + the stacks created. + + :type stack_name: string + :param stack_name: The name or the unique identifier associated with + the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stacks, if there is one. + + """ + params = {} + if stack_name is not None: + params['StackName'] = stack_name + if next_token is not None: + params['NextToken'] = next_token + return self._make_request( + action='DescribeStacks', + verb='POST', + path='/', params=params) + + def estimate_template_cost(self, template_body=None, template_url=None, + parameters=None): + """ + Returns the estimated monthly cost of a template. The return + value is an AWS Simple Monthly Calculator URL with a query + string that describes the resources required to run the + template. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of `Parameter` structures that specify input + parameters. + + """ + params = {} + if template_body is not None: + params['TemplateBody'] = template_body + if template_url is not None: + params['TemplateURL'] = template_url + if parameters is not None: + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterKey', 'ParameterValue')) + return self._make_request( + action='EstimateTemplateCost', + verb='POST', + path='/', params=params) + + def get_stack_policy(self, stack_name): + """ + Returns the stack policy for a specified stack. If a stack + doesn't have a policy, a null value is returned. + + :type stack_name: string + :param stack_name: The name or stack ID that is associated with the + stack whose policy you want to get. + + """ + params = {'StackName': stack_name, } + return self._make_request( + action='GetStackPolicy', + verb='POST', + path='/', params=params) + + def get_template(self, stack_name): + """ + Returns the template body for a specified stack. You can get + the template for running or deleted stacks. + + For deleted stacks, GetTemplate returns the template for up to + 90 days after the stack has been deleted. + If the template does not exist, a `ValidationError` is + returned. 
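Because _make_request (shown further down) returns the decoded JSON body, paging through describe_stacks means re-feeding NextToken by hand. A sketch of that loop; the response envelope keys are an assumption about the ContentType=JSON reply shape, not something this changeset defines:

    stacks, token = [], None
    while True:
        resp = conn.describe_stacks(next_token=token)
        result = resp['DescribeStacksResponse']['DescribeStacksResult']
        stacks.extend(result.get('Stacks', []))
        token = result.get('NextToken')
        if not token:
            break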
+ + :type stack_name: string + :param stack_name: The name or the unique identifier associated with + the stack, which are not always interchangeable: + + + Running stacks: You can specify either the stack's name or its unique + stack ID. + + Deleted stacks: You must specify the unique stack ID. + + + Default: There is no default value. + + """ + params = {'StackName': stack_name, } + return self._make_request( + action='GetTemplate', + verb='POST', + path='/', params=params) + + def list_stack_resources(self, stack_name, next_token=None): + """ + Returns descriptions of all resources of the specified stack. + + For deleted stacks, ListStackResources returns resource + information for up to 90 days after the stack has been + deleted. + + :type stack_name: string + :param stack_name: The name or the unique identifier associated with + the stack, which are not always interchangeable: + + + Running stacks: You can specify either the stack's name or its unique + stack ID. + + Deleted stacks: You must specify the unique stack ID. + + + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stack resource summaries, if there is one. + Default: There is no default value. + + """ + params = {'StackName': stack_name, } + if next_token is not None: + params['NextToken'] = next_token + return self._make_request( + action='ListStackResources', + verb='POST', + path='/', params=params) + + def list_stacks(self, next_token=None, stack_status_filter=None): + """ + Returns the summary information for stacks whose status + matches the specified StackStatusFilter. Summary information + for stacks that have been deleted is kept for 90 days after + the stack is deleted. If no StackStatusFilter is specified, + summary information for all stacks is returned (including + existing stacks and stacks that have been deleted). + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stacks, if there is one. + Default: There is no default value. + + :type stack_status_filter: list + :param stack_status_filter: Stack status to use as a filter. Specify + one or more stack status codes to list only stacks with the + specified status codes. For a complete list of stack status codes, + see the `StackStatus` parameter of the Stack data type. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + if stack_status_filter is not None: + self.build_list_params(params, + stack_status_filter, + 'StackStatusFilter.member') + return self._make_request( + action='ListStacks', + verb='POST', + path='/', params=params) + + def set_stack_policy(self, stack_name, stack_policy_body=None, + stack_policy_url=None): + """ + Sets a stack policy for a specified stack. + + :type stack_name: string + :param stack_name: The name or stack ID that you want to associate a + policy with. + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + You must pass `StackPolicyBody` or `StackPolicyURL`. If both are + passed, only `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. You must pass + `StackPolicyBody` or `StackPolicyURL`. 
If both are passed, only + `StackPolicyBody` is used. + + """ + params = {'StackName': stack_name, } + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + return self._make_request( + action='SetStackPolicy', + verb='POST', + path='/', params=params) + + def update_stack(self, stack_name, template_body=None, template_url=None, + stack_policy_during_update_body=None, + stack_policy_during_update_url=None, parameters=None, + capabilities=None, stack_policy_body=None, + stack_policy_url=None): + """ + Updates a stack as specified in the template. After the call + completes successfully, the stack update starts. You can check + the status of the stack via the DescribeStacks action. + + + + **Note: **You cannot update `AWS::S3::Bucket`_ resources, for + example, to add or modify tags. + + + + To get a copy of the template for an existing stack, you can + use the GetTemplate action. + + Tags that were associated with this stack during creation time + will still be associated with the stack after an `UpdateStack` + operation. + + For more information about creating an update template, + updating a stack, and monitoring the progress of the update, + see `Updating a Stack`_. + + :type stack_name: string + :param stack_name: + The name or stack ID of the stack to update. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type stack_policy_during_update_body: string + :param stack_policy_during_update_body: Structure containing the + temporary overriding stack policy body. If you pass + `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`, + only `StackPolicyDuringUpdateBody` is used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that associated with the stack + will be used. + + :type stack_policy_during_update_url: string + :param stack_policy_during_update_url: Location of a file containing + the temporary overriding stack policy. The URL must point to a + policy (max size: 16KB) located in an S3 bucket in the same region + as the stack. If you pass `StackPolicyDuringUpdateBody` and + `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is + used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. + + :type parameters: list + :param parameters: A list of `Parameter` structures that specify input + parameters for the stack. 
+ + :type capabilities: list + :param capabilities: The list of capabilities that you want to allow in + the stack. If your stack contains IAM resources, you must specify + the CAPABILITY_IAM value for this parameter; otherwise, this action + returns an InsufficientCapabilities error. IAM resources are the + following: `AWS::IAM::AccessKey`_, `AWS::IAM::Group`_, + `AWS::IAM::Policy`_, `AWS::IAM::User`_, and + `AWS::IAM::UserToGroupAddition`_. + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the updated stack policy + body. If you pass `StackPolicyBody` and `StackPolicyURL`, only + `StackPolicyBody` is used. + If you want to update a stack policy during a stack update, specify an + updated stack policy. For example, you can include an updated stack + policy to protect a new resource created in the stack update. If + you do not specify a stack policy, the current policy that is + associated with the stack is unchanged. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the updated + stack policy. The URL must point to a policy (max size: 16KB) + located in an S3 bucket in the same region as the stack. If you + pass `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` + is used. + If you want to update a stack policy during a stack update, specify an + updated stack policy. For example, you can include an updated stack + policy to protect a new resource created in the stack update. If + you do not specify a stack policy, the current policy that is + associated with the stack is unchanged. + + """ + params = {'StackName': stack_name, } + if template_body is not None: + params['TemplateBody'] = template_body + if template_url is not None: + params['TemplateURL'] = template_url + if stack_policy_during_update_body is not None: + params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body + if stack_policy_during_update_url is not None: + params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url + if parameters is not None: + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterKey', 'ParameterValue')) + if capabilities is not None: + self.build_list_params(params, + capabilities, + 'Capabilities.member') + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + return self._make_request( + action='UpdateStack', + verb='POST', + path='/', params=params) + + def validate_template(self, template_body=None, template_url=None): + """ + Validates a specified template. + + :type template_body: string + :param template_body: String containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. 
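The temporary-policy parameters above are the interesting addition: they let a single update write through an otherwise protected resource without permanently loosening the stack policy. A hedged one-call sketch with placeholder values:

    # The override policy applies only for the duration of this update;
    # the empty Statement list is a placeholder, not a recommended policy.
    conn.update_stack(
        'example-stack',
        template_url='https://s3.amazonaws.com/example-bucket/template.json',
        stack_policy_during_update_body='{"Statement": []}')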
+ + """ + params = {} + if template_body is not None: + params['TemplateBody'] = template_body + if template_url is not None: + params['TemplateURL'] = template_url + return self._make_request( + action='ValidateTemplate', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/awx/lib/site-packages/boto/cloudformation/stack.py b/awx/lib/site-packages/boto/cloudformation/stack.py index c173de664d..5dac0dd7cf 100644 --- a/awx/lib/site-packages/boto/cloudformation/stack.py +++ b/awx/lib/site-packages/boto/cloudformation/stack.py @@ -107,6 +107,35 @@ class Stack(object): def get_template(self): return self.connection.get_template(stack_name_or_id=self.stack_id) + def get_policy(self): + """ + Returns the stack policy for this stack. If it has no policy + then, a null value is returned. + """ + return self.connection.get_stack_policy(self.stack_id) + + def set_policy(self, stack_policy_body=None, stack_policy_url=None): + """ + Sets a stack policy for this stack. + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + You must pass `StackPolicyBody` or `StackPolicyURL`. If both are + passed, only `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. You must pass + `StackPolicyBody` or `StackPolicyURL`. If both are passed, only + `StackPolicyBody` is used. 
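The _make_request helper above is also the error seam: non-200 replies have their Error/Code field mapped through self._faults to a specific exception class, with self.ResponseError as the fallback. Since boto's fault classes derive from BotoServerError, callers can catch that; a minimal sketch:

    from boto.exception import BotoServerError

    try:
        conn.get_stack_policy('no-such-stack')
    except BotoServerError as e:
        print('%s %s' % (e.status, e.reason))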
+ """ + return self.connection.set_stack_policy(self.stack_id, + stack_policy_body=stack_policy_body, + stack_policy_url=stack_policy_url) + class StackSummary(object): def __init__(self, connection=None): diff --git a/awx/lib/site-packages/boto/cloudformation/template.py b/awx/lib/site-packages/boto/cloudformation/template.py index 762efce55a..bab2148630 100644 --- a/awx/lib/site-packages/boto/cloudformation/template.py +++ b/awx/lib/site-packages/boto/cloudformation/template.py @@ -1,21 +1,29 @@ from boto.resultset import ResultSet +from boto.cloudformation.stack import Capability class Template(object): def __init__(self, connection=None): self.connection = connection self.description = None self.template_parameters = None + self.capabilities_reason = None + self.capabilities = None def startElement(self, name, attrs, connection): if name == "Parameters": self.template_parameters = ResultSet([('member', TemplateParameter)]) return self.template_parameters + elif name == "Capabilities": + self.capabilities = ResultSet([('member', Capability)]) + return self.capabilities else: return None def endElement(self, name, value, connection): if name == "Description": self.description = value + elif name == "CapabilitiesReason": + self.capabilities_reason = value else: setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/cloudfront/__init__.py b/awx/lib/site-packages/boto/cloudfront/__init__.py index 0fa4ae8134..1afefebbf3 100644 --- a/awx/lib/site-packages/boto/cloudfront/__init__.py +++ b/awx/lib/site-packages/boto/cloudfront/__init__.py @@ -43,12 +43,14 @@ class CloudFrontConnection(AWSAuthConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, port=None, proxy=None, proxy_port=None, host=DefaultHost, debug=0, security_token=None, - validate_certs=True): + validate_certs=True, profile_name=None, https_connection_factory=None): super(CloudFrontConnection, self).__init__(host, aws_access_key_id, aws_secret_access_key, True, port, proxy, proxy_port, debug=debug, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + https_connection_factory=https_connection_factory, + profile_name=profile_name) def get_etag(self, response): response_headers = response.msg diff --git a/awx/lib/site-packages/boto/cloudfront/distribution.py b/awx/lib/site-packages/boto/cloudfront/distribution.py index 9992ab6ffb..5566bdfec2 100644 --- a/awx/lib/site-packages/boto/cloudfront/distribution.py +++ b/awx/lib/site-packages/boto/cloudfront/distribution.py @@ -103,6 +103,9 @@ class DistributionConfig(object): self.logging = logging self.default_root_object = default_root_object + def __repr__(self): + return "DistributionConfig:%s" % self.origin + def to_xml(self): s = '\n' s += '\n' @@ -234,6 +237,9 @@ class DistributionSummary(object): self.etag = None self.streaming = False + def __repr__(self): + return "DistributionSummary:%s" % self.domain_name + def startElement(self, name, attrs, connection): if name == 'TrustedSigners': self.trusted_signers = TrustedSigners() @@ -295,6 +301,9 @@ class Distribution(object): self._bucket = None self._object_class = Object + def __repr__(self): + return "Distribution:%s" % self.domain_name + def startElement(self, name, attrs, connection): if name == 'DistributionConfig': self.config = DistributionConfig() @@ -350,11 +359,11 @@ class Distribution(object): self.config.cnames, self.config.comment, self.config.trusted_signers, self.config.default_root_object) - if enabled != None: + if enabled is not 
None: new_config.enabled = enabled - if cnames != None: + if cnames is not None: new_config.cnames = cnames - if comment != None: + if comment is not None: new_config.comment = comment self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config) self.config = new_config @@ -730,11 +739,11 @@ class StreamingDistribution(Distribution): self.config.cnames, self.config.comment, self.config.trusted_signers) - if enabled != None: + if enabled is not None: new_config.enabled = enabled - if cnames != None: + if cnames is not None: new_config.cnames = cnames - if comment != None: + if comment is not None: new_config.comment = comment self.etag = self.connection.set_streaming_distribution_config(self.id, self.etag, diff --git a/awx/lib/site-packages/boto/cloudfront/identity.py b/awx/lib/site-packages/boto/cloudfront/identity.py index 123773d15d..de79c8ac76 100644 --- a/awx/lib/site-packages/boto/cloudfront/identity.py +++ b/awx/lib/site-packages/boto/cloudfront/identity.py @@ -52,7 +52,7 @@ class OriginAccessIdentity(object): new_config = OriginAccessIdentityConfig(self.connection, self.config.caller_reference, self.config.comment) - if comment != None: + if comment is not None: new_config.comment = comment self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config) self.config = new_config diff --git a/awx/lib/site-packages/boto/cloudfront/invalidation.py b/awx/lib/site-packages/boto/cloudfront/invalidation.py index dcc3c4c54f..91ba89d9ee 100644 --- a/awx/lib/site-packages/boto/cloudfront/invalidation.py +++ b/awx/lib/site-packages/boto/cloudfront/invalidation.py @@ -75,7 +75,7 @@ class InvalidationBatch(object): def to_xml(self): """Get this batch as XML""" - assert self.connection != None + assert self.connection is not None s = '\n' s += '\n' % self.connection.Version for p in self.paths: diff --git a/awx/lib/site-packages/boto/cloudsearch/__init__.py b/awx/lib/site-packages/boto/cloudsearch/__init__.py index 466ad42628..451a6bfab8 100644 --- a/awx/lib/site-packages/boto/cloudsearch/__init__.py +++ b/awx/lib/site-packages/boto/cloudsearch/__init__.py @@ -21,7 +21,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
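The repeated "!= None" to "is not None" conversions in these hunks are a correctness fix, not just style: equality operators dispatch to the object's comparison hooks, while identity tests cannot be intercepted. A self-contained illustration:

    class Weird(object):
        def __ne__(self, other):
            return False  # legal, if perverse

    w = Weird()
    print(w != None)       # False: dispatches to __ne__
    print(w is not None)   # True: identity cannot be overridden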
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -32,23 +32,10 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ import boto.cloudsearch.layer1 - return [RegionInfo(name='us-east-1', - endpoint='cloudsearch.us-east-1.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - RegionInfo(name='eu-west-1', - endpoint='cloudsearch.eu-west-1.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - RegionInfo(name='us-west-1', - endpoint='cloudsearch.us-west-1.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - RegionInfo(name='us-west-2', - endpoint='cloudsearch.us-west-2.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - RegionInfo(name='ap-southeast-1', - endpoint='cloudsearch.ap-southeast-1.amazonaws.com', - connection_cls=boto.cloudsearch.layer1.Layer1), - - ] + return get_regions( + 'cloudsearch', + connection_cls=boto.cloudsearch.layer1.Layer1 + ) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/cloudsearch/layer1.py b/awx/lib/site-packages/boto/cloudsearch/layer1.py index 4ca763e9f0..92ebe08207 100644 --- a/awx/lib/site-packages/boto/cloudsearch/layer1.py +++ b/awx/lib/site-packages/boto/cloudsearch/layer1.py @@ -46,7 +46,7 @@ class Layer1(AWSQueryConnection): proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', api_version=None, security_token=None, - validate_certs=True): + validate_certs=True, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) @@ -66,7 +66,8 @@ class Layer1(AWSQueryConnection): https_connection_factory=https_connection_factory, path=path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -88,7 +89,7 @@ class Layer1(AWSQueryConnection): for p in doc_path: inner = inner.get(p) if not inner: - return None if list_marker == None else [] + return None if list_marker is None else [] if isinstance(inner, list): return inner else: diff --git a/awx/lib/site-packages/boto/cloudtrail/__init__.py b/awx/lib/site-packages/boto/cloudtrail/__init__.py index 836f57fcaf..263caffa6d 100644 --- a/awx/lib/site-packages/boto/cloudtrail/__init__.py +++ b/awx/lib/site-packages/boto/cloudtrail/__init__.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
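Replacing the hard-coded region tables with get_regions() means endpoints now come from boto's bundled endpoint data, so new regions appear without further code changes. The call shape used throughout these hunks:

    from boto.regioninfo import get_regions
    import boto.cloudsearch.layer1

    regions = get_regions('cloudsearch',
                          connection_cls=boto.cloudsearch.layer1.Layer1)
    print(sorted(r.name for r in regions))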
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,14 +31,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.cloudtrail.layer1 import CloudTrailConnection - - return [RegionInfo(name='us-east-1', - endpoint='cloudtrail.us-east-1.amazonaws.com', - connection_cls=CloudTrailConnection), - RegionInfo(name='us-west-2', - endpoint='cloudtrail.us-west-2.amazonaws.com', - connection_cls=CloudTrailConnection), - ] + return get_regions('cloudtrail', connection_cls=CloudTrailConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/connection.py b/awx/lib/site-packages/boto/connection.py index 592a0098bd..a178d1fdf6 100644 --- a/awx/lib/site-packages/boto/connection.py +++ b/awx/lib/site-packages/boto/connection.py @@ -45,6 +45,7 @@ Handles basic connections to AWS from __future__ import with_statement import base64 +from datetime import datetime import errno import httplib import os @@ -423,7 +424,7 @@ class AWSAuthConnection(object): https_connection_factory=None, path='/', provider='aws', security_token=None, suppress_consec_slashes=True, - validate_certs=True): + validate_certs=True, profile_name=None): """ :type host: str :param host: The host to make the connection to @@ -434,6 +435,10 @@ class AWSAuthConnection(object): :keyword str aws_secret_access_key: Your AWS Secret Access Key (provided by Amazon). If none is specified, the value in your ``AWS_SECRET_ACCESS_KEY`` environmental variable is used. + :keyword str security_token: The security token associated with + temporary credentials issued by STS. Optional unless using + temporary credentials. If none is specified, the environment + variable ``AWS_SECURITY_TOKEN`` is used if defined. :type is_secure: boolean :param is_secure: Whether the connection is over SSL @@ -464,6 +469,10 @@ class AWSAuthConnection(object): :type validate_certs: bool :param validate_certs: Controls whether SSL certificates will be validated or not. Defaults to True. + + :type profile_name: str + :param profile_name: Override usual Credentials section in config + file to use a named set of keys instead. """ self.suppress_consec_slashes = suppress_consec_slashes self.num_retries = 6 @@ -485,8 +494,11 @@ class AWSAuthConnection(object): "support this feature are not available. Certificate " "validation is only supported when running under Python " "2.6 or later.") - self.ca_certificates_file = config.get_value( + certs_file = config.get_value( 'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE) + if certs_file == 'system': + certs_file = None + self.ca_certificates_file = certs_file if port: self.port = port else: @@ -542,7 +554,8 @@ class AWSAuthConnection(object): self.provider = Provider(self._provider_type, aws_access_key_id, aws_secret_access_key, - security_token) + security_token, + profile_name) # Allow config file to override default host, port, and host header. 
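The profile_name keyword threaded through these constructors selects a named credential set from the boto config instead of the default [Credentials] section. A sketch under the assumption that boto resolves the name against a "[profile <name>]" config section; the profile and region names are placeholders:

    # ~/.boto (hypothetical):
    #   [profile dev]
    #   aws_access_key_id = AKIA...
    #   aws_secret_access_key = ...
    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1', profile_name='dev')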
if self.provider.host: @@ -559,6 +572,7 @@ class AWSAuthConnection(object): host, config, self.provider, self._required_auth_capability()) if getattr(self, 'AuthServiceName', None) is not None: self.auth_service_name = self.AuthServiceName + self.request_hook = None def __repr__(self): return '%s:%s' % (self.__class__.__name__, self.host) @@ -599,6 +613,10 @@ class AWSAuthConnection(object): gs_secret_access_key = aws_secret_access_key secret_key = aws_secret_access_key + def profile_name(self): + return self.provider.profile_name + profile_name = property(profile_name) + def get_path(self, path='/'): # The default behavior is to suppress consecutive slashes for reasons # discussed at @@ -680,7 +698,7 @@ class AWSAuthConnection(object): self.proxy_port = self.port self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '') - self.use_proxy = (self.proxy != None) + self.use_proxy = (self.proxy is not None) def get_http_connection(self, host, port, is_secure): conn = self._pool.get_http_connection(host, port, is_secure) @@ -806,9 +824,12 @@ class AWSAuthConnection(object): h = httplib.HTTPConnection(host) if self.https_validate_certificates and HAVE_HTTPS_CONNECTION: - boto.log.debug("wrapping ssl socket for proxied connection; " - "CA certificate file=%s", - self.ca_certificates_file) + msg = "wrapping ssl socket for proxied connection; " + if self.ca_certificates_file: + msg += "CA certificate file=%s" %self.ca_certificates_file + else: + msg += "using system provided SSL certs" + boto.log.debug(msg) key_file = self.http_connection_kwargs.get('key_file', None) cert_file = self.http_connection_kwargs.get('cert_file', None) sslSock = ssl.wrap_socket(sock, keyfile=key_file, @@ -847,6 +868,9 @@ class AWSAuthConnection(object): except AttributeError: request.headers['Host'] = self.host.split(':', 1)[0] + def set_request_hook(self, hook): + self.request_hook = hook + def _mexe(self, request, sender=None, override_num_retries=None, retry_handler=None): """ @@ -887,8 +911,9 @@ class AWSAuthConnection(object): # the port info. All others should be now be up to date and # not include the port. if 's3' not in self._required_auth_capability(): - self.set_host_header(request) - + if not getattr(self, 'anon', False): + self.set_host_header(request) + request.start_time = datetime.now() if callable(sender): response = sender(connection, request.method, request.path, request.body, request.headers) @@ -929,6 +954,8 @@ class AWSAuthConnection(object): else: self.put_http_connection(request.host, request.port, self.is_secure, connection) + if self.request_hook is not None: + self.request_hook.handle_request_data(request, response) return response else: scheme, request.host, request.path, \ @@ -969,6 +996,8 @@ class AWSAuthConnection(object): # and stil haven't succeeded. So, if we have a response object, # use it to raise an exception. # Otherwise, raise the exception that must have already happened. 
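The new request-hook seam gives callers a single callback around _mexe: set_request_hook() registers an object whose handle_request_data(request, response, error=False) method runs after each exchange, and request.start_time is stamped just before the send. A minimal timing hook, assuming a conn connection object:

    from datetime import datetime

    class TimingHook(object):
        def handle_request_data(self, request, response, error=False):
            # response can be None on the error path, hence the getattr.
            elapsed = datetime.now() - request.start_time
            print('%s %s -> %s in %s' % (request.method, request.path,
                                         getattr(response, 'status', 'error'),
                                         elapsed))

    conn.set_request_hook(TimingHook())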
+ if self.request_hook is not None: + self.request_hook.handle_request_data(request, response, error=True) if response: raise BotoServerError(response.status, response.reason, body) elif e: @@ -982,11 +1011,11 @@ class AWSAuthConnection(object): path = self.get_path(path) if auth_path is not None: auth_path = self.get_path(auth_path) - if params == None: + if params is None: params = {} else: params = params.copy() - if headers == None: + if headers is None: headers = {} else: headers = headers.copy() @@ -1033,14 +1062,15 @@ class AWSQueryConnection(AWSAuthConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host=None, debug=0, https_connection_factory=None, path='/', security_token=None, - validate_certs=True): + validate_certs=True, profile_name=None): super(AWSQueryConnection, self).__init__(host, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug, https_connection_factory, path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return [] diff --git a/awx/lib/site-packages/boto/datapipeline/__init__.py b/awx/lib/site-packages/boto/datapipeline/__init__.py index e69de29bb2..1f61ea67e9 100644 --- a/awx/lib/site-packages/boto/datapipeline/__init__.py +++ b/awx/lib/site-packages/boto/datapipeline/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the AWS Datapipeline service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.datapipeline.layer1 import DataPipelineConnection + return get_regions('datapipeline', connection_cls=DataPipelineConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/datapipeline/layer1.py b/awx/lib/site-packages/boto/datapipeline/layer1.py index 0d9043521e..6635f01c2d 100644 --- a/awx/lib/site-packages/boto/datapipeline/layer1.py +++ b/awx/lib/site-packages/boto/datapipeline/layer1.py @@ -85,7 +85,7 @@ class DataPipelineConnection(AWSQueryConnection): def __init__(self, **kwargs): - region = kwargs.get('region') + region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) diff --git a/awx/lib/site-packages/boto/directconnect/__init__.py b/awx/lib/site-packages/boto/directconnect/__init__.py index 0fa314ca69..2603177d62 100644 --- a/awx/lib/site-packages/boto/directconnect/__init__.py +++ b/awx/lib/site-packages/boto/directconnect/__init__.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,32 +31,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.directconnect.layer1 import DirectConnectConnection - - return [RegionInfo(name='us-east-1', - endpoint='directconnect.us-east-1.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='us-west-1', - endpoint='directconnect.us-west-1.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='us-west-2', - endpoint='directconnect.us-west-2.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='eu-west-1', - endpoint='directconnect.eu-west-1.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='ap-southeast-1', - endpoint='directconnect.ap-southeast-1.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='ap-southeast-2', - endpoint='directconnect.ap-southeast-2.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='ap-southeast-3', - endpoint='directconnect.ap-southeast-3.amazonaws.com', - connection_cls=DirectConnectConnection), - RegionInfo(name='sa-east-1', - endpoint='directconnect.sa-east-1.amazonaws.com', - connection_cls=DirectConnectConnection), - ] + return get_regions('directconnect', connection_cls=DirectConnectConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/dynamodb/__init__.py b/awx/lib/site-packages/boto/dynamodb/__init__.py index a6bd27395e..8d548167d8 100644 --- a/awx/lib/site-packages/boto/dynamodb/__init__.py +++ b/awx/lib/site-packages/boto/dynamodb/__init__.py @@ -21,7 +21,7 @@ # IN THE SOFTWARE. 
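The kwargs.pop('region', None) change in layer1 is subtle but real: with kwargs.get(), a caller-supplied region stayed in kwargs and was most likely forwarded to the parent constructor a second time. With the new module-level helpers, connecting looks like this (placeholder region name):

    import boto.datapipeline

    conn = boto.datapipeline.connect_to_region('us-east-1')
    if conn is None:
        raise ValueError('unknown Data Pipeline region')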
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -32,37 +32,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ import boto.dynamodb.layer2 - return [RegionInfo(name='us-east-1', - endpoint='dynamodb.us-east-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='us-gov-west-1', - endpoint='dynamodb.us-gov-west-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='us-west-1', - endpoint='dynamodb.us-west-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='us-west-2', - endpoint='dynamodb.us-west-2.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='ap-northeast-1', - endpoint='dynamodb.ap-northeast-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='ap-southeast-1', - endpoint='dynamodb.ap-southeast-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='ap-southeast-2', - endpoint='dynamodb.ap-southeast-2.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='eu-west-1', - endpoint='dynamodb.eu-west-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='sa-east-1', - endpoint='dynamodb.sa-east-1.amazonaws.com', - connection_cls=boto.dynamodb.layer2.Layer2), - RegionInfo(name='cn-north-1', - endpoint='dynamodb.cn-north-1.amazonaws.com.cn', - connection_cls=boto.dynamodb.layer2.Layer2), - ] + return get_regions('dynamodb', connection_cls=boto.dynamodb.layer2.Layer2) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/dynamodb/item.py b/awx/lib/site-packages/boto/dynamodb/item.py index 9d9290963d..9dcbad0628 100644 --- a/awx/lib/site-packages/boto/dynamodb/item.py +++ b/awx/lib/site-packages/boto/dynamodb/item.py @@ -41,13 +41,13 @@ class Item(dict): self._updates = None self._hash_key_name = self.table.schema.hash_key_name self._range_key_name = self.table.schema.range_key_name - if attrs == None: + if attrs is None: attrs = {} - if hash_key == None: + if hash_key is None: hash_key = attrs.get(self._hash_key_name, None) self[self._hash_key_name] = hash_key if self._range_key_name: - if range_key == None: + if range_key is None: range_key = attrs.get(self._range_key_name, None) self[self._range_key_name] = range_key self._updates = {} diff --git a/awx/lib/site-packages/boto/dynamodb/layer1.py b/awx/lib/site-packages/boto/dynamodb/layer1.py index ca11ca43de..317cf43370 100644 --- a/awx/lib/site-packages/boto/dynamodb/layer1.py +++ b/awx/lib/site-packages/boto/dynamodb/layer1.py @@ -74,7 +74,7 @@ class Layer1(AWSAuthConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, debug=0, security_token=None, region=None, - validate_certs=True, validate_checksums=True): + validate_certs=True, validate_checksums=True, profile_name=None): if not region: region_name = boto.config.get('DynamoDB', 'region', self.DefaultRegionName) @@ -89,7 +89,8 @@ class Layer1(AWSAuthConnection): aws_secret_access_key, is_secure, port, proxy, proxy_port, debug=debug, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) self.throughput_exceeded_events = 0 self._validate_checksums = boto.config.getbool( 'DynamoDB', 'validate_checksums', validate_checksums) diff --git a/awx/lib/site-packages/boto/dynamodb/layer2.py 
b/awx/lib/site-packages/boto/dynamodb/layer2.py index 16fcdbbb23..743c7055cd 100644 --- a/awx/lib/site-packages/boto/dynamodb/layer2.py +++ b/awx/lib/site-packages/boto/dynamodb/layer2.py @@ -145,11 +145,13 @@ class Layer2(object): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, debug=0, security_token=None, region=None, - validate_certs=True, dynamizer=LossyFloatDynamizer): + validate_certs=True, dynamizer=LossyFloatDynamizer, + profile_name=None): self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, debug, security_token, region, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) self.dynamizer = dynamizer() def use_decimals(self): diff --git a/awx/lib/site-packages/boto/dynamodb/types.py b/awx/lib/site-packages/boto/dynamodb/types.py index 987e0d0f10..4c3270ba23 100644 --- a/awx/lib/site-packages/boto/dynamodb/types.py +++ b/awx/lib/site-packages/boto/dynamodb/types.py @@ -136,6 +136,9 @@ def dynamize_value(val): class Binary(object): def __init__(self, value): + if not isinstance(value, basestring): + raise TypeError('Value must be a string of binary data!') + self.value = value def encode(self): diff --git a/awx/lib/site-packages/boto/dynamodb2/__init__.py b/awx/lib/site-packages/boto/dynamodb2/__init__.py index 23f4c5abfc..aa07e5cce4 100644 --- a/awx/lib/site-packages/boto/dynamodb2/__init__.py +++ b/awx/lib/site-packages/boto/dynamodb2/__init__.py @@ -21,7 +21,7 @@ # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -32,37 +32,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.dynamodb2.layer1 import DynamoDBConnection - return [RegionInfo(name='us-east-1', - endpoint='dynamodb.us-east-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='us-gov-west-1', - endpoint='dynamodb.us-gov-west-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='us-west-1', - endpoint='dynamodb.us-west-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='us-west-2', - endpoint='dynamodb.us-west-2.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='eu-west-1', - endpoint='dynamodb.eu-west-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='ap-northeast-1', - endpoint='dynamodb.ap-northeast-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='ap-southeast-1', - endpoint='dynamodb.ap-southeast-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='ap-southeast-2', - endpoint='dynamodb.ap-southeast-2.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='sa-east-1', - endpoint='dynamodb.sa-east-1.amazonaws.com', - connection_cls=DynamoDBConnection), - RegionInfo(name='cn-north-1', - endpoint='dynamodb.cn-north-1.amazonaws.com.cn', - connection_cls=DynamoDBConnection), - ] + return get_regions('dynamodb', connection_cls=DynamoDBConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/dynamodb2/exceptions.py b/awx/lib/site-packages/boto/dynamodb2/exceptions.py index a9fcf75b27..3a677e09df 100644 --- a/awx/lib/site-packages/boto/dynamodb2/exceptions.py +++ b/awx/lib/site-packages/boto/dynamodb2/exceptions.py @@ -72,3 +72,7 @@ class UnknownFilterTypeError(DynamoDBError): class QueryError(DynamoDBError): pass + + +class 
ItemNotFound(DynamoDBError): + pass diff --git a/awx/lib/site-packages/boto/dynamodb2/fields.py b/awx/lib/site-packages/boto/dynamodb2/fields.py index 911a11b5b0..4443969e4e 100644 --- a/awx/lib/site-packages/boto/dynamodb2/fields.py +++ b/awx/lib/site-packages/boto/dynamodb2/fields.py @@ -323,7 +323,10 @@ class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex): projection_type = 'INCLUDE' def __init__(self, *args, **kwargs): + throughput = kwargs.pop('throughput', None) IncludeIndex.__init__(self, *args, **kwargs) + if throughput: + kwargs['throughput'] = throughput GlobalBaseIndexField.__init__(self, *args, **kwargs) def schema(self): @@ -331,4 +334,4 @@ class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex): schema_data = IncludeIndex.schema(self) # Also the throughput. schema_data.update(GlobalBaseIndexField.schema(self)) - return schema_data \ No newline at end of file + return schema_data diff --git a/awx/lib/site-packages/boto/dynamodb2/results.py b/awx/lib/site-packages/boto/dynamodb2/results.py index 8ddf3022c6..98da8a6af7 100644 --- a/awx/lib/site-packages/boto/dynamodb2/results.py +++ b/awx/lib/site-packages/boto/dynamodb2/results.py @@ -20,7 +20,7 @@ class ResultSet(object): ... print res['username'] """ - def __init__(self): + def __init__(self, max_page_size=None): super(ResultSet, self).__init__() self.the_callable = None self.call_args = [] @@ -29,6 +29,9 @@ class ResultSet(object): self._offset = -1 self._results_left = True self._last_key_seen = None + self._fetches = 0 + self._max_page_size = max_page_size + self._limit = None @property def first_key(self): @@ -65,6 +68,12 @@ class ResultSet(object): self.fetch_more() if self._offset < len(self._results): + if self._limit is not None: + self._limit -= 1 + + if self._limit < 0: + raise StopIteration() + return self._results[self._offset] else: raise StopIteration() @@ -92,6 +101,14 @@ class ResultSet(object): 'You must supply an object or function to be called.' ) + # We pop the ``limit``, if present, to track how many we should return + # to the user. This isn't the same as the ``limit`` that the low-level + # DDB api calls use (which limit page size, not the overall result set). + self._limit = kwargs.pop('limit', None) + + if self._limit < 0: + self._limit = None + self.the_callable = the_callable self.call_args = args self.call_kwargs = kwargs @@ -111,19 +128,34 @@ class ResultSet(object): if self._last_key_seen is not None: kwargs[self.first_key] = self._last_key_seen + # If the page size is greater than limit set them + # to the same value + if self._limit and self._max_page_size > self._limit: + self._max_page_size = self._limit + + # Put in the max page size. + if self._max_page_size is not None: + kwargs['limit'] = self._max_page_size + elif self._limit is not None: + # If max_page_size is not set and limit is available + # use it as the page size + kwargs['limit'] = self._limit + results = self.the_callable(*args, **kwargs) + self._fetches += 1 new_results = results.get('results', []) self._last_key_seen = results.get('last_key', None) if len(new_results): self._results.extend(results['results']) - # Decrease the limit, if it's present. - if self.call_kwargs.get('limit'): - self.call_kwargs['limit'] -= len(results['results']) - # and if limit hits zero, we don't have any more - # results to look for - if 0 == self.call_kwargs['limit']: + # Check the limit, if it's present. 
+ if self._limit is not None and self._limit >= 0: + limit = self._limit + limit -= len(results['results']) + # If we've exceeded the limit, we don't have any more + # results to look for. + if limit <= 0: self._results_left = False if self._last_key_seen is None: diff --git a/awx/lib/site-packages/boto/dynamodb2/table.py b/awx/lib/site-packages/boto/dynamodb2/table.py index 78cc121b32..338ced19a0 100644 --- a/awx/lib/site-packages/boto/dynamodb2/table.py +++ b/awx/lib/site-packages/boto/dynamodb2/table.py @@ -8,6 +8,7 @@ from boto.dynamodb2.items import Item from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2.results import ResultSet, BatchGetResultSet from boto.dynamodb2.types import Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS +from boto.exception import JSONResponseError class Table(object): @@ -169,7 +170,7 @@ class Table(object): ... ], ... throughput={ ... 'read':10, - ... 'write":10, + ... 'write':10, ... }), ... ]) @@ -436,7 +437,7 @@ class Table(object): return raw_key - def get_item(self, consistent=False, **kwargs): + def get_item(self, consistent=False, attributes=None, **kwargs): """ Fetches an item (record) from a table in DynamoDB. @@ -448,6 +449,10 @@ class Table(object): a consistent (but more expensive) read from DynamoDB. (Default: ``False``) + Optionally accepts an ``attributes`` parameter, which should be a + list of fieldname to fetch. (Default: ``None``, which means all fields + should be fetched) + Returns an ``Item`` instance containing all the data for that record. Example:: @@ -480,12 +485,54 @@ class Table(object): item_data = self.connection.get_item( self.table_name, raw_key, + attributes_to_get=attributes, consistent_read=consistent ) + if 'Item' not in item_data: + raise exceptions.ItemNotFound("Item %s couldn't be found." % kwargs) item = Item(self) item.load(item_data) return item + def has_item(self, **kwargs): + """ + Return whether an item (record) exists within a table in DynamoDB. + + To specify the key of the item you'd like to get, you can specify the + key attributes as kwargs. + + Optionally accepts a ``consistent`` parameter, which should be a + boolean. If you provide ``True``, it will perform + a consistent (but more expensive) read from DynamoDB. + (Default: ``False``) + + Optionally accepts an ``attributes`` parameter, which should be a + list of fieldnames to fetch. (Default: ``None``, which means all fields + should be fetched) + + Returns ``True`` if an ``Item`` is present, ``False`` if not. + + Example:: + + # Simple, just hash-key schema. + >>> users.has_item(username='johndoe') + True + + # Complex schema, item not present. + >>> users.has_item( + ... username='johndoe', + ... date_joined='2014-01-07' + ... ) + False + + """ + try: + self.get_item(**kwargs) + except (JSONResponseError, exceptions.ItemNotFound): + return False + + return True + def lookup(self, *args, **kwargs): """ Look up an entry in DynamoDB. This is mostly backwards compatible @@ -524,7 +571,6 @@ class Table(object): data[self.schema[x].name] = arg return Item(self, data=data) - def put_item(self, data, overwrite=False): """ Saves an entire item to DynamoDB. @@ -755,7 +801,7 @@ class Table(object): return filters def query(self, limit=None, index=None, reverse=False, consistent=False, - attributes=None, **filter_kwargs): + attributes=None, max_page_size=None, **filter_kwargs): """ Queries for a set of matching items in a DynamoDB table. @@ -790,6 +836,12 @@ class Table(object): from DynamoDB. 
This uses the ``AttributesToGet`` and set's ``Select`` to ``SPECIFIC_ATTRIBUTES`` API. + Optionally accepts a ``max_page_size`` parameter, which should be an + integer count of the maximum number of items to retrieve + **per-request**. This is useful in making faster requests & prevent + the scan from drowning out other queries. (Default: ``None`` - + fetch as many as DynamoDB will return) + Returns a ``ResultSet``, which transparently handles the pagination of results you get back. @@ -830,17 +882,24 @@ class Table(object): """ if self.schema: - if len(self.schema) == 1 and len(filter_kwargs) <= 1: - raise exceptions.QueryError( - "You must specify more than one key to filter on." - ) + if len(self.schema) == 1: + if len(filter_kwargs) <= 1: + if not self.global_indexes or not len(self.global_indexes): + # If the schema only has one field, there's <= 1 filter + # param & no Global Secondary Indexes, this is user + # error. Bail early. + raise exceptions.QueryError( + "You must specify more than one key to filter on." + ) if attributes is not None: select = 'SPECIFIC_ATTRIBUTES' else: select = None - results = ResultSet() + results = ResultSet( + max_page_size=max_page_size + ) kwargs = filter_kwargs.copy() kwargs.update({ 'limit': limit, @@ -848,7 +907,7 @@ class Table(object): 'reverse': reverse, 'consistent': consistent, 'select': select, - 'attributes_to_get': attributes + 'attributes_to_get': attributes, }) results.to_call(self._query, **kwargs) return results @@ -961,7 +1020,7 @@ class Table(object): } def scan(self, limit=None, segment=None, total_segments=None, - **filter_kwargs): + max_page_size=None, attributes=None, **filter_kwargs): """ Scans across all items within a DynamoDB table. @@ -977,6 +1036,26 @@ class Table(object): count of the total number of items to return. (Default: ``None`` - all results) + Optionally accepts a ``segment`` parameter, which should be an integer + of the segment to retrieve on. Please see the documentation about + Parallel Scans (Default: ``None`` - no segments) + + Optionally accepts a ``total_segments`` parameter, which should be an + integer count of number of segments to divide the table into. + Please see the documentation about Parallel Scans (Default: ``None`` - + no segments) + + Optionally accepts a ``max_page_size`` parameter, which should be an + integer count of the maximum number of items to retrieve + **per-request**. This is useful in making faster requests & prevent + the scan from drowning out other queries. (Default: ``None`` - + fetch as many as DynamoDB will return) + + Optionally accepts an ``attributes`` parameter, which should be a + tuple. If you provide any attributes only these will be fetched + from DynamoDB. This uses the ``AttributesToGet`` and set's + ``Select`` to ``SPECIFIC_ATTRIBUTES`` API. + Returns a ``ResultSet``, which transparently handles the pagination of results you get back. @@ -1003,18 +1082,21 @@ class Table(object): 'Alice' """ - results = ResultSet() + results = ResultSet( + max_page_size=max_page_size + ) kwargs = filter_kwargs.copy() kwargs.update({ 'limit': limit, 'segment': segment, 'total_segments': total_segments, + 'attributes': attributes, }) results.to_call(self._scan, **kwargs) return results def _scan(self, limit=None, exclusive_start_key=None, segment=None, - total_segments=None, **filter_kwargs): + total_segments=None, attributes=None, **filter_kwargs): """ The internal method that performs the actual scan. Used extensively by ``ResultSet`` to perform each (paginated) request. 
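Pulling the new table-level knobs together: has_item() converts the new ItemNotFound exception into a boolean, limit caps the total items handed back, and max_page_size caps each underlying request so a long scan does not crowd out other traffic. A hedged sketch against a hypothetical table:

    from boto.dynamodb2.table import Table

    users = Table('users')  # hypothetical table name

    if not users.has_item(username='johndoe'):
        users.put_item(data={'username': 'johndoe'})

    # At most 100 items total, fetched at most 25 per request.
    for item in users.scan(limit=100, max_page_size=25):
        print(item['username'])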
@@ -1023,6 +1105,7 @@ class Table(object): 'limit': limit, 'segment': segment, 'total_segments': total_segments, + 'attributes_to_get': attributes, } if exclusive_start_key: diff --git a/awx/lib/site-packages/boto/ec2/__init__.py b/awx/lib/site-packages/boto/ec2/__init__.py index d0e18bf5be..c3976da19f 100644 --- a/awx/lib/site-packages/boto/ec2/__init__.py +++ b/awx/lib/site-packages/boto/ec2/__init__.py @@ -24,21 +24,10 @@ This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. """ from boto.ec2.connection import EC2Connection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions -RegionData = { - 'us-east-1': 'ec2.us-east-1.amazonaws.com', - 'us-gov-west-1': 'ec2.us-gov-west-1.amazonaws.com', - 'us-west-1': 'ec2.us-west-1.amazonaws.com', - 'us-west-2': 'ec2.us-west-2.amazonaws.com', - 'sa-east-1': 'ec2.sa-east-1.amazonaws.com', - 'eu-west-1': 'ec2.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'ec2.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'ec2.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'ec2.ap-southeast-2.amazonaws.com', - 'cn-north-1': 'ec2.cn-north-1.amazonaws.com.cn', -} +RegionData = load_regions().get('ec2', {}) def regions(**kw_params): @@ -51,13 +40,7 @@ def regions(**kw_params): :rtype: list :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=EC2Connection) - regions.append(region) - return regions + return get_regions('ec2', connection_cls=EC2Connection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/ec2/address.py b/awx/lib/site-packages/boto/ec2/address.py index 3b82d11588..ce1189ed45 100644 --- a/awx/lib/site-packages/boto/ec2/address.py +++ b/awx/lib/site-packages/boto/ec2/address.py @@ -89,14 +89,23 @@ class Address(EC2Object): delete = release - def associate(self, instance_id, dry_run=False): + def associate(self, instance_id, allow_reassociation=False, dry_run=False): """ Associate this Elastic IP address with a currently running instance. 
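The allow_reassociation flag added to Address.associate() matters on the VPC path in the body that follows: when the address carries an allocation_id, passing True lets an EIP that is already attached move to the new instance instead of failing. A sketch with placeholder identifiers:

    import boto.ec2

    ec2 = boto.ec2.connect_to_region('us-east-1')
    addr = ec2.get_all_addresses(['198.51.100.1'])[0]  # hypothetical EIP
    addr.associate('i-12345678', allow_reassociation=True)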
:see: :meth:`boto.ec2.connection.EC2Connection.associate_address` """ + if self.allocation_id: + return self.connection.associate_address( + instance_id, + self.public_ip, + allocation_id=self.allocation_id, + allow_reassociation=allow_reassociation, + dry_run=dry_run + ) return self.connection.associate_address( instance_id, self.public_ip, + allow_reassociation=allow_reassociation, dry_run=dry_run ) diff --git a/awx/lib/site-packages/boto/ec2/autoscale/__init__.py b/awx/lib/site-packages/boto/ec2/autoscale/__init__.py index 6e2c1036bd..2a0f692727 100644 --- a/awx/lib/site-packages/boto/ec2/autoscale/__init__.py +++ b/awx/lib/site-packages/boto/ec2/autoscale/__init__.py @@ -31,7 +31,7 @@ import base64 import boto from boto.connection import AWSQueryConnection -from boto.ec2.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions from boto.ec2.autoscale.request import Request from boto.ec2.autoscale.launchconfig import LaunchConfiguration from boto.ec2.autoscale.group import AutoScalingGroup @@ -44,19 +44,9 @@ from boto.ec2.autoscale.policy import TerminationPolicies from boto.ec2.autoscale.instance import Instance from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction from boto.ec2.autoscale.tag import Tag +from boto.ec2.autoscale.limits import AccountLimits -RegionData = { - 'us-east-1': 'autoscaling.us-east-1.amazonaws.com', - 'us-gov-west-1': 'autoscaling.us-gov-west-1.amazonaws.com', - 'us-west-1': 'autoscaling.us-west-1.amazonaws.com', - 'us-west-2': 'autoscaling.us-west-2.amazonaws.com', - 'sa-east-1': 'autoscaling.sa-east-1.amazonaws.com', - 'eu-west-1': 'autoscaling.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'autoscaling.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'autoscaling.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'autoscaling.ap-southeast-2.amazonaws.com', - 'cn-north-1': 'autoscaling.cn-north-1.amazonaws.com.cn', -} +RegionData = load_regions().get('autoscaling', {}) def regions(): @@ -66,13 +56,7 @@ def regions(): :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=AutoScaleConnection) - regions.append(region) - return regions + return get_regions('autoscaling', connection_cls=AutoScaleConnection) def connect_to_region(region_name, **kw_params): @@ -103,18 +87,22 @@ class AutoScaleConnection(AWSQueryConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None, + use_block_device_types=False): """ Init method to create a new connection to the AutoScaling service. B{Note:} The host argument is overridden by the host specified in the boto configuration file. 
+ + """ if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, AutoScaleConnection) self.region = region + self.use_block_device_types = use_block_device_types super(AutoScaleConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, @@ -122,7 +110,8 @@ class AutoScaleConnection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path=path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -176,6 +165,8 @@ class AutoScaleConnection(AWSQueryConnection): params['DefaultCooldown'] = as_group.default_cooldown if as_group.placement_group: params['PlacementGroup'] = as_group.placement_group + if as_group.instance_id: + params['InstanceId'] = as_group.instance_id if as_group.termination_policies: self.build_list_params(params, as_group.termination_policies, 'TerminationPolicies') @@ -190,6 +181,16 @@ class AutoScaleConnection(AWSQueryConnection): tag.build_params(params, i + 1) return self.get_object(op, params, Request) + def attach_instances(self, name, instance_ids): + """ + Attach instances to an autoscaling group. + """ + params = { + 'AutoScalingGroupName': name, + } + self.build_list_params(params, instance_ids, 'InstanceIds') + return self.get_status('AttachInstances', params) + def create_auto_scaling_group(self, as_group): """ Create auto scaling group. @@ -246,9 +247,25 @@ class AutoScaleConnection(AWSQueryConnection): params['AssociatePublicIpAddress'] = 'true' elif launch_config.associate_public_ip_address is False: params['AssociatePublicIpAddress'] = 'false' + if launch_config.volume_type: + params['VolumeType'] = launch_config.volume_type + if launch_config.delete_on_termination: + params['DeleteOnTermination'] = 'true' + else: + params['DeleteOnTermination'] = 'false' + if launch_config.iops: + params['Iops'] = launch_config.iops return self.get_object('CreateLaunchConfiguration', params, Request, verb='POST') + def get_account_limits(self): + """ + Returns the limits for the Auto Scaling resources currently granted for + your AWS account. + """ + params = {} + return self.get_object('DescribeAccountLimits', params, AccountLimits) + def create_scaling_policy(self, scaling_policy): """ Creates a new Scaling Policy. diff --git a/awx/lib/site-packages/boto/ec2/autoscale/group.py b/awx/lib/site-packages/boto/ec2/autoscale/group.py index d3646877f4..c3c041275d 100644 --- a/awx/lib/site-packages/boto/ec2/autoscale/group.py +++ b/awx/lib/site-packages/boto/ec2/autoscale/group.py @@ -98,7 +98,8 @@ class AutoScalingGroup(object): health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, desired_capacity=None, min_size=None, max_size=None, - tags=None, termination_policies=None, **kwargs): + tags=None, termination_policies=None, instance_id=None, + **kwargs): """ Creates a new AutoScalingGroup with the specified name. @@ -145,12 +146,12 @@ class AutoScalingGroup(object): :param placement_group: Physical location of your cluster placement group created in Amazon EC2. - :type vpc_zone_identifier: str - :param vpc_zone_identifier: The subnet identifier of the Virtual - Private Cloud. - + :type vpc_zone_identifier: str or list + :param vpc_zone_identifier: A comma-separated string or python list of + the subnet identifiers of the Virtual Private Cloud. 
+ :type tags: list - :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s + :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s :type termination_policies: list :param termination_policies: A list of termination policies. Valid values @@ -158,6 +159,10 @@ class AutoScalingGroup(object): "ClosestToNextInstanceHour", "Default". If no value is specified, the "Default" value is used. + :type instance_id: str + :param instance_id: The ID of the Amazon EC2 instance you want to use + to create the Auto Scaling group. + :rtype: :class:`boto.ec2.autoscale.group.AutoScalingGroup` :return: An autoscale group. """ @@ -183,11 +188,14 @@ class AutoScalingGroup(object): self.health_check_type = health_check_type self.placement_group = placement_group self.autoscaling_group_arn = None + if type(vpc_zone_identifier) is list: + vpc_zone_identifier = ','.join(vpc_zone_identifier) self.vpc_zone_identifier = vpc_zone_identifier self.instances = None self.tags = tags or None termination_policies = termination_policies or [] self.termination_policies = ListElement(termination_policies) + self.instance_id = instance_id # backwards compatible access to 'cooldown' param def _get_cooldown(self): @@ -251,6 +259,8 @@ class AutoScalingGroup(object): self.health_check_type = value elif name == 'VPCZoneIdentifier': self.vpc_zone_identifier = value + elif name == 'InstanceId': + self.instance_id = value else: setattr(self, name, value) @@ -304,7 +314,7 @@ class AutoScalingGroup(object): 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', - 'autoscaling:TEST_NOTIFICATION' + 'autoscaling:TEST_NOTIFICATION' """ return self.connection.put_notification_configuration(self, topic, diff --git a/awx/lib/site-packages/boto/ec2/autoscale/launchconfig.py b/awx/lib/site-packages/boto/ec2/autoscale/launchconfig.py index 7e31592964..a4e6ce9ae7 100644 --- a/awx/lib/site-packages/boto/ec2/autoscale/launchconfig.py +++ b/awx/lib/site-packages/boto/ec2/autoscale/launchconfig.py @@ -21,14 +21,16 @@ # IN THE SOFTWARE. 
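The ``AutoScalingGroup`` changes above (list-valued ``vpc_zone_identifier`` and the new ``instance_id`` keyword) can be exercised roughly like this; the subnet and instance identifiers below are placeholders, not values from this diff:

    # Sketch only: all IDs are made up.
    import boto.ec2.autoscale
    from boto.ec2.autoscale import AutoScalingGroup

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')

    group = AutoScalingGroup(
        name='web-asg',
        # A python list is now accepted and joined into the
        # comma-separated VPCZoneIdentifier string the API expects.
        vpc_zone_identifier=['subnet-11111111', 'subnet-22222222'],
        # New: derive the group's launch configuration from a
        # running instance instead of naming a launch configuration.
        instance_id='i-12345678',
        min_size=1,
        max_size=4,
    )
    conn.create_auto_scaling_group(group)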
from datetime import datetime -from boto.resultset import ResultSet from boto.ec2.elb.listelement import ListElement +# Namespacing issue with deprecated local class +from boto.ec2.blockdevicemapping import BlockDeviceMapping as BDM +from boto.resultset import ResultSet import boto.utils import base64 + # this should use the corresponding object from boto.ec2 - - +# Currently in use by deprecated local BlockDeviceMapping class class Ebs(object): def __init__(self, connection=None, snapshot_id=None, volume_size=None): self.connection = connection @@ -65,12 +67,16 @@ class InstanceMonitoring(object): # this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping +# Currently in use by deprecated code for backwards compatibility +# Removing this class would also allow removing the Ebs class in this same file class BlockDeviceMapping(object): - def __init__(self, connection=None, device_name=None, virtual_name=None): + def __init__(self, connection=None, device_name=None, virtual_name=None, + ebs=None, no_device=None): self.connection = connection - self.device_name = None - self.virtual_name = None - self.ebs = None + self.device_name = device_name + self.virtual_name = virtual_name + self.ebs = ebs + self.no_device = no_device def __repr__(self): return 'BlockDeviceMapping(%s, %s)' % (self.device_name, @@ -86,6 +92,8 @@ class BlockDeviceMapping(object): self.device_name = value elif name == 'VirtualName': self.virtual_name = value + elif name == 'NoDevice': + self.no_device = bool(value) class LaunchConfiguration(object): @@ -95,7 +103,8 @@ class LaunchConfiguration(object): ramdisk_id=None, block_device_mappings=None, instance_monitoring=False, spot_price=None, instance_profile_name=None, ebs_optimized=False, - associate_public_ip_address=None): + associate_public_ip_address=None, volume_type=None, + delete_on_termination=True, iops=None, use_block_device_types=False): """ A launch configuration. @@ -147,8 +156,9 @@ class LaunchConfiguration(object): :param ebs_optimized: Specifies whether the instance is optimized for EBS I/O (true) or not (false). + :type associate_public_ip_address: bool - :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. + :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in an Amazon VPC.
""" self.connection = connection @@ -170,6 +180,13 @@ class LaunchConfiguration(object): self.launch_configuration_arn = None self.ebs_optimized = ebs_optimized self.associate_public_ip_address = associate_public_ip_address + self.volume_type = volume_type + self.delete_on_termination = delete_on_termination + self.iops = iops + self.use_block_device_types = use_block_device_types + + if connection is not None: + self.use_block_device_types = connection.use_block_device_types def __repr__(self): return 'LaunchConfiguration:%s' % self.name @@ -178,8 +195,10 @@ class LaunchConfiguration(object): if name == 'SecurityGroups': return self.security_groups elif name == 'BlockDeviceMappings': - self.block_device_mappings = ResultSet([('member', - BlockDeviceMapping)]) + if self.use_block_device_types: + self.block_device_mappings = BDM() + else: + self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)]) return self.block_device_mappings elif name == 'InstanceMonitoring': self.instance_monitoring = InstanceMonitoring(self) @@ -215,6 +234,17 @@ class LaunchConfiguration(object): self.instance_profile_name = value elif name == 'EbsOptimized': self.ebs_optimized = True if value.lower() == 'true' else False + elif name == 'AssociatePublicIpAddress': + self.associate_public_ip_address = True if value.lower() == 'true' else False + elif name == 'VolumeType': + self.volume_type = value + elif name == 'DeleteOnTermination': + if value.lower() == 'true': + self.delete_on_termination = True + else: + self.delete_on_termination = False + elif name == 'Iops': + self.iops = int(value) else: setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/ec2/autoscale/limits.py b/awx/lib/site-packages/boto/ec2/autoscale/limits.py new file mode 100644 index 0000000000..8472a20178 --- /dev/null +++ b/awx/lib/site-packages/boto/ec2/autoscale/limits.py @@ -0,0 +1,44 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
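A sketch of how the new EBS-related ``LaunchConfiguration`` keywords above flow into ``CreateLaunchConfiguration``; the configuration name and AMI ID are placeholders, while the three commented keywords are the ones added in this diff:

    # Sketch only: 'web-lc' and ami-12345678 are placeholders.
    import boto.ec2.autoscale
    from boto.ec2.autoscale import LaunchConfiguration

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')

    lc = LaunchConfiguration(
        name='web-lc',
        image_id='ami-12345678',
        instance_type='m3.medium',
        volume_type='io1',           # sent as VolumeType
        iops=1000,                   # sent as Iops
        delete_on_termination=True,  # sent as DeleteOnTermination
    )
    conn.create_launch_configuration(lc)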
+ +class AccountLimits(object): + def __init__(self, connection=None): + self.connection = connection + self.max_autoscaling_groups = None + self.max_launch_configurations = None + + def __repr__(self): + return 'AccountLimits: [%s, %s]' % (self.max_autoscaling_groups, + self.max_launch_configurations) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'RequestId': + self.request_id = value + elif name == 'MaxNumberOfAutoScalingGroups': + self.max_autoscaling_groups = int(value) + elif name == 'MaxNumberOfLaunchConfigurations': + self.max_launch_configurations = int(value) + else: + setattr(self, name, value) + diff --git a/awx/lib/site-packages/boto/ec2/autoscale/policy.py b/awx/lib/site-packages/boto/ec2/autoscale/policy.py index 0538557d33..fbe7dc5fb6 100644 --- a/awx/lib/site-packages/boto/ec2/autoscale/policy.py +++ b/awx/lib/site-packages/boto/ec2/autoscale/policy.py @@ -47,16 +47,17 @@ class Alarm(object): class AdjustmentType(object): def __init__(self, connection=None): self.connection = connection - self.adjustment_types = ListElement([]) + self.adjustment_type = None def __repr__(self): - return 'AdjustmentType:%s' % self.adjustment_types + return 'AdjustmentType:%s' % self.adjustment_type def startElement(self, name, attrs, connection): - if name == 'AdjustmentType': - return self.adjustment_types + return def endElement(self, name, value, connection): + if name == 'AdjustmentType': + self.adjustment_type = value return diff --git a/awx/lib/site-packages/boto/ec2/blockdevicemapping.py b/awx/lib/site-packages/boto/ec2/blockdevicemapping.py index 78f7e61da0..65ffbb1db1 100644 --- a/awx/lib/site-packages/boto/ec2/blockdevicemapping.py +++ b/awx/lib/site-packages/boto/ec2/blockdevicemapping.py @@ -55,25 +55,26 @@ class BlockDeviceType(object): pass def endElement(self, name, value, connection): + lname = name.lower() if name == 'volumeId': self.volume_id = value - elif name == 'virtualName': + elif lname == 'virtualname': self.ephemeral_name = value - elif name == 'NoDevice': + elif lname == 'nodevice': self.no_device = (value == 'true') - elif name == 'snapshotId': + elif lname == 'snapshotid': self.snapshot_id = value - elif name == 'volumeSize': + elif lname == 'volumesize': self.size = int(value) - elif name == 'status': + elif lname == 'status': self.status = value - elif name == 'attachTime': + elif lname == 'attachtime': self.attach_time = value - elif name == 'deleteOnTermination': + elif lname == 'deleteontermination': self.delete_on_termination = (value == 'true') - elif name == 'volumeType': + elif lname == 'volumetype': self.volume_type = value - elif name == 'iops': + elif lname == 'iops': self.iops = int(value) else: setattr(self, name, value) @@ -105,14 +106,16 @@ class BlockDeviceMapping(dict): self.current_value = None def startElement(self, name, attrs, connection): - if name == 'ebs' or name == 'virtualName': + lname = name.lower() + if lname in ['ebs', 'virtualname']: self.current_value = BlockDeviceType(self) return self.current_value def endElement(self, name, value, connection): - if name == 'device' or name == 'deviceName': + lname = name.lower() + if lname in ['device', 'devicename']: self.current_name = value - elif name == 'item': + elif lname in ['item', 'member']: self[self.current_name] = self.current_value def ec2_build_list_params(self, params, prefix=''): diff --git a/awx/lib/site-packages/boto/ec2/cloudwatch/__init__.py 
b/awx/lib/site-packages/boto/ec2/cloudwatch/__init__.py index 6bfcdbf36c..ba3376b131 100644 --- a/awx/lib/site-packages/boto/ec2/cloudwatch/__init__.py +++ b/awx/lib/site-packages/boto/ec2/cloudwatch/__init__.py @@ -28,21 +28,10 @@ from boto.connection import AWSQueryConnection from boto.ec2.cloudwatch.metric import Metric from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem from boto.ec2.cloudwatch.datapoint import Datapoint -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions import boto -RegionData = { - 'us-east-1': 'monitoring.us-east-1.amazonaws.com', - 'us-gov-west-1': 'monitoring.us-gov-west-1.amazonaws.com', - 'us-west-1': 'monitoring.us-west-1.amazonaws.com', - 'us-west-2': 'monitoring.us-west-2.amazonaws.com', - 'sa-east-1': 'monitoring.sa-east-1.amazonaws.com', - 'eu-west-1': 'monitoring.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'monitoring.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'monitoring.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'monitoring.ap-southeast-2.amazonaws.com', - 'cn-north-1': 'monitoring.cn-north-1.amazonaws.com.cn', -} +RegionData = load_regions().get('cloudwatch', {}) def regions(): @@ -52,13 +41,7 @@ def regions(): :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=CloudWatchConnection) - regions.append(region) - return regions + return get_regions('cloudwatch', connection_cls=CloudWatchConnection) def connect_to_region(region_name, **kw_params): @@ -91,7 +74,7 @@ class CloudWatchConnection(AWSQueryConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None): """ Init method to create a new connection to EC2 Monitoring Service. @@ -115,7 +98,8 @@ class CloudWatchConnection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -178,11 +162,11 @@ class CloudWatchConnection(AWSQueryConnection): metric_data['StatisticValues.Minimum'] = s['minimum'] metric_data['StatisticValues.SampleCount'] = s['samplecount'] metric_data['StatisticValues.Sum'] = s['sum'] - if value != None: + if value is not None: msg = 'You supplied a value and statistics for a ' + \ 'metric. Posting statistics and not value.' boto.log.warn(msg) - elif value != None: + elif value is not None: metric_data['Value'] = v else: raise Exception('Must specify a value or statistics to put.') @@ -273,9 +257,13 @@ class CloudWatchConnection(AWSQueryConnection): pairs that will be used to filter the results. The key in the dictionary is the name of a Dimension. The value in the dictionary is either a scalar value of that Dimension - name that you want to filter on, a list of values to - filter on or None if you want all metrics with that - Dimension name. + name that you want to filter on or None if you want all + metrics with that Dimension name. To be included in the + result, a metric must contain all specified dimensions, + although the metric may contain additional dimensions beyond + those requested. The Dimension names and values must + be strings between 1 and 250 characters long. A maximum of + 10 dimensions are allowed. :type metric_name: str :param metric_name: The name of the Metric to filter against. If None, diff --git a/awx/lib/site-packages/boto/ec2/connection.py b/awx/lib/site-packages/boto/ec2/connection.py index 045a145138..9e2d4b1925 100644 --- a/awx/lib/site-packages/boto/ec2/connection.py +++ b/awx/lib/site-packages/boto/ec2/connection.py @@ -58,7 +58,7 @@ from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription from boto.ec2.bundleinstance import BundleInstanceTask from boto.ec2.placementgroup import PlacementGroup from boto.ec2.tag import Tag -from boto.ec2.vmtype import VmType +from boto.ec2.instancetype import InstanceType from boto.ec2.instancestatus import InstanceStatusSet from boto.ec2.volumestatus import VolumeStatusSet from boto.ec2.networkinterface import NetworkInterface @@ -83,7 +83,7 @@ class EC2Connection(AWSQueryConnection): proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', api_version=None, security_token=None, - validate_certs=True): + validate_certs=True, profile_name=None): """ Init method to create a new connection to EC2. """ @@ -98,7 +98,8 @@ class EC2Connection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) if api_version: self.APIVersion = api_version @@ -734,8 +735,8 @@ class EC2Connection(AWSQueryConnection): launch instances. :type security_groups: list of strings - :param security_groups: The names of the security groups with which to - associate instances. + :param security_groups: The names of the EC2 classic security groups + with which to associate instances. :type user_data: string :param user_data: The Base64-encoded MIME user data to be made @@ -749,6 +750,8 @@ class EC2Connection(AWSQueryConnection): * m1.medium * m1.large * m1.xlarge + * m3.medium + * m3.large * m3.xlarge * m3.2xlarge * c1.medium @@ -1301,7 +1304,8 @@ class EC2Connection(AWSQueryConnection): def get_spot_price_history(self, start_time=None, end_time=None, instance_type=None, product_description=None, availability_zone=None, dry_run=False, - max_results=None): + max_results=None, next_token=None, + filters=None): """ Retrieve the recent history of spot instances pricing. @@ -1339,6 +1343,19 @@ class EC2Connection(AWSQueryConnection): :param max_results: The maximum number of paginated items per response. + :type next_token: str + :param next_token: The next set of rows to return. This should + be the value of the ``next_token`` attribute from a previous + call to ``get_spot_price_history``. + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + :rtype: list :return: A list of tuples containing price and timestamp.
""" @@ -1357,6 +1374,10 @@ class EC2Connection(AWSQueryConnection): params['DryRun'] = 'true' if max_results is not None: params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + if filters: + self.build_filter_params(params, filters) return self.get_list('DescribeSpotPriceHistory', params, [('item', SpotPriceHistory)], verb='POST') @@ -1424,6 +1445,8 @@ class EC2Connection(AWSQueryConnection): * m1.medium * m1.large * m1.xlarge + * m3.medium + * m3.large * m3.xlarge * m3.2xlarge * c1.medium @@ -1814,6 +1837,37 @@ class EC2Connection(AWSQueryConnection): return self.get_status('AssignPrivateIpAddresses', params, verb='POST') + def _associate_address(self, status, instance_id=None, public_ip=None, + allocation_id=None, network_interface_id=None, + private_ip_address=None, allow_reassociation=False, + dry_run=False): + params = {} + if instance_id is not None: + params['InstanceId'] = instance_id + elif network_interface_id is not None: + params['NetworkInterfaceId'] = network_interface_id + + # Allocation id trumps public ip in order to associate with VPCs + if allocation_id is not None: + params['AllocationId'] = allocation_id + elif public_ip is not None: + params['PublicIp'] = public_ip + + if private_ip_address is not None: + params['PrivateIpAddress'] = private_ip_address + + if allow_reassociation: + params['AllowReassociation'] = 'true' + + if dry_run: + params['DryRun'] = 'true' + + if status: + return self.get_status('AssociateAddress', params, verb='POST') + else: + return self.get_object('AssociateAddress', params, Address, + verb='POST') + def associate_address(self, instance_id=None, public_ip=None, allocation_id=None, network_interface_id=None, private_ip_address=None, allow_reassociation=False, @@ -1856,27 +1910,59 @@ class EC2Connection(AWSQueryConnection): :rtype: bool :return: True if successful """ - params = {} - if instance_id is not None: - params['InstanceId'] = instance_id - elif network_interface_id is not None: - params['NetworkInterfaceId'] = network_interface_id + return self._associate_address(True, instance_id=instance_id, + public_ip=public_ip, allocation_id=allocation_id, + network_interface_id=network_interface_id, + private_ip_address=private_ip_address, + allow_reassociation=allow_reassociation, dry_run=dry_run) - if public_ip is not None: - params['PublicIp'] = public_ip - elif allocation_id is not None: - params['AllocationId'] = allocation_id + def associate_address_object(self, instance_id=None, public_ip=None, + allocation_id=None, network_interface_id=None, + private_ip_address=None, allow_reassociation=False, + dry_run=False): + """ + Associate an Elastic IP address with a currently running instance. + This requires one of ``public_ip`` or ``allocation_id`` depending + on if you're associating a VPC address or a plain EC2 address. - if private_ip_address is not None: - params['PrivateIpAddress'] = private_ip_address + When using an Allocation ID, make sure to pass ``None`` for ``public_ip`` + as EC2 expects a single parameter and if ``public_ip`` is passed boto + will preference that instead of ``allocation_id``. - if allow_reassociation: - params['AllowReassociation'] = 'true' + :type instance_id: string + :param instance_id: The ID of the instance - if dry_run: - params['DryRun'] = 'true' + :type public_ip: string + :param public_ip: The public IP address for EC2 based allocations. 
- return self.get_status('AssociateAddress', params, verb='POST') + :type allocation_id: string + :param allocation_id: The allocation ID for a VPC-based elastic IP. + + :type network_interface_id: string + :param network_interface_id: The network interface ID to which + elastic IP is to be assigned to + + :type private_ip_address: string + :param private_ip_address: The primary or secondary private IP address + to associate with the Elastic IP address. + + :type allow_reassociation: bool + :param allow_reassociation: Specify this option to allow an Elastic IP + address that is already associated with another network interface + or instance to be re-associated with the specified instance or + interface. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: class:`boto.ec2.address.Address` + :return: The associated address instance + """ + return self._associate_address(False, instance_id=instance_id, + public_ip=public_ip, allocation_id=allocation_id, + network_interface_id=network_interface_id, + private_ip_address=private_ip_address, + allow_reassociation=allow_reassociation, dry_run=dry_run) def disassociate_address(self, public_ip=None, association_id=None, dry_run=False): @@ -1897,10 +1983,12 @@ class EC2Connection(AWSQueryConnection): """ params = {} - if public_ip is not None: - params['PublicIp'] = public_ip - elif association_id is not None: + # If there is an association id it trumps public ip + # in order to successfully dissassociate with a VPC elastic ip + if association_id is not None: params['AssociationId'] = association_id + elif public_ip is not None: + params['PublicIp'] = public_ip if dry_run: params['DryRun'] = 'true' @@ -4236,15 +4324,15 @@ class EC2Connection(AWSQueryConnection): params['DryRun'] = 'true' return self.get_status('DeleteNetworkInterface', params, verb='POST') - def get_all_vmtypes(self): + def get_all_instance_types(self): """ - Get all vmtypes available on this cloud (eucalyptus specific) + Get all instance_types available on this cloud (eucalyptus specific) - :rtype: list of :class:`boto.ec2.vmtype.VmType` - :return: The requested VmType objects + :rtype: list of :class:`boto.ec2.instancetype.InstanceType` + :return: The requested InstanceType objects """ params = {} - return self.get_list('DescribeVmTypes', params, [('euca:item', VmType)], verb='POST') + return self.get_list('DescribeInstanceTypes', params, [('item', InstanceType)], verb='POST') def copy_image(self, source_region, source_image_id, name=None, description=None, client_token=None, dry_run=False): diff --git a/awx/lib/site-packages/boto/ec2/elb/__init__.py b/awx/lib/site-packages/boto/ec2/elb/__init__.py index 1ad60f0a7f..d308c72639 100644 --- a/awx/lib/site-packages/boto/ec2/elb/__init__.py +++ b/awx/lib/site-packages/boto/ec2/elb/__init__.py @@ -31,21 +31,10 @@ from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones from boto.ec2.elb.instancestate import InstanceState from boto.ec2.elb.healthcheck import HealthCheck from boto.ec2.elb.listelement import ListElement -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions, load_regions import boto -RegionData = { - 'us-east-1': 'elasticloadbalancing.us-east-1.amazonaws.com', - 'us-gov-west-1': 'elasticloadbalancing.us-gov-west-1.amazonaws.com', - 'us-west-1': 'elasticloadbalancing.us-west-1.amazonaws.com', - 'us-west-2': 'elasticloadbalancing.us-west-2.amazonaws.com', - 'sa-east-1': 'elasticloadbalancing.sa-east-1.amazonaws.com', - 
'eu-west-1': 'elasticloadbalancing.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'elasticloadbalancing.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'elasticloadbalancing.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'elasticloadbalancing.ap-southeast-2.amazonaws.com', - 'cn-north-1': 'elasticloadbalancing.cn-north-1.amazonaws.com.cn', -} +RegionData = load_regions().get('elasticloadbalancing', {}) def regions(): @@ -55,13 +44,7 @@ def regions(): :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=ELBConnection) - regions.append(region) - return regions + return get_regions('elasticloadbalancing', connection_cls=ELBConnection) def connect_to_region(region_name, **kw_params): @@ -92,7 +75,7 @@ class ELBConnection(AWSQueryConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None): """ Init method to create a new connection to EC2 Load Balancing Service. @@ -110,13 +93,14 @@ class ELBConnection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['ec2'] def build_list_params(self, params, items, label): - if isinstance(items, str): + if isinstance(items, basestring): items = [items] for index, item in enumerate(items): params[label % (index + 1)] = item @@ -401,6 +385,7 @@ class ELBConnection(AWSQueryConnection): :param attribute: The attribute you wish to change. * crossZoneLoadBalancing - Boolean (true) + * accessLog - :py:class:`AccessLogAttribute` instance :type value: string :param value: The new value for the attribute @@ -421,6 +406,15 @@ class ELBConnection(AWSQueryConnection): if attribute.lower() == 'crosszoneloadbalancing': params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled' ] = value + elif attribute.lower() == 'accesslog': + params['LoadBalancerAttributes.AccessLog.Enabled'] = \ + value.enabled and 'true' or 'false' + params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \ + value.s3_bucket_name + params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \ + value.s3_bucket_prefix + params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \ + value.emit_interval else: raise ValueError('InvalidAttribute', attribute) return self.get_status('ModifyLoadBalancerAttributes', params, diff --git a/awx/lib/site-packages/boto/ec2/elb/attributes.py b/awx/lib/site-packages/boto/ec2/elb/attributes.py index 0d70a642f6..edf3eedcc0 100644 --- a/awx/lib/site-packages/boto/ec2/elb/attributes.py +++ b/awx/lib/site-packages/boto/ec2/elb/attributes.py @@ -40,6 +40,41 @@ class CrossZoneLoadBalancingAttribute(object): else: self.enabled = False +class AccessLogAttribute(object): + """ + Represents the AccessLog segment of ELB attributes. 
+ """ + def __init__(self, connection=None): + self.enabled = None + self.s3_bucket_name = None + self.s3_bucket_prefix = None + self.emit_interval = None + + def __repr__(self): + return 'AccessLog(%s, %s, %s, %s)' % ( + self.enabled, + self.s3_bucket_name, + self.s3_bucket_prefix, + self.emit_interval + ) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'S3BucketName': + self.s3_bucket_name = value + elif name == 'S3BucketPrefix': + self.s3_bucket_prefix = value + elif name == 'EmitInterval': + self.emit_interval = int(value) + + class LbAttributes(object): """ Represents the Attributes of an Elastic Load Balancer. @@ -48,14 +83,18 @@ class LbAttributes(object): self.connection = connection self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute( self.connection) + self.access_log = AccessLogAttribute(self.connection) def __repr__(self): - return 'LbAttributes(%s)' % ( - repr(self.cross_zone_load_balancing)) + return 'LbAttributes(%s, %s)' % ( + repr(self.cross_zone_load_balancing), + repr(self.access_log)) def startElement(self, name, attrs, connection): if name == 'CrossZoneLoadBalancing': return self.cross_zone_load_balancing - + if name == 'AccessLog': + return self.access_log + def endElement(self, name, value, connection): pass diff --git a/awx/lib/site-packages/boto/ec2/elb/loadbalancer.py b/awx/lib/site-packages/boto/ec2/elb/loadbalancer.py index f6ed3dec42..f76feb15cc 100644 --- a/awx/lib/site-packages/boto/ec2/elb/loadbalancer.py +++ b/awx/lib/site-packages/boto/ec2/elb/loadbalancer.py @@ -186,7 +186,7 @@ class LoadBalancer(object): :param zones: The name of the zone(s) to add. """ - if isinstance(zones, str) or isinstance(zones, unicode): + if isinstance(zones, basestring): zones = [zones] new_zones = self.connection.enable_availability_zones(self.name, zones) self.availability_zones = new_zones @@ -199,7 +199,7 @@ class LoadBalancer(object): :param zones: The name of the zone(s) to add. """ - if isinstance(zones, str) or isinstance(zones, unicode): + if isinstance(zones, basestring): zones = [zones] new_zones = self.connection.disable_availability_zones(self.name, zones) self.availability_zones = new_zones @@ -266,7 +266,7 @@ class LoadBalancer(object): to add to this load balancer. """ - if isinstance(instances, str) or isinstance(instances, unicode): + if isinstance(instances, basestring): instances = [instances] new_instances = self.connection.register_instances(self.name, instances) @@ -281,7 +281,7 @@ class LoadBalancer(object): to remove from this load balancer. """ - if isinstance(instances, str) or isinstance(instances, unicode): + if isinstance(instances, basestring): instances = [instances] new_instances = self.connection.deregister_instances(self.name, instances) @@ -324,7 +324,7 @@ class LoadBalancer(object): listeners) def create_listener(self, inPort, outPort=None, proto="tcp"): - if outPort == None: + if outPort is None: outPort = inPort return self.create_listeners([(inPort, outPort, proto)]) @@ -380,7 +380,7 @@ class LoadBalancer(object): :param subnets: The name of the subnet(s) to add. 
""" - if isinstance(subnets, str) or isinstance(subnets, unicode): + if isinstance(subnets, basestring): subnets = [subnets] new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets) self.subnets = new_subnets @@ -393,7 +393,7 @@ class LoadBalancer(object): :param subnets: The name of the subnet(s) to detach. """ - if isinstance(subnets, str) or isinstance(subnets, unicode): + if isinstance(subnets, basestring): subnets = [subnets] new_subnets = self.connection.detach_lb_from_subnets(self.name, subnets) self.subnets = new_subnets @@ -408,8 +408,7 @@ class LoadBalancer(object): :param security_groups: The name of the security group(s) to add. """ - if isinstance(security_groups, str) or \ - isinstance(security_groups, unicode): + if isinstance(security_groups, basestring): security_groups = [security_groups] new_sgs = self.connection.apply_security_groups_to_lb( self.name, security_groups) diff --git a/awx/lib/site-packages/boto/ec2/image.py b/awx/lib/site-packages/boto/ec2/image.py index 08203a25d7..807811dcc8 100644 --- a/awx/lib/site-packages/boto/ec2/image.py +++ b/awx/lib/site-packages/boto/ec2/image.py @@ -208,6 +208,8 @@ class Image(TaggedEC2Object): * m1.medium * m1.large * m1.xlarge + * m3.medium + * m3.large * m3.xlarge * m3.2xlarge * c1.medium diff --git a/awx/lib/site-packages/boto/ec2/instancetype.py b/awx/lib/site-packages/boto/ec2/instancetype.py new file mode 100644 index 0000000000..2de4b8532e --- /dev/null +++ b/awx/lib/site-packages/boto/ec2/instancetype.py @@ -0,0 +1,59 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
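For the Eucalyptus-only rename in connection.py above (``get_all_vmtypes`` becoming ``get_all_instance_types``), usage looks roughly like this; the endpoint is made up, and the call only works against a Eucalyptus cloud since AWS itself does not implement ``DescribeInstanceTypes``:

    # Sketch only: 'euca.example.com' is a hypothetical Eucalyptus host.
    import boto

    conn = boto.connect_euca(host='euca.example.com')
    for itype in conn.get_all_instance_types():
        # InstanceType (defined just below) replaces the old VmType.
        print itype.name, itype.cores, itype.memory, itype.disk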
+ + +from boto.ec2.ec2object import EC2Object + + +class InstanceType(EC2Object): + """ + Represents an EC2 VM Type + + :ivar name: The name of the vm type + :ivar cores: The number of cpu cores for this vm type + :ivar memory: The amount of memory in megabytes for this vm type + :ivar disk: The amount of disk space in gigabytes for this vm type + """ + + def __init__(self, connection=None, name=None, cores=None, + memory=None, disk=None): + super(InstanceType, self).__init__(connection) + self.connection = connection + self.name = name + self.cores = cores + self.memory = memory + self.disk = disk + + def __repr__(self): + return 'InstanceType:%s-%s,%s,%s' % (self.name, self.cores, + self.memory, self.disk) + + def endElement(self, name, value, connection): + if name == 'name': + self.name = value + elif name == 'cpu': + self.cores = value + elif name == 'disk': + self.disk = value + elif name == 'memory': + self.memory = value + else: + setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/ec2/regioninfo.py b/awx/lib/site-packages/boto/ec2/regioninfo.py index 1b6c6ad934..78cd757c2f 100644 --- a/awx/lib/site-packages/boto/ec2/regioninfo.py +++ b/awx/lib/site-packages/boto/ec2/regioninfo.py @@ -28,7 +28,8 @@ class EC2RegionInfo(RegionInfo): Represents an EC2 Region """ - def __init__(self, connection=None, name=None, endpoint=None): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): from boto.ec2.connection import EC2Connection super(EC2RegionInfo, self).__init__(connection, name, endpoint, EC2Connection) diff --git a/awx/lib/site-packages/boto/ecs/__init__.py b/awx/lib/site-packages/boto/ecs/__init__.py index f39ec5a50c..d643afc76a 100644 --- a/awx/lib/site-packages/boto/ecs/__init__.py +++ b/awx/lib/site-packages/boto/ecs/__init__.py @@ -41,10 +41,13 @@ class ECSConnection(AWSQueryConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com', - debug=0, https_connection_factory=None, path='/'): + debug=0, https_connection_factory=None, path='/', + security_token=None, profile_name=None): super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, - host, debug, https_connection_factory, path) + host, debug, https_connection_factory, path, + security_token=security_token, + profile_name=profile_name) def _required_auth_capability(self): return ['ecs'] @@ -66,7 +69,7 @@ class ECSConnection(AWSQueryConnection): boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) - if itemSet == None: + if itemSet is None: rs = ItemSet(self, action, params, page) else: rs = itemSet diff --git a/awx/lib/site-packages/boto/ecs/item.py b/awx/lib/site-packages/boto/ecs/item.py index d0cdb99017..4349e41e1d 100644 --- a/awx/lib/site-packages/boto/ecs/item.py +++ b/awx/lib/site-packages/boto/ecs/item.py @@ -110,7 +110,7 @@ class ItemSet(ResponseGroup): def startElement(self, name, attrs, connection): if name == "Item": self.curItem = Item(self._connection) - elif self.curItem != None: + elif self.curItem is not None: self.curItem.startElement(name, attrs, connection) return None @@ -123,13 +123,13 @@ class ItemSet(ResponseGroup): self.objs.append(self.curItem) self._xml.write(self.curItem.to_xml()) self.curItem = None - elif self.curItem != None: + elif self.curItem is not None: self.curItem.endElement(name, value, 
connection) return None def next(self): """Special paging functionality""" - if self.iter == None: + if self.iter is None: self.iter = iter(self.objs) try: return self.iter.next() diff --git a/awx/lib/site-packages/boto/elasticache/__init__.py b/awx/lib/site-packages/boto/elasticache/__init__.py index 1759a17ef5..73d28c9f96 100644 --- a/awx/lib/site-packages/boto/elasticache/__init__.py +++ b/awx/lib/site-packages/boto/elasticache/__init__.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,34 +31,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.elasticache.layer1 import ElastiCacheConnection - return [RegionInfo(name='us-east-1', - endpoint='elasticache.us-east-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='us-west-1', - endpoint='elasticache.us-west-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='us-west-2', - endpoint='elasticache.us-west-2.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='eu-west-1', - endpoint='elasticache.eu-west-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='ap-northeast-1', - endpoint='elasticache.ap-northeast-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='ap-southeast-1', - endpoint='elasticache.ap-southeast-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='ap-southeast-2', - endpoint='elasticache.ap-southeast-2.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='sa-east-1', - endpoint='elasticache.sa-east-1.amazonaws.com', - connection_cls=ElastiCacheConnection), - RegionInfo(name='cn-north-1', - endpoint='elasticache.cn-north-1.amazonaws.com.cn', - connection_cls=ElastiCacheConnection), - ] + return get_regions('elasticache', connection_cls=ElastiCacheConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/elastictranscoder/__init__.py b/awx/lib/site-packages/boto/elastictranscoder/__init__.py index c53bc0cec1..afb23e56c8 100644 --- a/awx/lib/site-packages/boto/elastictranscoder/__init__.py +++ b/awx/lib/site-packages/boto/elastictranscoder/__init__.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,27 +31,10 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.elastictranscoder.layer1 import ElasticTranscoderConnection - cls = ElasticTranscoderConnection - return [ - RegionInfo(name='us-east-1', - endpoint='elastictranscoder.us-east-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='us-west-1', - endpoint='elastictranscoder.us-west-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='us-west-2', - endpoint='elastictranscoder.us-west-2.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-northeast-1', - endpoint='elastictranscoder.ap-northeast-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-southeast-1', - endpoint='elastictranscoder.ap-southeast-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='eu-west-1', - endpoint='elastictranscoder.eu-west-1.amazonaws.com', - connection_cls=cls), - ] + return get_regions( + 'elastictranscoder', + connection_cls=ElasticTranscoderConnection + ) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/emr/__init__.py b/awx/lib/site-packages/boto/emr/__init__.py index 0bd48ad3b3..b04d08fecd 100644 --- a/awx/lib/site-packages/boto/emr/__init__.py +++ b/awx/lib/site-packages/boto/emr/__init__.py @@ -29,7 +29,7 @@ service from AWS. from connection import EmrConnection from step import Step, StreamingStep, JarStep from bootstrap_action import BootstrapAction -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -39,34 +39,7 @@ def regions(): :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` """ - return [RegionInfo(name='us-east-1', - endpoint='elasticmapreduce.us-east-1.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='us-west-1', - endpoint='us-west-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='us-west-2', - endpoint='us-west-2.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='ap-northeast-1', - endpoint='ap-northeast-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='ap-southeast-1', - endpoint='ap-southeast-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='ap-southeast-2', - endpoint='ap-southeast-2.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='eu-west-1', - endpoint='eu-west-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='sa-east-1', - endpoint='sa-east-1.elasticmapreduce.amazonaws.com', - connection_cls=EmrConnection), - RegionInfo(name='cn-north-1', - endpoint='elasticmapreduce.cn-north-1.amazonaws.com.cn', - connection_cls=EmrConnection), - ] + return get_regions('elasticmapreduce', connection_cls=EmrConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/emr/connection.py b/awx/lib/site-packages/boto/emr/connection.py index e7b5b74782..6c5222ad1a 100644 --- a/awx/lib/site-packages/boto/emr/connection.py +++ b/awx/lib/site-packages/boto/emr/connection.py @@ -55,7 +55,7 @@ class EmrConnection(AWSQueryConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, 
profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) @@ -67,7 +67,8 @@ class EmrConnection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) # Many of the EMR hostnames are of the form: # <region>.<service_name>.amazonaws.com # rather than the more common: # <service_name>.<region>.amazonaws.com @@ -265,7 +266,7 @@ class EmrConnection(AWSQueryConnection): if step_states: self.build_list_params(params, step_states, 'StepStateList.member') - self.get_object('ListSteps', params, StepSummaryList) + return self.get_object('ListSteps', params, StepSummaryList) def add_tags(self, resource_id, tags): """ diff --git a/awx/lib/site-packages/boto/emr/emrobject.py b/awx/lib/site-packages/boto/emr/emrobject.py index a397a55a0d..0906bfabbe 100644 --- a/awx/lib/site-packages/boto/emr/emrobject.py +++ b/awx/lib/site-packages/boto/emr/emrobject.py @@ -262,11 +262,12 @@ class Cluster(EmrObject): if name == 'Status': self.status = ClusterStatus() return self.status - elif name == 'EC2InstanceAttributes': + elif name == 'Ec2InstanceAttributes': self.ec2instanceattributes = Ec2InstanceAttributes() return self.ec2instanceattributes elif name == 'Applications': self.applications = ResultSet([('member', Application)]) + return self.applications elif name == 'Tags': self.tags = ResultSet([('member', KeyValue)]) return self.tags diff --git a/awx/lib/site-packages/boto/endpoints.json b/awx/lib/site-packages/boto/endpoints.json new file mode 100644 index 0000000000..bf52525f5e --- /dev/null +++ b/awx/lib/site-packages/boto/endpoints.json @@ -0,0 +1,307 @@ +{ + "autoscaling": { + "ap-northeast-1": "autoscaling.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "autoscaling.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "autoscaling.ap-southeast-2.amazonaws.com", + "cn-north-1": "autoscaling.cn-north-1.amazonaws.com.cn", + "eu-west-1": "autoscaling.eu-west-1.amazonaws.com", + "sa-east-1": "autoscaling.sa-east-1.amazonaws.com", + "us-east-1": "autoscaling.us-east-1.amazonaws.com", + "us-gov-west-1": "autoscaling.us-gov-west-1.amazonaws.com", + "us-west-1": "autoscaling.us-west-1.amazonaws.com", + "us-west-2": "autoscaling.us-west-2.amazonaws.com" + }, + "cloudformation": { + "ap-northeast-1": "cloudformation.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "cloudformation.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cloudformation.ap-southeast-2.amazonaws.com", + "cn-north-1": "cloudformation.cn-north-1.amazonaws.com.cn", + "eu-west-1": "cloudformation.eu-west-1.amazonaws.com", + "sa-east-1": "cloudformation.sa-east-1.amazonaws.com", + "us-east-1": "cloudformation.us-east-1.amazonaws.com", + "us-west-1": "cloudformation.us-west-1.amazonaws.com", + "us-west-2": "cloudformation.us-west-2.amazonaws.com" + }, + "cloudfront": { + "ap-northeast-1": "cloudfront.amazonaws.com", + "ap-southeast-1": "cloudfront.amazonaws.com", + "ap-southeast-2": "cloudfront.amazonaws.com", + "eu-west-1": "cloudfront.amazonaws.com", + "sa-east-1": "cloudfront.amazonaws.com", + "us-east-1": "cloudfront.amazonaws.com", + "us-west-1": "cloudfront.amazonaws.com", + "us-west-2": "cloudfront.amazonaws.com" + }, + "cloudsearch": { + "ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com", + "eu-west-1": "cloudsearch.eu-west-1.amazonaws.com", + "us-east-1": "cloudsearch.us-east-1.amazonaws.com", + "us-west-1": "cloudsearch.us-west-1.amazonaws.com", + "us-west-2": 
"cloudsearch.us-west-2.amazonaws.com" + }, + "cloudtrail": { + "us-east-1": "cloudtrail.us-east-1.amazonaws.com", + "us-west-2": "cloudtrail.us-west-2.amazonaws.com" + }, + "cloudwatch": { + "ap-northeast-1": "monitoring.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "monitoring.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "monitoring.ap-southeast-2.amazonaws.com", + "cn-north-1": "monitoring.cn-north-1.amazonaws.com.cn", + "eu-west-1": "monitoring.eu-west-1.amazonaws.com", + "sa-east-1": "monitoring.sa-east-1.amazonaws.com", + "us-east-1": "monitoring.us-east-1.amazonaws.com", + "us-gov-west-1": "monitoring.us-gov-west-1.amazonaws.com", + "us-west-1": "monitoring.us-west-1.amazonaws.com", + "us-west-2": "monitoring.us-west-2.amazonaws.com" + }, + "datapipeline": { + "us-east-1": "datapipeline.us-east-1.amazonaws.com", + "us-west-2": "datapipeline.us-west-2.amazonaws.com", + "eu-west-1": "datapipeline.eu-west-1.amazonaws.com", + "ap-southeast-2": "datapipeline.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com" + }, + "directconnect": { + "ap-northeast-1": "directconnect.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "directconnect.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "directconnect.ap-southeast-2.amazonaws.com", + "eu-west-1": "directconnect.eu-west-1.amazonaws.com", + "sa-east-1": "directconnect.sa-east-1.amazonaws.com", + "us-east-1": "directconnect.us-east-1.amazonaws.com", + "us-west-1": "directconnect.us-west-1.amazonaws.com", + "us-west-2": "directconnect.us-west-2.amazonaws.com" + }, + "dynamodb": { + "ap-northeast-1": "dynamodb.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "dynamodb.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "dynamodb.ap-southeast-2.amazonaws.com", + "cn-north-1": "dynamodb.cn-north-1.amazonaws.com.cn", + "eu-west-1": "dynamodb.eu-west-1.amazonaws.com", + "sa-east-1": "dynamodb.sa-east-1.amazonaws.com", + "us-east-1": "dynamodb.us-east-1.amazonaws.com", + "us-gov-west-1": "dynamodb.us-gov-west-1.amazonaws.com", + "us-west-1": "dynamodb.us-west-1.amazonaws.com", + "us-west-2": "dynamodb.us-west-2.amazonaws.com" + }, + "ec2": { + "ap-northeast-1": "ec2.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "ec2.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "ec2.ap-southeast-2.amazonaws.com", + "cn-north-1": "ec2.cn-north-1.amazonaws.com.cn", + "eu-west-1": "ec2.eu-west-1.amazonaws.com", + "sa-east-1": "ec2.sa-east-1.amazonaws.com", + "us-east-1": "ec2.us-east-1.amazonaws.com", + "us-gov-west-1": "ec2.us-gov-west-1.amazonaws.com", + "us-west-1": "ec2.us-west-1.amazonaws.com", + "us-west-2": "ec2.us-west-2.amazonaws.com" + }, + "elasticache": { + "ap-northeast-1": "elasticache.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elasticache.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticache.ap-southeast-2.amazonaws.com", + "cn-north-1": "elasticache.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticache.eu-west-1.amazonaws.com", + "sa-east-1": "elasticache.sa-east-1.amazonaws.com", + "us-east-1": "elasticache.us-east-1.amazonaws.com", + "us-west-1": "elasticache.us-west-1.amazonaws.com", + "us-west-2": "elasticache.us-west-2.amazonaws.com" + }, + "elasticbeanstalk": { + "ap-northeast-1": "elasticbeanstalk.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elasticbeanstalk.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticbeanstalk.ap-southeast-2.amazonaws.com", + "eu-west-1": "elasticbeanstalk.eu-west-1.amazonaws.com", + "sa-east-1": 
"elasticbeanstalk.sa-east-1.amazonaws.com", + "us-east-1": "elasticbeanstalk.us-east-1.amazonaws.com", + "us-west-1": "elasticbeanstalk.us-west-1.amazonaws.com", + "us-west-2": "elasticbeanstalk.us-west-2.amazonaws.com" + }, + "elasticloadbalancing": { + "ap-northeast-1": "elasticloadbalancing.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elasticloadbalancing.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticloadbalancing.ap-southeast-2.amazonaws.com", + "cn-north-1": "elasticloadbalancing.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticloadbalancing.eu-west-1.amazonaws.com", + "sa-east-1": "elasticloadbalancing.sa-east-1.amazonaws.com", + "us-east-1": "elasticloadbalancing.us-east-1.amazonaws.com", + "us-gov-west-1": "elasticloadbalancing.us-gov-west-1.amazonaws.com", + "us-west-1": "elasticloadbalancing.us-west-1.amazonaws.com", + "us-west-2": "elasticloadbalancing.us-west-2.amazonaws.com" + }, + "elasticmapreduce": { + "ap-northeast-1": "ap-northeast-1.elasticmapreduce.amazonaws.com", + "ap-southeast-1": "ap-southeast-1.elasticmapreduce.amazonaws.com", + "ap-southeast-2": "ap-southeast-2.elasticmapreduce.amazonaws.com", + "cn-north-1": "elasticmapreduce.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticmapreduce.eu-west-1.amazonaws.com", + "sa-east-1": "sa-east-1.elasticmapreduce.amazonaws.com", + "us-east-1": "elasticmapreduce.us-east-1.amazonaws.com", + "us-gov-west-1": "us-gov-west-1.elasticmapreduce.amazonaws.com", + "us-west-1": "us-west-1.elasticmapreduce.amazonaws.com", + "us-west-2": "us-west-2.elasticmapreduce.amazonaws.com" + }, + "elastictranscoder": { + "ap-northeast-1": "elastictranscoder.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elastictranscoder.ap-southeast-1.amazonaws.com", + "eu-west-1": "elastictranscoder.eu-west-1.amazonaws.com", + "us-east-1": "elastictranscoder.us-east-1.amazonaws.com", + "us-west-1": "elastictranscoder.us-west-1.amazonaws.com", + "us-west-2": "elastictranscoder.us-west-2.amazonaws.com" + }, + "glacier": { + "ap-northeast-1": "glacier.ap-northeast-1.amazonaws.com", + "ap-southeast-2": "glacier.ap-southeast-2.amazonaws.com", + "cn-north-1": "glacier.cn-north-1.amazonaws.com.cn", + "eu-west-1": "glacier.eu-west-1.amazonaws.com", + "us-east-1": "glacier.us-east-1.amazonaws.com", + "us-west-1": "glacier.us-west-1.amazonaws.com", + "us-west-2": "glacier.us-west-2.amazonaws.com" + }, + "iam": { + "ap-northeast-1": "iam.amazonaws.com", + "ap-southeast-1": "iam.amazonaws.com", + "ap-southeast-2": "iam.amazonaws.com", + "cn-north-1": "iam.cn-north-1.amazonaws.com.cn", + "eu-west-1": "iam.amazonaws.com", + "sa-east-1": "iam.amazonaws.com", + "us-east-1": "iam.amazonaws.com", + "us-gov-west-1": "iam.us-gov.amazonaws.com", + "us-west-1": "iam.amazonaws.com", + "us-west-2": "iam.amazonaws.com" + }, + "importexport": { + "ap-northeast-1": "importexport.amazonaws.com", + "ap-southeast-1": "importexport.amazonaws.com", + "ap-southeast-2": "importexport.amazonaws.com", + "eu-west-1": "importexport.amazonaws.com", + "sa-east-1": "importexport.amazonaws.com", + "us-east-1": "importexport.amazonaws.com", + "us-west-1": "importexport.amazonaws.com", + "us-west-2": "importexport.amazonaws.com" + }, + "kinesis": { + "us-east-1": "kinesis.us-east-1.amazonaws.com" + }, + "opsworks": { + "us-east-1": "opsworks.us-east-1.amazonaws.com" + }, + "rds": { + "ap-northeast-1": "rds.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "rds.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "rds.ap-southeast-2.amazonaws.com", + "cn-north-1": 
"rds.cn-north-1.amazonaws.com.cn", + "eu-west-1": "rds.eu-west-1.amazonaws.com", + "sa-east-1": "rds.sa-east-1.amazonaws.com", + "us-east-1": "rds.amazonaws.com", + "us-gov-west-1": "rds.us-gov-west-1.amazonaws.com", + "us-west-1": "rds.us-west-1.amazonaws.com", + "us-west-2": "rds.us-west-2.amazonaws.com" + }, + "redshift": { + "ap-northeast-1": "redshift.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "redshift.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "redshift.ap-southeast-2.amazonaws.com", + "eu-west-1": "redshift.eu-west-1.amazonaws.com", + "us-east-1": "redshift.us-east-1.amazonaws.com", + "us-west-2": "redshift.us-west-2.amazonaws.com" + }, + "route53": { + "ap-northeast-1": "route53.amazonaws.com", + "ap-southeast-1": "route53.amazonaws.com", + "ap-southeast-2": "route53.amazonaws.com", + "eu-west-1": "route53.amazonaws.com", + "sa-east-1": "route53.amazonaws.com", + "us-east-1": "route53.amazonaws.com", + "us-west-1": "route53.amazonaws.com", + "us-west-2": "route53.amazonaws.com" + }, + "s3": { + "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com", + "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com", + "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com", + "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", + "eu-west-1": "s3-eu-west-1.amazonaws.com", + "sa-east-1": "s3-sa-east-1.amazonaws.com", + "us-east-1": "s3.amazonaws.com", + "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com", + "us-west-1": "s3-us-west-1.amazonaws.com", + "us-west-2": "s3-us-west-2.amazonaws.com" + }, + "sdb": { + "ap-northeast-1": "sdb.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "sdb.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "sdb.ap-southeast-2.amazonaws.com", + "eu-west-1": "sdb.eu-west-1.amazonaws.com", + "sa-east-1": "sdb.sa-east-1.amazonaws.com", + "us-east-1": "sdb.amazonaws.com", + "us-west-1": "sdb.us-west-1.amazonaws.com", + "us-west-2": "sdb.us-west-2.amazonaws.com" + }, + "ses": { + "eu-west-1": "email.eu-west-1.amazonaws.com", + "us-east-1": "email.us-east-1.amazonaws.com", + "us-west-2": "email.us-west-2.amazonaws.com" + }, + "sns": { + "ap-northeast-1": "sns.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "sns.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "sns.ap-southeast-2.amazonaws.com", + "cn-north-1": "sns.cn-north-1.amazonaws.com.cn", + "eu-west-1": "sns.eu-west-1.amazonaws.com", + "sa-east-1": "sns.sa-east-1.amazonaws.com", + "us-east-1": "sns.us-east-1.amazonaws.com", + "us-gov-west-1": "sns.us-gov-west-1.amazonaws.com", + "us-west-1": "sns.us-west-1.amazonaws.com", + "us-west-2": "sns.us-west-2.amazonaws.com" + }, + "sqs": { + "ap-northeast-1": "ap-northeast-1.queue.amazonaws.com", + "ap-southeast-1": "ap-southeast-1.queue.amazonaws.com", + "ap-southeast-2": "ap-southeast-2.queue.amazonaws.com", + "cn-north-1": "sqs.cn-north-1.amazonaws.com.cn", + "eu-west-1": "eu-west-1.queue.amazonaws.com", + "sa-east-1": "sa-east-1.queue.amazonaws.com", + "us-east-1": "queue.amazonaws.com", + "us-gov-west-1": "us-gov-west-1.queue.amazonaws.com", + "us-west-1": "us-west-1.queue.amazonaws.com", + "us-west-2": "us-west-2.queue.amazonaws.com" + }, + "storagegateway": { + "ap-northeast-1": "storagegateway.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "storagegateway.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "storagegateway.ap-southeast-2.amazonaws.com", + "eu-west-1": "storagegateway.eu-west-1.amazonaws.com", + "sa-east-1": "storagegateway.sa-east-1.amazonaws.com", + "us-east-1": "storagegateway.us-east-1.amazonaws.com", + "us-west-1": 
"storagegateway.us-west-1.amazonaws.com", + "us-west-2": "storagegateway.us-west-2.amazonaws.com" + }, + "sts": { + "ap-northeast-1": "sts.amazonaws.com", + "ap-southeast-1": "sts.amazonaws.com", + "ap-southeast-2": "sts.amazonaws.com", + "cn-north-1": "sts.cn-north-1.amazonaws.com.cn", + "eu-west-1": "sts.amazonaws.com", + "sa-east-1": "sts.amazonaws.com", + "us-east-1": "sts.amazonaws.com", + "us-gov-west-1": "sts.us-gov-west-1.amazonaws.com", + "us-west-1": "sts.amazonaws.com", + "us-west-2": "sts.amazonaws.com" + }, + "support": { + "us-east-1": "support.us-east-1.amazonaws.com" + }, + "swf": { + "ap-northeast-1": "swf.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "swf.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "swf.ap-southeast-2.amazonaws.com", + "cn-north-1": "swf.cn-north-1.amazonaws.com.cn", + "eu-west-1": "swf.eu-west-1.amazonaws.com", + "sa-east-1": "swf.sa-east-1.amazonaws.com", + "us-east-1": "swf.us-east-1.amazonaws.com", + "us-gov-west-1": "swf.us-gov-west-1.amazonaws.com", + "us-west-1": "swf.us-west-1.amazonaws.com", + "us-west-2": "swf.us-west-2.amazonaws.com" + } +} diff --git a/awx/lib/site-packages/boto/exception.py b/awx/lib/site-packages/boto/exception.py index 53626bfebb..99205c9f28 100644 --- a/awx/lib/site-packages/boto/exception.py +++ b/awx/lib/site-packages/boto/exception.py @@ -27,6 +27,7 @@ Exception classes - Subclassing allows you to check for specific errors import base64 import xml.sax from boto import handler +from boto.compat import json from boto.resultset import ResultSet @@ -88,12 +89,25 @@ class BotoServerError(StandardError): h = handler.XmlHandlerWrapper(self, self) h.parseString(self.body) except (TypeError, xml.sax.SAXParseException), pe: - # Remove unparsable message body so we don't include garbage - # in exception. But first, save self.body in self.error_message - # because occasionally we get error messages from Eucalyptus - # that are just text strings that we want to preserve. - self.message = self.body - self.body = None + # What if it's JSON? Let's try that. + try: + parsed = json.loads(self.body) + + if 'RequestId' in parsed: + self.request_id = parsed['RequestId'] + if 'Error' in parsed: + if 'Code' in parsed['Error']: + self.error_code = parsed['Error']['Code'] + if 'Message' in parsed['Error']: + self.message = parsed['Error']['Message'] + + except ValueError: + # Remove unparsable message body so we don't include garbage + # in exception. But first, save self.body in self.error_message + # because occasionally we get error messages from Eucalyptus + # that are just text strings that we want to preserve. + self.message = self.body + self.body = None def __getattr__(self, name): if name == 'error_message': diff --git a/awx/lib/site-packages/boto/fps/response.py b/awx/lib/site-packages/boto/fps/response.py index ef12b00f34..94c8d15199 100644 --- a/awx/lib/site-packages/boto/fps/response.py +++ b/awx/lib/site-packages/boto/fps/response.py @@ -1,3 +1,26 @@ +# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2010 Jason R. 
Coombs http://www.jaraco.com/ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. from decimal import Decimal diff --git a/awx/lib/site-packages/boto/glacier/__init__.py b/awx/lib/site-packages/boto/glacier/__init__.py index ccf35b326d..713740b429 100644 --- a/awx/lib/site-packages/boto/glacier/__init__.py +++ b/awx/lib/site-packages/boto/glacier/__init__.py @@ -21,7 +21,7 @@ # IN THE SOFTWARE. # -from boto.ec2.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -32,28 +32,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.glacier.layer2 import Layer2 - return [RegionInfo(name='us-east-1', - endpoint='glacier.us-east-1.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='us-west-1', - endpoint='glacier.us-west-1.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='us-west-2', - endpoint='glacier.us-west-2.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='ap-northeast-1', - endpoint='glacier.ap-northeast-1.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='eu-west-1', - endpoint='glacier.eu-west-1.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='ap-southeast-2', - endpoint='glacier.ap-southeast-2.amazonaws.com', - connection_cls=Layer2), - RegionInfo(name='cn-north-1', - endpoint='glacier.cn-north-1.amazonaws.com.cn', - connection_cls=Layer2), - ] + return get_regions('glacier', connection_cls=Layer2) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/glacier/concurrent.py b/awx/lib/site-packages/boto/glacier/concurrent.py index af727ec28b..dc54081998 100644 --- a/awx/lib/site-packages/boto/glacier/concurrent.py +++ b/awx/lib/site-packages/boto/glacier/concurrent.py @@ -19,6 +19,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
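# A short sketch of the EMR hunks above: list_steps() used to build the
# request but drop the parsed result; it now returns the StepSummaryList,
# and connections accept the new profile_name argument. The cluster id and
# profile name below are illustrative, and the .steps attribute is assumed
# from the StepSummaryList parse model rather than shown in this patch.
from boto.emr.connection import EmrConnection

emr = EmrConnection(profile_name='analytics')
steps = emr.list_steps('j-EXAMPLECLUSTER', step_states=['COMPLETED'])
step_names = [step.name for step in steps.steps]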
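# The get_regions() refactor above makes the Glacier region list data-driven:
# it is now built from the bundled boto/endpoints.json instead of a
# hard-coded list. A minimal usage sketch; the region name is illustrative.
import boto.glacier

endpoints = dict((r.name, r.endpoint) for r in boto.glacier.regions())
layer2 = boto.glacier.connect_to_region('us-west-2')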
# +from __future__ import with_statement + import os import math import threading diff --git a/awx/lib/site-packages/boto/glacier/job.py b/awx/lib/site-packages/boto/glacier/job.py index c740174117..d26d6b40dc 100644 --- a/awx/lib/site-packages/boto/glacier/job.py +++ b/awx/lib/site-packages/boto/glacier/job.py @@ -97,9 +97,12 @@ class Job(object): actual_tree_hash, response['TreeHash'], byte_range)) return response + def _calc_num_chunks(self, chunk_size): + return int(math.ceil(self.archive_size / float(chunk_size))) + def download_to_file(self, filename, chunk_size=DefaultPartSize, verify_hashes=True, retry_exceptions=(socket.error,)): - """Download an archive to a file. + """Download an archive to a file by name. :type filename: str :param filename: The name of the file where the archive @@ -114,11 +117,33 @@ class Job(object): the tree hashes for each downloaded chunk. """ - num_chunks = int(math.ceil(self.archive_size / float(chunk_size))) + num_chunks = self._calc_num_chunks(chunk_size) with open(filename, 'wb') as output_file: self._download_to_fileob(output_file, num_chunks, chunk_size, verify_hashes, retry_exceptions) + def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize, + verify_hashes=True, + retry_exceptions=(socket.error,)): + """Download an archive to a file object. + + :type output_file: file + :param output_file: The file object where the archive + contents will be saved. + + :type chunk_size: int + :param chunk_size: The chunk size to use when downloading + the archive. + + :type verify_hashes: bool + :param verify_hashes: Indicates whether or not to verify + the tree hashes for each downloaded chunk. + + """ + num_chunks = self._calc_num_chunks(chunk_size) + self._download_to_fileob(output_file, num_chunks, chunk_size, + verify_hashes, retry_exceptions) + def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes, retry_exceptions): for i in xrange(num_chunks): diff --git a/awx/lib/site-packages/boto/glacier/layer1.py b/awx/lib/site-packages/boto/glacier/layer1.py index f6b05588bb..f46fbf0971 100644 --- a/awx/lib/site-packages/boto/glacier/layer1.py +++ b/awx/lib/site-packages/boto/glacier/layer1.py @@ -33,9 +33,42 @@ from .utils import ResettingFileSender class Layer1(AWSAuthConnection): + """ + Amazon Glacier is a storage solution for "cold data." + Amazon Glacier is an extremely low-cost storage service that + provides secure, durable and easy-to-use storage for data backup + and archival. With Amazon Glacier, customers can store their data + cost effectively for months, years, or decades. Amazon Glacier + also enables customers to offload the administrative burdens of + operating and scaling storage to AWS, so they don't have to worry + about capacity planning, hardware provisioning, data replication, + hardware failure and recovery, or time-consuming hardware + migrations. + + Amazon Glacier is a great storage choice when low storage cost is + paramount, your data is rarely retrieved, and retrieval latency of + several hours is acceptable. If your application requires fast or + frequent access to your data, consider using Amazon S3. For more + information, go to `Amazon Simple Storage Service (Amazon S3)`_. + + You can store any kind of data in any format. There is no maximum + limit on the total amount of data you can store in Amazon Glacier. 
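# A sketch of the new download_to_fileobj() added above, which mirrors
# download_to_file() but writes to an already-open file object instead of
# a named path. Assumes `job` is a completed boto.glacier.job.Job.
import io

buf = io.BytesIO()  # any writable binary file object works
job.download_to_fileobj(buf, verify_hashes=True)
archive_bytes = buf.getvalue()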
+ + If you are a first-time user of Amazon Glacier, we recommend that + you begin by reading the following sections in the Amazon Glacier + Developer Guide : + + + + `What is Amazon Glacier`_ - This section of the Developer Guide + describes the underlying data model, the operations it supports, + and the AWS SDKs that you can use to interact with the service. + + `Getting Started with Amazon Glacier`_ - The Getting Started + section walks you through the process of creating a vault, + uploading archives, creating jobs to download archives, retrieving + the job output, and deleting archives. + """ Version = '2012-06-01' - """Glacier API version.""" def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, account_id='-', is_secure=True, port=None, @@ -44,7 +77,8 @@ class Layer1(AWSAuthConnection): https_connection_factory=None, path='/', provider='aws', security_token=None, suppress_consec_slashes=True, - region=None, region_name='us-east-1'): + region=None, region_name='us-east-1', + profile_name=None): if not region: for reg in boto.glacier.regions(): @@ -60,7 +94,7 @@ class Layer1(AWSAuthConnection): proxy_user, proxy_pass, debug, https_connection_factory, path, provider, security_token, - suppress_consec_slashes) + suppress_consec_slashes, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] @@ -87,35 +121,39 @@ class Layer1(AWSAuthConnection): def list_vaults(self, limit=None, marker=None): """ - This operation lists all vaults owned by the calling user’s + This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name. By default, this operation returns up to 1,000 items. If there - are more vaults to list, the marker field in the response body - contains the vault Amazon Resource Name (ARN) at which to - continue the list with a new List Vaults request; otherwise, - the marker field is null. In your next List Vaults request you - set the marker parameter to the value Amazon Glacier returned - in the responses to your previous List Vaults request. You can - also limit the number of vaults returned in the response by - specifying the limit parameter in the request. + are more vaults to list, the response `marker` field contains + the vault Amazon Resource Name (ARN) at which to continue the + list with a new List Vaults request; otherwise, the `marker` + field is `null`. To return a list of vaults that begins at a + specific vault, set the `marker` request parameter to the + vault ARN you obtained from a previous List Vaults request. + You can also limit the number of vaults returned in the + response by specifying the `limit` parameter in the request. - :type limit: int - :param limit: The maximum number of items returned in the - response. If you don't specify a value, the List Vaults - operation returns up to 1,000 items. + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. - :type marker: str - :param marker: A string used for pagination. marker specifies - the vault ARN after which the listing of vaults should - begin. (The vault specified by marker is not included in - the returned list.) Get the marker value from a previous - List Vaults response. 
You need to include the marker only - if you are continuing the pagination of results started in - a previous List Vaults request. Specifying an empty value - ("") for the marker returns a list of vaults starting - from the first vault. + For conceptual information and underlying REST API, go to + `Retrieving Vault Metadata in Amazon Glacier`_ and `List + Vaults `_ in the Amazon Glacier Developer Guide . + + :type marker: string + :param marker: A string used for pagination. The marker specifies the + vault ARN after which the listing of vaults should begin. + + :type limit: string + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the List Vaults operation returns up to + 1,000 items. """ params = {} if limit: @@ -127,18 +165,31 @@ class Layer1(AWSAuthConnection): def describe_vault(self, vault_name): """ This operation returns information about a vault, including - the vault Amazon Resource Name (ARN), the date the vault was - created, the number of archives contained within the vault, - and the total size of all the archives in the vault. The - number of archives and their total size are as of the last - vault inventory Amazon Glacier generated. Amazon Glacier - generates vault inventories approximately daily. This means - that if you add or remove an archive from a vault, and then - immediately send a Describe Vault request, the response might - not reflect the changes. + the vault's Amazon Resource Name (ARN), the date the vault was + created, the number of archives it contains, and the total + size of all the archives in the vault. The number of archives + and their total size are as of the last inventory generation. + This means that if you add or remove an archive from a vault, + and then immediately use Describe Vault, the change in + contents will not be immediately reflected. If you want to + retrieve the latest inventory of the vault, use InitiateJob. + Amazon Glacier generates vault inventories approximately + daily. For more information, see `Downloading a Vault + Inventory in Amazon Glacier`_. - :type vault_name: str - :param vault_name: The name of the new vault + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Retrieving Vault Metadata in Amazon Glacier`_ and `Describe + Vault `_ in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s' % vault_name return self.make_request('GET', uri) @@ -147,23 +198,34 @@ class Layer1(AWSAuthConnection): """ This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an - AWS account. You can create up to 1,000 vaults per - account. For information on creating more vaults, go to the - Amazon Glacier product detail page. + AWS account. You can create up to 1,000 vaults per account. If + you need to create more vaults, contact Amazon Glacier. You must use the following guidelines when naming a vault. - Names can be between 1 and 255 characters long. - Allowed characters are a–z, A–Z, 0–9, '_' (underscore), - '-' (hyphen), and '.' (period). 
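# The marker/limit pagination described in the rewritten list_vaults()
# docstring, as a sketch. Assumes `conn` is a Layer1 instance and that the
# parsed response exposes the REST API's VaultList and Marker fields.
vaults = []
marker = None
while True:
    response = conn.list_vaults(limit='1000', marker=marker)
    vaults.extend(response['VaultList'])
    marker = response['Marker']  # null (None) once the listing is exhausted
    if not marker:
        break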
- This operation is idempotent, you can send the same request - multiple times and it has no further effect after the first - time Amazon Glacier creates the specified vault. + + Names can be between 1 and 255 characters long. + + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' + (hyphen), and '.' (period). - :type vault_name: str - :param vault_name: The name of the new vault + + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Creating a Vault in Amazon Glacier`_ and `Create Vault `_ in + the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s' % vault_name return self.make_request('PUT', uri, ok_responses=(201,), @@ -172,50 +234,114 @@ class Layer1(AWSAuthConnection): def delete_vault(self, vault_name): """ This operation deletes a vault. Amazon Glacier will delete a - vault only if there are no archives in the vault as per the + vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not - removed) and Amazon Glacier returns an error. + removed) and Amazon Glacier returns an error. You can use + DescribeVault to return the number of archives in a vault, and + you can use `Initiate a Job (POST jobs)`_ to initiate a new + inventory retrieval for a vault. The inventory contains the + archive IDs you use to delete archives using `Delete Archive + (DELETE archive)`_. - This operation is idempotent, you can send the same request - multiple times and it has no further effect after the first - time Amazon Glacier delete the specified vault. + This operation is idempotent. - :type vault_name: str - :param vault_name: The name of the new vault + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Deleting a Vault in Amazon Glacier`_ and `Delete Vault `_ in + the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s' % vault_name return self.make_request('DELETE', uri, ok_responses=(204,)) def get_vault_notifications(self, vault_name): """ - This operation retrieves the notification-configuration - subresource set on the vault. + This operation retrieves the `notification-configuration` + subresource of the specified vault. - :type vault_name: str - :param vault_name: The name of the new vault + For information about setting a notification configuration on + a vault, see SetVaultNotifications. If a notification + configuration for a vault is not set, the operation returns a + `404 Not Found` error. For more information about vault + notifications, see `Configuring Vault Notifications in Amazon + Glacier`_. 
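# The notification configuration handled here is a small JSON document. A
# hedged sketch of setting it and reading it back: the topic ARN is
# illustrative, and the SNSTopic/Events field names follow the REST API
# rather than anything shown in this patch.
config = {
    'SNSTopic': 'arn:aws:sns:us-east-1:111122223333:glacier-jobs',
    'Events': ['ArchiveRetrievalCompleted', 'InventoryRetrievalCompleted'],
}
conn.set_vault_notifications('examplevault', config)
current = conn.get_vault_notifications('examplevault')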
+ + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and `Get + Vault Notification Configuration `_ in the Amazon Glacier + Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s/notification-configuration' % vault_name return self.make_request('GET', uri) def set_vault_notifications(self, vault_name, notification_config): """ - This operation retrieves the notification-configuration - subresource set on the vault. + This operation configures notifications that will be sent when + specific events happen to a vault. By default, you don't get + any notifications. - :type vault_name: str - :param vault_name: The name of the new vault + To configure vault notifications, send a PUT request to the + `notification-configuration` subresource of the vault. The + request should include a JSON document that provides an Amazon + SNS topic and specific events for which you want Amazon + Glacier to send notifications to the topic. - :type notification_config: dict - :param notification_config: A Python dictionary containing - an SNS Topic and events for which you want Amazon Glacier - to send notifications to the topic. Possible events are: + Amazon SNS topics must grant permission to the vault to be + allowed to publish notifications to the topic. You can + configure a vault to publish a notification for the following + vault events: - * ArchiveRetrievalCompleted - occurs when a job that was - initiated for an archive retrieval is completed. - * InventoryRetrievalCompleted - occurs when a job that was - initiated for an inventory retrieval is completed. + + + **ArchiveRetrievalCompleted** This event occurs when a job + that was initiated for an archive retrieval is completed + (InitiateJob). The status of the completed job can be + "Succeeded" or "Failed". The notification sent to the SNS + topic is the same output as returned from DescribeJob. + + **InventoryRetrievalCompleted** This event occurs when a job + that was initiated for an inventory retrieval is completed + (InitiateJob). The status of the completed job can be + "Succeeded" or "Failed". The notification sent to the SNS + topic is the same output as returned from DescribeJob. + + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and `Set + Vault Notification Configuration `_ in the Amazon Glacier + Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type vault_notification_config: dict + :param vault_notification_config: Provides options for specifying + notification configuration. 
The format of the dictionary is: @@ -229,11 +355,27 @@ class Layer1(AWSAuthConnection): def delete_vault_notifications(self, vault_name): """ - This operation deletes the notification-configuration - subresource set on the vault. + This operation deletes the notification configuration set for + a vault. The operation is eventually consistent; that is, it + might take some time for Amazon Glacier to completely disable + the notifications and you might still receive some + notifications for a short time after you send the delete + request. - :type vault_name: str - :param vault_name: The name of the new vault + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and + `Delete Vault Notification Configuration `_ in the Amazon + Glacier Developer Guide. + + :type vault_name: string + :param vault_name: The name of the vault. """ uri = 'vaults/%s/notification-configuration' % vault_name return self.make_request('DELETE', uri, ok_responses=(204,)) @@ -243,36 +385,80 @@ class Layer1(AWSAuthConnection): def list_jobs(self, vault_name, completed=None, status_code=None, limit=None, marker=None): """ - This operation lists jobs for a vault including jobs that are + This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. - :type vault_name: str + + Amazon Glacier retains recently completed jobs for a period + before deleting them; however, it eventually removes completed + jobs. The output of completed jobs can be retrieved. Retaining + completed jobs for a period of time after they have completed + enables you to get a job output in the event you miss the job + completion notification or your first attempt to download it + fails. For example, suppose you start an archive retrieval job + to download an archive. After the job completes, you start to + download the archive but encounter a network error. In this + scenario, you can retry and download the archive while the job + exists. + + + To retrieve an archive or retrieve a vault inventory from + Amazon Glacier, you first initiate a job, and after the job + completes, you download the data. For an archive retrieval, + the output is the archive data, and for an inventory + retrieval, it is the inventory list. The List Jobs operation + returns a list of these jobs sorted by job initiation time. + + This List Jobs operation supports pagination. By default, this + operation returns up to 1,000 jobs in the response. You should + always check the response for a `marker` at which to continue + the list; if there are no more items the `marker` is `null`. + To return a list of jobs that begins at a specific job, set + the `marker` request parameter to the value you obtained from + a previous List Jobs request. You can also limit the number of + jobs returned in the response by specifying the `limit` + parameter in the request. + + Additionally, you can filter the jobs list returned by + specifying an optional `statuscode` (InProgress, Succeeded, or + Failed) and `completed` (true, false) parameter. The + `statuscode` allows you to specify that only jobs that match a + specified status are returned.
The `completed` parameter + allows you to specify that only jobs in a specific completion + state are returned. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For the underlying REST API, go to `List Jobs `_ + + :type vault_name: string :param vault_name: The name of the vault. - :type completed: boolean - :param completed: Specifies the state of the jobs to return. - If a value of True is passed, only completed jobs will - be returned. If a value of False is passed, only - uncompleted jobs will be returned. If no value is - passed, all jobs will be returned. + :type limit: string + :param limit: Specifies that the response be limited to the specified + number of items or fewer. If not specified, the List Jobs operation + returns up to 1,000 jobs. - :type status_code: string - :param status_code: Specifies the type of job status to return. - Valid values are: InProgress|Succeeded|Failed. If not - specified, jobs with all status codes are returned. + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the job at which the listing of jobs should begin. Get + the marker value from a previous List Jobs response. You need only + include the marker if you are continuing the pagination of results + started in a previous List Jobs request. - :type limit: int - :param limit: The maximum number of items returned in the - response. If you don't specify a value, the List Jobs - operation returns up to 1,000 items. + :type statuscode: string + :param statuscode: Specifies the type of job status to return. You can + specify the following values: "InProgress", "Succeeded", or + "Failed". - :type marker: str - :param marker: An opaque string used for pagination. marker - specifies the job at which the listing of jobs should - begin. Get the marker value from a previous List Jobs - response. You need only include the marker if you are - continuing the pagination of results started in a previous - List Jobs request. + :type completed: string + :param completed: Specifies the state of the jobs to return. You can + specify `True` or `False`. """ params = {} @@ -292,39 +478,154 @@ class Layer1(AWSAuthConnection): This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon - Simple Notification Service (Amazon SNS) topic to notify after - Amazon Glacier completes the job. + SNS topic to notify after Amazon Glacier completes the job. + For more information about initiating a job, see InitiateJob. - :type vault_name: str - :param vault_name: The name of the new vault - :type job_id: str - :param job_id: The ID of the job. + This operation enables you to check the status of your job. + However, it is strongly recommended that you set up an Amazon + SNS topic and specify it in your initiate job request so that + Amazon Glacier can notify the topic after it completes the + job. + + + A job ID will not expire for at least 24 hours after Amazon + Glacier completes the job. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. 
You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For information about the underlying REST API, go to `Working + with Archives in Amazon Glacier`_ in the Amazon Glacier + Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_id: string + :param job_id: The ID of the job to describe. """ uri = 'vaults/%s/jobs/%s' % (vault_name, job_id) return self.make_request('GET', uri, ok_responses=(200,)) def initiate_job(self, vault_name, job_data): """ - This operation initiates a job of the specified - type. Retrieving an archive or a vault inventory are - asynchronous operations that require you to initiate a job. It - is a two-step process: + This operation initiates a job of the specified type. In this + release, you can initiate a job to retrieve either an archive + or a vault inventory (a list of archives in a vault). - * Initiate a retrieval job. - * After the job completes, download the bytes. + Retrieving data from Amazon Glacier is a two-step process: - The retrieval is executed asynchronously. When you initiate - a retrieval job, Amazon Glacier creates a job and returns a - job ID in the response. - :type vault_name: str - :param vault_name: The name of the new vault + #. Initiate a retrieval job. + #. After the job completes, download the bytes. - :type job_data: dict - :param job_data: A Python dictionary containing the - information about the requested job. The dictionary - can contain the following attributes: + + The retrieval request is executed asynchronously. When you + initiate a retrieval job, Amazon Glacier creates a job and + returns a job ID in the response. When Amazon Glacier + completes the job, you can get the job output (archive or + inventory data). For information about getting job output, see + GetJobOutput operation. + + The job must complete before you can get its output. To + determine when a job is complete, you have the following + options: + + + + **Use Amazon SNS Notification** You can specify an Amazon + Simple Notification Service (Amazon SNS) topic to which Amazon + Glacier can post a notification after the job is completed. + You can specify an SNS topic per job request. The notification + is sent only after Amazon Glacier completes the job. In + addition to specifying an SNS topic per job request, you can + configure vault notifications for a vault so that job + notifications are always sent. For more information, see + SetVaultNotifications. + + **Get job details** You can make a DescribeJob request to + obtain job status information while a job is in progress. + However, it is more efficient to use an Amazon SNS + notification to determine when a job is complete. + + + + The information you get via notification is same that you get + by calling DescribeJob. + + + If for a specific event, you add both the notification + configuration on the vault and also specify an SNS topic in + your initiate job request, Amazon Glacier sends both + notifications. For more information, see + SetVaultNotifications. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. 
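# Tying the two operations above together: retrieval is initiate-then-poll
# (or better, initiate and wait for the SNS notification). A sketch; the
# JobId and Completed response fields are assumed from the REST API rather
# than shown in this patch.
response = conn.initiate_job('examplevault', {'Type': 'inventory-retrieval'})
job_id = response['JobId']

status = conn.describe_job('examplevault', job_id)
if status['Completed']:
    output = conn.get_job_output('examplevault', job_id)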
+ + **About the Vault Inventory** + + Amazon Glacier prepares an inventory for each vault + periodically, every 24 hours. When you initiate a job for a + vault inventory, Amazon Glacier returns the last inventory for + the vault. The inventory data you get might be up to a day or + two days old. Also, the initiate inventory job might take some + time to complete before you can download the vault inventory. + So you do not want to retrieve a vault inventory for each + vault operation. However, in some scenarios, you might find + the vault inventory useful. For example, when you upload an + archive, you can provide an archive description but not an + archive name. Amazon Glacier provides you a unique archive ID, + an opaque string of characters. So, you might maintain your + own database that maps archive names to their corresponding + Amazon Glacier assigned archive IDs. You might find the vault + inventory useful in the event you need to reconcile + information in your database with the actual vault inventory. + + **About Ranged Archive Retrieval** + + You can initiate an archive retrieval for the whole archive or + a range of the archive. In the case of ranged archive + retrieval, you specify a byte range to return or the whole + archive. The range specified must be megabyte (MB) aligned, + that is the range start value must be divisible by 1 MB and + range end value plus 1 must be divisible by 1 MB or equal the + end of the archive. If the ranged archive retrieval is not + megabyte aligned, this operation returns a 400 response. + Furthermore, to ensure you get checksum values for data you + download using Get Job Output API, the range must be tree hash + aligned. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Initiate a Job`_ and `Downloading a Vault Inventory`_ + + :type account_id: string + :param account_id: The `AccountId` is the AWS Account ID. You can + specify either the AWS Account ID or optionally a '-', in which + case Amazon Glacier uses the AWS Account ID associated with the + credentials used to sign the request. If you specify your Account + ID, do not include hyphens in it. + + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_parameters: dict + :param job_parameters: Provides options for specifying job information. + The dictionary can contain the following attributes: * ArchiveId - The ID of the archive you want to retrieve. This field is required only if the Type is set to @@ -340,6 +641,12 @@ class Layer1(AWSAuthConnection): archive-retrieval|inventory-retrieval * RetrievalByteRange - Optionally specify the range of bytes to retrieve. + * InventoryRetrievalParameters: Optional job parameters + * Format - The output format, like "JSON" + * StartDate - ISO8601 starting date string + * EndDate - ISO8601 ending date string + * Limit - Maximum number of entries + * Marker - A unique string used for pagination """ uri = 'vaults/%s/jobs' % vault_name @@ -353,27 +660,72 @@ class Layer1(AWSAuthConnection): def get_job_output(self, vault_name, job_id, byte_range=None): """ This operation downloads the output of the job you initiated - using Initiate a Job. 
Depending on the job type - you specified when you initiated the job, the output will be - either the content of an archive or a vault inventory. + using InitiateJob. Depending on the job type you specified + when you initiated the job, the output will be either the + content of an archive or a vault inventory. - You can download all the job output or download a portion of - the output by specifying a byte range. In the case of an - archive retrieval job, depending on the byte range you - specify, Amazon Glacier returns the checksum for the portion - of the data. You can compute the checksum on the client and - verify that the values match to ensure the portion you - downloaded is the correct data. + A job ID will not expire for at least 24 hours after Amazon + Glacier completes the job. That is, you can download the job + output within the 24 hours period after Amazon Glacier + completes the job. - :type vault_name: str :param - :param vault_name: The name of the new vault + If the job output is large, then you can use the `Range` + request header to retrieve a portion of the output. This + allows you to download the entire output in smaller chunks of + bytes. For example, suppose you have 1 GB of job output you + want to download and you decide to download 128 MB chunks of + data at a time, which is a total of eight Get Job Output + requests. You use the following process to download the job + output: - :type job_id: str - :param job_id: The ID of the job. - :type byte_range: tuple - :param range: A tuple of integers specifying the slice (in bytes) - of the archive you want to receive + #. Download a 128 MB chunk of output by specifying the + appropriate byte range using the `Range` header. + #. Along with the data, the response includes a checksum of + the payload. You compute the checksum of the payload on the + client and compare it with the checksum you received in the + response to ensure you received all the expected data. + #. Repeat steps 1 and 2 for all the eight 128 MB chunks of + output data, each time specifying the appropriate byte range. + #. After downloading all the parts of the job output, you have + a list of eight checksum values. Compute the tree hash of + these values to find the checksum of the entire output. Using + the Describe Job API, obtain job information of the job that + provided you the output. The response includes the checksum of + the entire archive stored in Amazon Glacier. You compare this + value with the checksum you computed to ensure you have + downloaded the entire archive content with no errors. + + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Downloading a Vault Inventory`_, `Downloading an Archive`_, + and `Get Job Output `_ + + :type account_id: string + :param account_id: The `AccountId` is the AWS Account ID. You can + specify either the AWS Account ID or optionally a '-', in which + case Amazon Glacier uses the AWS Account ID associated with the + credentials used to sign the request. If you specify your Account + ID, do not include hyphens in it. + + :type vault_name: string + :param vault_name: The name of the vault. 
+ + :type job_id: string + :param job_id: The job ID whose data is downloaded. + + :type byte_range: string + :param byte_range: The range of bytes to retrieve from the output. For + example, if you want to download the first 1,048,576 bytes, specify + "Range: bytes=0-1048575". By default, this operation downloads the + entire output. """ response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'), ('Content-Range', u'ContentRange'), @@ -392,13 +744,50 @@ class Layer1(AWSAuthConnection): def upload_archive(self, vault_name, archive, linear_hash, tree_hash, description=None): """ - This operation adds an archive to a vault. For a successful - upload, your data is durably persisted. In response, Amazon - Glacier returns the archive ID in the x-amz-archive-id header - of the response. You should save the archive ID returned so - that you can access the archive later. + This operation adds an archive to a vault. This is a + synchronous operation, and for a successful upload, your data + is durably persisted. Amazon Glacier returns the archive ID in + the `x-amz-archive-id` header of the response. - :type vault_name: str :param + You must use the archive ID to access your data in Amazon + Glacier. After you upload an archive, you should save the + archive ID returned so that you can retrieve or delete the + archive later. Besides saving the archive ID, you can also + index it and give it a friendly name to allow for better + searching. You can also use the optional archive description + field to specify how the archive is referred to in an external + index of archives, such as you might create in Amazon + DynamoDB. You can also get the vault inventory to obtain a + list of archive IDs in a vault. For more information, see + InitiateJob. + + You must provide a SHA256 tree hash of the data you are + uploading. For information about computing a SHA256 tree hash, + see `Computing Checksums`_. + + You can optionally specify an archive description of up to + 1,024 printable ASCII characters. You can get the archive + description when you either retrieve the archive or get the + vault inventory. For more information, see InitiateJob. Amazon + Glacier does not interpret the description in any way. An + archive description does not need to be unique. You cannot use + the description to retrieve or sort the archive list. + + Archives are immutable. After you upload an archive, you + cannot edit the archive or its description. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading an Archive in Amazon Glacier`_ and `Upload + Archive`_ in the Amazon Glacier Developer Guide . + + :type vault_name: str :param vault_name: The name of the vault :type archive: bytes @@ -414,7 +803,8 @@ class Layer1(AWSAuthConnection): tree hash, see http://goo.gl/u7chF. :type description: str - :param description: An optional description of the archive. + :param description: The optional description of the archive you + are uploading. 
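# The linear_hash/tree_hash arguments above are computed client-side. A
# sketch using the compute_hashes_from_fileobj helper bundled in
# boto.glacier.utils (helper name per the vendored sources; treat it as an
# assumption). The file name is illustrative, and ArchiveId follows the
# response-header mapping in the code just below.
from boto.glacier.utils import compute_hashes_from_fileobj

with open('backup.tar.gz', 'rb') as f:
    linear_hash, tree_hash = compute_hashes_from_fileobj(f)
    f.seek(0)
    archive = f.read()

response = conn.upload_archive('examplevault', archive, linear_hash,
                               tree_hash, description='nightly backup')
archive_id = response['ArchiveId']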
""" response_headers = [('x-amz-archive-id', u'ArchiveId'), ('Location', u'Location'), @@ -445,13 +835,39 @@ class Layer1(AWSAuthConnection): def delete_archive(self, vault_name, archive_id): """ - This operation deletes an archive from a vault. + This operation deletes an archive from a vault. Subsequent + requests to initiate a retrieval of this archive will fail. + Archive retrievals that are in progress for this archive ID + may or may not succeed according to the following scenarios: - :type vault_name: str - :param vault_name: The name of the new vault - :type archive_id: str - :param archive_id: The ID for the archive to be deleted. + + If the archive retrieval job is actively preparing the data + for download when Amazon Glacier receives the delete archive + request, the archival retrieval operation might fail. + + If the archive retrieval job has successfully prepared the + archive for download when Amazon Glacier receives the delete + archive request, you will be able to download the output. + + + This operation is idempotent. Attempting to delete an already- + deleted archive does not result in an error. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Deleting an Archive in Amazon Glacier`_ and `Delete Archive`_ + in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type archive_id: string + :param archive_id: The ID of the archive to delete. """ uri = 'vaults/%s/archives/%s' % (vault_name, archive_id) return self.make_request('DELETE', uri, ok_responses=(204,)) @@ -461,21 +877,65 @@ class Layer1(AWSAuthConnection): def initiate_multipart_upload(self, vault_name, part_size, description=None): """ - Initiate a multipart upload. Amazon Glacier creates a - multipart upload resource and returns it's ID. You use this - ID in subsequent multipart upload operations. + This operation initiates a multipart upload. Amazon Glacier + creates a multipart upload resource and returns its ID in the + response. The multipart upload ID is used in subsequent + requests to upload parts of an archive (see + UploadMultipartPart). + + When you initiate a multipart upload, you specify the part + size in number of bytes. The part size must be a megabyte + (1024 KB) multiplied by a power of 2-for example, 1048576 (1 + MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so + on. The minimum allowable part size is 1 MB, and the maximum + is 4 GB. + + Every part you upload to this resource (see + UploadMultipartPart), except the last one, must have the same + size. The last one can be the same size or smaller. For + example, suppose you want to upload a 16.2 MB file. If you + initiate the multipart upload with a part size of 4 MB, you + will upload four parts of 4 MB each and one part of 0.2 MB. + + + You don't need to know the size of the archive when you start + a multipart upload because Amazon Glacier does not require you + to specify the overall archive size. + + + After you complete the multipart upload, Amazon Glacier + removes the multipart upload resource referenced by the ID. 
+ Amazon Glacier also removes the multipart upload resource if + you cancel the multipart upload or it may be removed if there + is no activity for a period of 24 hours. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Initiate Multipart Upload`_ in the Amazon Glacier Developer + Guide . + + The part size must be a megabyte (1024 KB) multiplied by a power of + 2, for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), + 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, + and the maximum is 4 GB (4096 MB). :type vault_name: str :param vault_name: The name of the vault. :type description: str - :param description: An optional description of the archive. + :param description: The archive description that you are uploading in + parts. :type part_size: int - :param part_size: The size of each part except the last, in bytes. - The part size must be a multiple of 1024 KB multiplied by - a power of 2. The minimum allowable part size is 1MB and the - maximum is 4GB. + :param part_size: The size of each part except the last, in bytes. The + last part can be smaller than this part size. """ response_headers = [('x-amz-multipart-upload-id', u'UploadId'), ('Location', u'Location')] @@ -491,24 +951,77 @@ class Layer1(AWSAuthConnection): def complete_multipart_upload(self, vault_name, upload_id, sha256_treehash, archive_size): """ - Call this to inform Amazon Glacier that all of the archive parts - have been uploaded and Amazon Glacier can now assemble the archive - from the uploaded parts. + You call this operation to inform Amazon Glacier that all the + archive parts have been uploaded and that Amazon Glacier can + now assemble the archive from the uploaded parts. After + assembling and saving the archive to the vault, Amazon Glacier + returns the URI path of the newly created archive resource. + Using the URI path, you can then access the archive. After you + upload an archive, you should save the archive ID returned to + retrieve the archive at a later point. You can also get the + vault inventory to obtain a list of archive IDs in a vault. + For more information, see InitiateJob. + + In the request, you must include the computed SHA256 tree hash + of the entire archive you have uploaded. For information about + computing a SHA256 tree hash, see `Computing Checksums`_. On + the server side, Amazon Glacier also constructs the SHA256 + tree hash of the assembled archive. If the values match, + Amazon Glacier saves the archive to the vault; otherwise, it + returns an error, and the operation fails. The ListParts + operation returns a list of parts uploaded for a specific + multipart upload. It includes checksum information for each + uploaded part that can be used to debug a bad checksum issue. + + Additionally, Amazon Glacier also checks for any missing + content ranges when assembling the archive, if missing content + ranges are found, Amazon Glacier returns an error and the + operation fails. + + Complete Multipart Upload is an idempotent operation. 
After + your first successful complete multipart upload, if you call + the operation again within a short period, the operation will + succeed and return the same archive ID. This is useful in the + event you experience a network issue that causes an aborted + connection or receive a 500 server error, in which case you + can repeat your Complete Multipart Upload request and get the + same archive ID without creating duplicate archives. Note, + however, that after the multipart upload completes, you cannot + call the List Parts operation and the multipart upload will + not appear in List Multipart Uploads response, even if + idempotent complete is possible. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Complete Multipart Upload`_ in the Amazon Glacier Developer + Guide . + + :type checksum: string + :param checksum: The SHA256 tree hash of the entire archive. It is the + tree hash of SHA256 tree hash of the individual parts. If the value + you specify in the request does not match the SHA256 tree hash of + the final assembled archive as computed by Amazon Glacier, Amazon + Glacier returns an error and the request fails. :type vault_name: str :param vault_name: The name of the vault. :type upload_id: str - :param upload_id: The unique ID associated with this upload - operation. + :param upload_id: The upload ID of the multipart upload. :type sha256_treehash: str - :param sha256_treehash: The SHA256 tree hash of the entire - archive. It is the tree hash of SHA256 tree hash of the - individual parts. If the value you specify in the request - does not match the SHA256 tree hash of the final assembled - archive as computed by Amazon Glacier, Amazon Glacier - returns an error and the request fails. + :param sha256_treehash: The SHA256 tree hash of the entire archive. + It is the tree hash of SHA256 tree hash of the individual parts. + If the value you specify in the request does not match the SHA256 + tree hash of the final assembled archive as computed by Amazon + Glacier, Amazon Glacier returns an error and the request fails. :type archive_size: int :param archive_size: The total size, in bytes, of the entire @@ -527,37 +1040,90 @@ class Layer1(AWSAuthConnection): def abort_multipart_upload(self, vault_name, upload_id): """ - Call this to abort a multipart upload identified by the upload ID. + This operation aborts a multipart upload identified by the + upload ID. - :type vault_name: str + After the Abort Multipart Upload request succeeds, you cannot + upload any more parts to the multipart upload or complete the + multipart upload. Aborting a completed upload fails. However, + aborting an already-aborted upload will succeed, for a short + time. For more information about uploading a part and + completing a multipart upload, see UploadMultipartPart and + CompleteMultipartUpload. + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. 
For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `Abort + Multipart Upload`_ in the Amazon Glacier Developer Guide . + + :type vault_name: string :param vault_name: The name of the vault. - :type upload_id: str - :param upload_id: The unique ID associated with this upload - operation. + :type upload_id: string + :param upload_id: The upload ID of the multipart upload to delete. """ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) return self.make_request('DELETE', uri, ok_responses=(204,)) def list_multipart_uploads(self, vault_name, limit=None, marker=None): """ - Lists in-progress multipart uploads for the specified vault. + This operation lists in-progress multipart uploads for the + specified vault. An in-progress multipart upload is a + multipart upload that has been initiated by an + InitiateMultipartUpload request, but has not yet been + completed or aborted. The list returned in the List Multipart + Upload response has no guaranteed order. - :type vault_name: str + The List Multipart Uploads operation supports pagination. By + default, this operation returns up to 1,000 multipart uploads + in the response. You should always check the response for a + `marker` at which to continue the list; if there are no more + items the `marker` is `null`. To return a list of multipart + uploads that begins at a specific upload, set the `marker` + request parameter to the value you obtained from a previous + List Multipart Upload request. You can also limit the number + of uploads returned in the response by specifying the `limit` + parameter in the request. + + Note the difference between this operation and listing parts + (ListParts). The List Multipart Uploads operation lists all + multipart uploads for a vault and does not require a multipart + upload ID. The List Parts operation requires a multipart + upload ID since parts are associated with a single upload. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `List Multipart + Uploads `_ in the Amazon Glacier Developer Guide . + + :type vault_name: string :param vault_name: The name of the vault. - :type limit: int - :param limit: The maximum number of items returned in the - response. If you don't specify a value, the operation - returns up to 1,000 items. + :type limit: string + :param limit: Specifies the maximum number of uploads returned in the + response body. If this value is not specified, the List Uploads + operation returns up to 1,000 uploads. - :type marker: str - :param marker: An opaque string used for pagination. marker - specifies the item at which the listing should - begin. Get the marker value from a previous - response. You need only include the marker if you are - continuing the pagination of results started in a previous - request. + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the upload at which the listing of uploads should begin. + Get the marker value from a previous List Uploads response. 
You + need only include the marker if you are continuing the pagination + of results started in a previous List Uploads request. """ params = {} if limit: @@ -569,27 +1135,51 @@ class Layer1(AWSAuthConnection): def list_parts(self, vault_name, upload_id, limit=None, marker=None): """ - Lists in-progress multipart uploads for the specified vault. + This operation lists the parts of an archive that have been + uploaded in a specific multipart upload. You can make this + request at any time during an in-progress multipart upload + before you complete the upload (see CompleteMultipartUpload. + List Parts returns an error for completed uploads. The list + returned in the List Parts response is sorted by part range. - :type vault_name: str + The List Parts operation supports pagination. By default, this + operation returns up to 1,000 uploaded parts in the response. + You should always check the response for a `marker` at which + to continue the list; if there are no more items the `marker` + is `null`. To return a list of parts that begins at a specific + part, set the `marker` request parameter to the value you + obtained from a previous List Parts request. You can also + limit the number of parts returned in the response by + specifying the `limit` parameter in the request. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `List Parts`_ + in the Amazon Glacier Developer Guide . + + :type vault_name: string :param vault_name: The name of the vault. - :type upload_id: str - :param upload_id: The unique ID associated with this upload - operation. + :type upload_id: string + :param upload_id: The upload ID of the multipart upload. - :type limit: int - :param limit: The maximum number of items returned in the - response. If you don't specify a value, the operation - returns up to 1,000 items. + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the part at which the listing of parts should begin. Get + the marker value from the response of a previous List Parts + response. You need only include the marker if you are continuing + the pagination of results started in a previous List Parts request. - :type marker: str - :param marker: An opaque string used for pagination. marker - specifies the item at which the listing should - begin. Get the marker value from a previous - response. You need only include the marker if you are - continuing the pagination of results started in a previous - request. + :type limit: string + :param limit: Specifies the maximum number of parts returned in the + response body. If this value is not specified, the List Parts + operation returns up to 1,000 uploads. """ params = {} if limit: @@ -602,7 +1192,55 @@ class Layer1(AWSAuthConnection): def upload_part(self, vault_name, upload_id, linear_hash, tree_hash, byte_range, part_data): """ - Lists in-progress multipart uploads for the specified vault. + This operation uploads a part of an archive. You can upload + archive parts in any order. You can also upload them in + parallel. You can upload up to 10,000 parts for a multipart + upload. 
+ + Amazon Glacier rejects your upload part request if any of the + following conditions is true: + + + + **SHA256 tree hash does not match**To ensure that part data + is not corrupted in transmission, you compute a SHA256 tree + hash of the part and include it in your request. Upon + receiving the part data, Amazon Glacier also computes a SHA256 + tree hash. If these hash values don't match, the operation + fails. For information about computing a SHA256 tree hash, see + `Computing Checksums`_. + + **Part size does not match**The size of each part except the + last must match the size specified in the corresponding + InitiateMultipartUpload request. The size of the last part + must be the same size as, or smaller than, the specified size. + If you upload a part whose size is smaller than the part size + you specified in your initiate multipart upload request and + that part is not the last part, then the upload part request + will succeed. However, the subsequent Complete Multipart + Upload request will fail. + + **Range does not align**The byte range value in the request + does not align with the part size specified in the + corresponding initiate request. For example, if you specify a + part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 + MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid + part ranges. However, if you set a range value of 2 MB to 6 + MB, the range does not align with the part size and the upload + will fail. + + + This operation is idempotent. If you upload the same part + multiple times, the data included in the most recent request + overwrites the previously uploaded data. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Upload Part `_ in the Amazon Glacier Developer Guide . :type vault_name: str :param vault_name: The name of the vault. @@ -621,8 +1259,11 @@ class Layer1(AWSAuthConnection): operation. :type byte_range: tuple of ints - :param byte_range: Identfies the range of bytes in the assembled - archive that will be uploaded in this part. + :param byte_range: Identifies the range of bytes in the assembled + archive that will be uploaded in this part. Amazon Glacier uses + this information to assemble the archive in the proper sequence. + The format of this header follows RFC 2616. An example header is + Content-Range:bytes 0-4194303/*. :type part_data: bytes :param part_data: The data to be uploaded for the part diff --git a/awx/lib/site-packages/boto/glacier/vault.py b/awx/lib/site-packages/boto/glacier/vault.py index 0186dbd381..e7d4e27d24 100644 --- a/awx/lib/site-packages/boto/glacier/vault.py +++ b/awx/lib/site-packages/boto/glacier/vault.py @@ -300,7 +300,9 @@ class Vault(object): return self.get_job(response['JobId']) def retrieve_inventory(self, sns_topic=None, - description=None): + description=None, byte_range=None, + start_date=None, end_date=None, + limit=None): """ Initiate a inventory retrieval job to list the items in the vault. You will need to wait for the notification from @@ -315,6 +317,18 @@ class Vault(object): sends notification when the job is completed and the output is ready for you to download. 
+ :type byte_range: str + :param byte_range: Range of bytes to retrieve. + + :type start_date: DateTime + :param start_date: Beginning of the date range to query. + + :type end_date: DateTime + :param end_date: End of the date range to query. + + :type limit: int + :param limit: Limits the number of results returned. + :rtype: str :return: The ID of the job """ @@ -323,6 +337,19 @@ class Vault(object): job_data['SNSTopic'] = sns_topic if description is not None: job_data['Description'] = description + if byte_range is not None: + job_data['RetrievalByteRange'] = byte_range + if start_date is not None or end_date is not None or limit is not None: + rparams = {} + + if start_date is not None: + rparams['StartDate'] = start_date.isoformat() + if end_date is not None: + rparams['EndDate'] = end_date.isoformat() + if limit is not None: + rparams['Limit'] = limit + + job_data['InventoryRetrievalParameters'] = rparams response = self.layer1.initiate_job(self.name, job_data) return response['JobId'] @@ -340,6 +367,18 @@ class Vault(object): sends notification when the job is completed and the output is ready for you to download. + :type byte_range: str + :param byte_range: Range of bytes to retrieve. + + :type start_date: DateTime + :param start_date: Beginning of the date range to query. + + :type end_date: DateTime + :param end_date: End of the date range to query. + + :type limit: int + :param limit: Limits the number of results returned. + :rtype: :class:`boto.glacier.job.Job` :return: A Job object representing the retrieval job. """ diff --git a/awx/lib/site-packages/boto/gs/bucketlistresultset.py b/awx/lib/site-packages/boto/gs/bucketlistresultset.py index 5e717a5024..db634cfd45 100644 --- a/awx/lib/site-packages/boto/gs/bucketlistresultset.py +++ b/awx/lib/site-packages/boto/gs/bucketlistresultset.py @@ -38,7 +38,7 @@ def versioned_bucket_lister(bucket, prefix='', delimiter='', generation_marker = rs.next_generation_marker more_results= rs.is_truncated -class VersionedBucketListResultSet: +class VersionedBucketListResultSet(object): """ A resultset for listing versions within a bucket. Uses the bucket_lister generator function and implements the iterator interface. This diff --git a/awx/lib/site-packages/boto/gs/connection.py b/awx/lib/site-packages/boto/gs/connection.py index 4c31979c71..9a2e4a2bbb 100644 --- a/awx/lib/site-packages/boto/gs/connection.py +++ b/awx/lib/site-packages/boto/gs/connection.py @@ -25,14 +25,14 @@ from boto.s3.connection import SubdomainCallingFormat from boto.s3.connection import check_lowercase_bucketname from boto.utils import get_utf8_value -class Location: +class Location(object): DEFAULT = 'US' EU = 'EU' class GSConnection(S3Connection): DefaultHost = 'storage.googleapis.com' - QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s' + QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s' def __init__(self, gs_access_key_id=None, gs_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, @@ -103,3 +103,27 @@ class GSConnection(S3Connection): raise self.provider.storage_response_error( response.status, response.reason, body) + def get_bucket(self, bucket_name, validate=True, headers=None): + """ + Retrieves a bucket by name. + + If the bucket does not exist, an ``S3ResponseError`` will be raised. If + you are unsure if the bucket exists or not, you can use the + ``S3Connection.lookup`` method, which will either return a valid bucket + or ``None``. 
+ + :type bucket_name: string + :param bucket_name: The name of the bucket + + :type headers: dict + :param headers: Additional headers to pass along with the request to + AWS. + + :type validate: boolean + :param validate: If ``True``, it will try to fetch all keys within the + given bucket. (Default: ``True``) + """ + bucket = self.bucket_class(self, bucket_name) + if validate: + bucket.get_all_keys(headers, maxkeys=0) + return bucket diff --git a/awx/lib/site-packages/boto/gs/key.py b/awx/lib/site-packages/boto/gs/key.py index 4417973b09..277e7c7150 100644 --- a/awx/lib/site-packages/boto/gs/key.py +++ b/awx/lib/site-packages/boto/gs/key.py @@ -109,6 +109,9 @@ class Key(S3Key): self.metageneration = resp.getheader('x-goog-metageneration', None) self.generation = resp.getheader('x-goog-generation', None) + def handle_restore_headers(self, response): + return + def handle_addl_headers(self, headers): for key, value in headers: if key == 'x-goog-hash': @@ -219,7 +222,7 @@ class Key(S3Key): with the stored object in the response. See http://goo.gl/sMkcC for details. """ - if self.bucket != None: + if self.bucket is not None: if res_download_handler: res_download_handler.get_file(self, fp, headers, cb, num_cb, torrent=torrent, @@ -528,7 +531,7 @@ class Key(S3Key): if hasattr(fp, 'name'): self.path = fp.name - if self.bucket != None: + if self.bucket is not None: if isinstance(fp, KeyFile): # Avoid EOF seek for KeyFile case as it's very inefficient. key = fp.getkey() @@ -552,12 +555,12 @@ class Key(S3Key): fp.seek(spos) size = self.size - if md5 == None: + if md5 is None: md5 = self.compute_md5(fp, size) self.md5 = md5[0] self.base64md5 = md5[1] - if self.name == None: + if self.name is None: self.name = self.md5 if not replace: @@ -792,7 +795,7 @@ class Key(S3Key): the acl will only be updated if its current metageneration number is this value. """ - if self.bucket != None: + if self.bucket is not None: self.bucket.set_acl(acl_or_str, self.name, headers=headers, generation=generation, if_generation=if_generation, @@ -809,7 +812,7 @@ class Key(S3Key): :rtype: :class:`.gs.acl.ACL` """ - if self.bucket != None: + if self.bucket is not None: return self.bucket.get_acl(self.name, headers=headers, generation=generation) @@ -824,7 +827,7 @@ class Key(S3Key): :rtype: str """ - if self.bucket != None: + if self.bucket is not None: return self.bucket.get_xml_acl(self.name, headers=headers, generation=generation) @@ -852,7 +855,7 @@ class Key(S3Key): the acl will only be updated if its current metageneration number is this value. """ - if self.bucket != None: + if self.bucket is not None: return self.bucket.set_xml_acl(acl_str, self.name, headers=headers, generation=generation, if_generation=if_generation, @@ -883,7 +886,7 @@ class Key(S3Key): the acl will only be updated if its current metageneration number is this value. """ - if self.bucket != None: + if self.bucket is not None: return self.bucket.set_canned_acl( acl_str, self.name, diff --git a/awx/lib/site-packages/boto/gs/resumable_upload_handler.py b/awx/lib/site-packages/boto/gs/resumable_upload_handler.py index d3d8629775..d3835e3a2b 100644 --- a/awx/lib/site-packages/boto/gs/resumable_upload_handler.py +++ b/awx/lib/site-packages/boto/gs/resumable_upload_handler.py @@ -102,13 +102,13 @@ class ResumableUploadHandler(object): # Ignore non-existent file (happens first time an upload # is attempted on a file), but warn user for other errors. if e.errno != errno.ENOENT: - # Will restart because self.tracker_uri == None. 
+ # Will restart because self.tracker_uri is None. print('Couldn\'t read URI tracker file (%s): %s. Restarting ' 'upload from scratch.' % (self.tracker_file_name, e.strerror)) except InvalidUriError, e: # Warn user, but proceed (will restart because - # self.tracker_uri == None). + # self.tracker_uri is None). print('Invalid tracker URI (%s) found in URI tracker file ' '(%s). Restarting upload from scratch.' % (uri, self.tracker_file_name)) @@ -124,8 +124,9 @@ class ResumableUploadHandler(object): return f = None try: - f = open(self.tracker_file_name, 'w') - f.write(self.tracker_uri) + with os.fdopen(os.open(self.tracker_file_name, + os.O_WRONLY | os.O_CREAT, 0600), 'w') as f: + f.write(self.tracker_uri) except IOError, e: raise ResumableUploadException( 'Couldn\'t write URI tracker file (%s): %s.\nThis can happen' @@ -134,9 +135,6 @@ class ResumableUploadHandler(object): 'unwritable directory)' % (self.tracker_file_name, e.strerror), ResumableTransferDisposition.ABORT) - finally: - if f: - f.close() def _set_tracker_uri(self, uri): """ diff --git a/awx/lib/site-packages/boto/gs/user.py b/awx/lib/site-packages/boto/gs/user.py index 62f2cf56d5..c3072952f9 100644 --- a/awx/lib/site-packages/boto/gs/user.py +++ b/awx/lib/site-packages/boto/gs/user.py @@ -20,7 +20,7 @@ # IN THE SOFTWARE. -class User: +class User(object): def __init__(self, parent=None, id='', name=''): if parent: parent.owner = self diff --git a/awx/lib/site-packages/boto/handler.py b/awx/lib/site-packages/boto/handler.py index e11722bf42..f936ee8893 100644 --- a/awx/lib/site-packages/boto/handler.py +++ b/awx/lib/site-packages/boto/handler.py @@ -32,7 +32,7 @@ class XmlHandler(xml.sax.ContentHandler): def startElement(self, name, attrs): self.current_text = '' new_node = self.nodes[-1][1].startElement(name, attrs, self.connection) - if new_node != None: + if new_node is not None: self.nodes.append((name, new_node)) def endElement(self, name): diff --git a/awx/lib/site-packages/boto/https_connection.py b/awx/lib/site-packages/boto/https_connection.py index 4cbf5182db..147119531f 100644 --- a/awx/lib/site-packages/boto/https_connection.py +++ b/awx/lib/site-packages/boto/https_connection.py @@ -109,8 +109,12 @@ class CertValidatingHTTPSConnection(httplib.HTTPConnection): if hasattr(self, "timeout") and self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(self.timeout) sock.connect((self.host, self.port)) - boto.log.debug("wrapping ssl socket; CA certificate file=%s", - self.ca_certs) + msg = "wrapping ssl socket; " + if self.ca_certs: + msg += "CA certificate file=%s" %self.ca_certs + else: + msg += "using system provided SSL certs" + boto.log.debug(msg) self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, cert_reqs=ssl.CERT_REQUIRED, diff --git a/awx/lib/site-packages/boto/iam/__init__.py b/awx/lib/site-packages/boto/iam/__init__.py index fac7b5db31..3df44f2953 100644 --- a/awx/lib/site-packages/boto/iam/__init__.py +++ b/awx/lib/site-packages/boto/iam/__init__.py @@ -22,8 +22,8 @@ # this is here for backward compatibility # originally, the IAMConnection class was defined here -from connection import IAMConnection -from boto.regioninfo import RegionInfo +from boto.iam.connection import IAMConnection +from boto.regioninfo import RegionInfo, get_regions class IAMRegionInfo(RegionInfo): @@ -50,16 +50,22 @@ def regions(): :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [IAMRegionInfo(name='universal', - endpoint='iam.amazonaws.com', - 
connection_cls=IAMConnection), - IAMRegionInfo(name='us-gov-west-1', - endpoint='iam.us-gov.amazonaws.com', - connection_cls=IAMConnection), - IAMRegionInfo(name='cn-north-1', - endpoint='iam.cn-north-1.amazonaws.com.cn', - connection_cls=IAMConnection) - ] + regions = get_regions( + 'iam', + region_cls=IAMRegionInfo, + connection_cls=IAMConnection + ) + + # For historical reasons, we had a "universal" endpoint as well. + regions.append( + IAMRegionInfo( + name='universal', + endpoint='iam.amazonaws.com', + connection_cls=IAMConnection + ) + ) + + return regions def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/iam/connection.py b/awx/lib/site-packages/boto/iam/connection.py index 32b3ab2e3d..4872b27493 100644 --- a/awx/lib/site-packages/boto/iam/connection.py +++ b/awx/lib/site-packages/boto/iam/connection.py @@ -40,15 +40,16 @@ class IAMConnection(AWSQueryConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host='iam.amazonaws.com', - debug=0, https_connection_factory=None, - path='/', security_token=None, validate_certs=True): + debug=0, https_connection_factory=None, path='/', + security_token=None, validate_certs=True, profile_name=None): super(IAMConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, host, debug, https_connection_factory, path, security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] diff --git a/awx/lib/site-packages/boto/jsonresponse.py b/awx/lib/site-packages/boto/jsonresponse.py index 01e1f54ff7..5dab5af991 100644 --- a/awx/lib/site-packages/boto/jsonresponse.py +++ b/awx/lib/site-packages/boto/jsonresponse.py @@ -33,7 +33,7 @@ class XmlHandler(xml.sax.ContentHandler): def startElement(self, name, attrs): self.current_text = '' t = self.nodes[-1][1].startElement(name, attrs, self.connection) - if t != None: + if t is not None: if isinstance(t, tuple): self.nodes.append(t) else: diff --git a/awx/lib/site-packages/boto/kinesis/__init__.py b/awx/lib/site-packages/boto/kinesis/__init__.py index 1c19a3b25e..5fc33c742a 100644 --- a/awx/lib/site-packages/boto/kinesis/__init__.py +++ b/awx/lib/site-packages/boto/kinesis/__init__.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,11 +31,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.kinesis.layer1 import KinesisConnection - - return [RegionInfo(name='us-east-1', - endpoint='kinesis.us-east-1.amazonaws.com', - connection_cls=KinesisConnection), - ] + return get_regions('kinesis', connection_cls=KinesisConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/kinesis/layer1.py b/awx/lib/site-packages/boto/kinesis/layer1.py index f282d89886..ede695bed6 100644 --- a/awx/lib/site-packages/boto/kinesis/layer1.py +++ b/awx/lib/site-packages/boto/kinesis/layer1.py @@ -532,11 +532,10 @@ class KinesisConnection(AWSQueryConnection): placed and the sequence number that was assigned to the data record. - The `SequenceNumberForOrdering` sets the initial sequence - number for the partition key. 
Later `PutRecord` requests to - the same partition key (from the same client) will - automatically increase from `SequenceNumberForOrdering`, - ensuring strict sequential ordering. + Sequence numbers generally increase over time. To guarantee + strictly increasing ordering, use the + `SequenceNumberForOrdering` parameter. For more information, + see the `Amazon Kinesis Developer Guide`_. If a `PutRecord` request cannot be processed because of insufficient provisioned throughput on the shard involved in @@ -550,8 +549,10 @@ class KinesisConnection(AWSQueryConnection): :param stream_name: The name of the stream to put the data record into. :type data: blob - :param data: The data blob to put into the record, which will be Base64 - encoded. The maximum size of the data blob is 50 kilobytes (KB). + :param data: The data blob to put into the record, which is + Base64-encoded when the blob is serialized. + The maximum size of the data blob (the payload after + Base64-decoding) is 50 kilobytes (KB) Set `b64_encode` to disable automatic Base64 encoding. :type partition_key: string @@ -571,10 +572,12 @@ class KinesisConnection(AWSQueryConnection): partition key hash. :type sequence_number_for_ordering: string - :param sequence_number_for_ordering: The sequence number to use as the - initial number for the partition key. Subsequent calls to - `PutRecord` from the same client and for the same partition key - will increase from the `SequenceNumberForOrdering` value. + :param sequence_number_for_ordering: Guarantees strictly increasing + sequence numbers, for puts from the same client and to the same + partition key. Usage: set the `SequenceNumberForOrdering` of record + n to the sequence number of record n-1 (as returned in the + PutRecordResult when putting record n-1 ). If this parameter is not + set, records will be coarsely ordered based on arrival time. :type b64_encode: boolean :param b64_encode: Whether to Base64 encode `data`. Can be set to diff --git a/awx/lib/site-packages/boto/manage/cmdshell.py b/awx/lib/site-packages/boto/manage/cmdshell.py index 0da1c7a3e1..0d726412ee 100644 --- a/awx/lib/site-packages/boto/manage/cmdshell.py +++ b/awx/lib/site-packages/boto/manage/cmdshell.py @@ -118,7 +118,7 @@ class SSHClient(object): def run(self, command): """ Execute a command on the remote host. Return a tuple containing - an integer status and a two strings, the first containing stdout + an integer status and two strings, the first containing stdout and the second containing stderr from the command. 
""" boto.log.debug('running:%s on %s' % (command, self.server.instance_id)) @@ -182,7 +182,7 @@ class LocalClient(object): log_fp = StringIO.StringIO() process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - while process.poll() == None: + while process.poll() is None: time.sleep(1) t = process.communicate() log_fp.write(t[0]) diff --git a/awx/lib/site-packages/boto/manage/server.py b/awx/lib/site-packages/boto/manage/server.py index 49ed55bbf5..885db651e1 100644 --- a/awx/lib/site-packages/boto/manage/server.py +++ b/awx/lib/site-packages/boto/manage/server.py @@ -137,7 +137,7 @@ class CommandLineGetter(object): def get_region(self, params): region = params.get('region', None) - if isinstance(region, str) or isinstance(region, unicode): + if isinstance(region, basestring): region = boto.ec2.get_region(region) params['region'] = region if not region: @@ -189,7 +189,7 @@ class CommandLineGetter(object): def get_group(self, params): group = params.get('group', None) - if isinstance(group, str) or isinstance(group, unicode): + if isinstance(group, basestring): group_list = self.ec2.get_all_security_groups() for g in group_list: if g.name == group: @@ -202,7 +202,7 @@ class CommandLineGetter(object): def get_key(self, params): keypair = params.get('keypair', None) - if isinstance(keypair, str) or isinstance(keypair, unicode): + if isinstance(keypair, basestring): key_list = self.ec2.get_all_key_pairs() for k in key_list: if k.name == keypair: @@ -323,7 +323,7 @@ class Server(Model): i = 0 elastic_ip = params.get('elastic_ip') instances = reservation.instances - if elastic_ip != None and instances.__len__() > 0: + if elastic_ip is not None and instances.__len__() > 0: instance = instances[0] print 'Waiting for instance to start so we can set its elastic IP address...' # Sometimes we get a message from ec2 that says that the instance does not exist. 
diff --git a/awx/lib/site-packages/boto/manage/task.py b/awx/lib/site-packages/boto/manage/task.py index 8271529a30..5d273c3193 100644 --- a/awx/lib/site-packages/boto/manage/task.py +++ b/awx/lib/site-packages/boto/manage/task.py @@ -105,7 +105,7 @@ class Task(Model): stdout=subprocess.PIPE, stderr=subprocess.PIPE) nsecs = 5 current_timeout = vtimeout - while process.poll() == None: + while process.poll() is None: boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout)) if nsecs >= current_timeout: current_timeout += vtimeout diff --git a/awx/lib/site-packages/boto/manage/volume.py b/awx/lib/site-packages/boto/manage/volume.py index 841c124769..fae9df464d 100644 --- a/awx/lib/site-packages/boto/manage/volume.py +++ b/awx/lib/site-packages/boto/manage/volume.py @@ -136,7 +136,7 @@ class Volume(Model): if size < self.size: size = self.size ec2 = self.get_ec2_connection() - if self.zone_name == None or self.zone_name == '': + if self.zone_name is None or self.zone_name == '': # deal with the migration case where the zone is not set in the logical volume: current_volume = ec2.get_all_volumes([self.volume_id])[0] self.zone_name = current_volume.zone @@ -155,7 +155,7 @@ class Volume(Model): def get_ec2_connection(self): if self.server: return self.server.ec2 - if not hasattr(self, 'ec2') or self.ec2 == None: + if not hasattr(self, 'ec2') or self.ec2 is None: self.ec2 = boto.ec2.connect_to_region(self.region_name) return self.ec2 @@ -209,7 +209,7 @@ class Volume(Model): def detach(self, force=False): state = self.attachment_state - if state == 'available' or state == None or state == 'detaching': + if state == 'available' or state is None or state == 'detaching': print 'already detached' return None ec2 = self.get_ec2_connection() @@ -218,7 +218,7 @@ class Volume(Model): self.put() def checkfs(self, use_cmd=None): - if self.server == None: + if self.server is None: raise ValueError('server attribute must be set to run this command') # detemine state of file system on volume, only works if attached if use_cmd: @@ -233,7 +233,7 @@ class Volume(Model): return True def wait(self): - if self.server == None: + if self.server is None: raise ValueError('server attribute must be set to run this command') with closing(self.server.get_cmdshell()) as cmd: # wait for the volume device to appear @@ -243,7 +243,7 @@ class Volume(Model): time.sleep(10) def format(self): - if self.server == None: + if self.server is None: raise ValueError('server attribute must be set to run this command') status = None with closing(self.server.get_cmdshell()) as cmd: @@ -253,7 +253,7 @@ class Volume(Model): return status def mount(self): - if self.server == None: + if self.server is None: raise ValueError('server attribute must be set to run this command') boto.log.info('handle_mount_point') with closing(self.server.get_cmdshell()) as cmd: @@ -302,7 +302,7 @@ class Volume(Model): # we need to freeze the XFS file system try: self.freeze() - if self.server == None: + if self.server is None: snapshot = self.get_ec2_connection().create_snapshot(self.volume_id) else: snapshot = self.server.ec2.create_snapshot(self.volume_id) diff --git a/awx/lib/site-packages/boto/mashups/order.py b/awx/lib/site-packages/boto/mashups/order.py index 6efdc3ecab..c4deebfff5 100644 --- a/awx/lib/site-packages/boto/mashups/order.py +++ b/awx/lib/site-packages/boto/mashups/order.py @@ -179,7 +179,7 @@ class Order(IObject): item.ami.id, item.groups, item.key.name) def place(self, block=True): - if get_domain() == None: + if get_domain() is None: 
print 'SDB Persistence Domain not set' domain_name = self.get_string('Specify SDB Domain') set_domain(domain_name) diff --git a/awx/lib/site-packages/boto/mturk/connection.py b/awx/lib/site-packages/boto/mturk/connection.py index ed7ba9d23a..ff011ff652 100644 --- a/awx/lib/site-packages/boto/mturk/connection.py +++ b/awx/lib/site-packages/boto/mturk/connection.py @@ -46,7 +46,8 @@ class MTurkConnection(AWSQueryConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host=None, debug=0, - https_connection_factory=None): + https_connection_factory=None, security_token=None, + profile_name=None): if not host: if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True': host = 'mechanicalturk.sandbox.amazonaws.com' @@ -58,7 +59,9 @@ class MTurkConnection(AWSQueryConnection): aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, host, debug, - https_connection_factory) + https_connection_factory, + security_token=security_token, + profile_name=profile_name) def _required_auth_capability(self): return ['mturk'] diff --git a/awx/lib/site-packages/boto/mturk/layoutparam.py b/awx/lib/site-packages/boto/mturk/layoutparam.py index 781f981daa..de7989554e 100644 --- a/awx/lib/site-packages/boto/mturk/layoutparam.py +++ b/awx/lib/site-packages/boto/mturk/layoutparam.py @@ -22,7 +22,7 @@ class LayoutParameters(object): def __init__(self, layoutParameters=None): - if layoutParameters == None: + if layoutParameters is None: layoutParameters = [] self.layoutParameters = layoutParameters diff --git a/awx/lib/site-packages/boto/mturk/qualification.py b/awx/lib/site-packages/boto/mturk/qualification.py index 4b518c89e5..4fc230f9df 100644 --- a/awx/lib/site-packages/boto/mturk/qualification.py +++ b/awx/lib/site-packages/boto/mturk/qualification.py @@ -22,7 +22,7 @@ class Qualifications(object): def __init__(self, requirements=None): - if requirements == None: + if requirements is None: requirements = [] self.requirements = requirements diff --git a/awx/lib/site-packages/boto/mws/connection.py b/awx/lib/site-packages/boto/mws/connection.py index d9a2003558..7c068b52de 100644 --- a/awx/lib/site-packages/boto/mws/connection.py +++ b/awx/lib/site-packages/boto/mws/connection.py @@ -78,7 +78,7 @@ def http_body(field): def decorator(func): def wrapper(*args, **kw): - if filter(lambda x: not x in kw, (field, 'content_type')): + if any([f not in kw for f in (field, 'content_type')]): message = "{0} requires {1} and content_type arguments for " \ "building HTTP body".format(func.action, field) raise KeyError(message) @@ -94,16 +94,18 @@ def http_body(field): return decorator -def destructure_object(value, into={}, prefix=''): +def destructure_object(value, into, prefix=''): if isinstance(value, ResponseElement): - for name, attr in value.__dict__.items(): + destructure_object(value.__dict__, into, prefix=prefix) + elif isinstance(value, dict): + for name, attr in value.iteritems(): if name.startswith('_'): continue - destructure_object(attr, into=into, prefix=prefix + '.' + name) - elif filter(lambda x: isinstance(value, x), (list, set, tuple)): - for index, element in [(prefix + '.' + str(i + 1), value[i]) - for i in range(len(value))]: - destructure_object(element, into=into, prefix=index) + destructure_object(attr, into, prefix=prefix + '.' + name) + elif any([isinstance(value, typ) for typ in (list, set, tuple)]): + for index, element in enumerate(value): + newprefix = prefix + '.' 
+ str(index + 1) + destructure_object(element, into, prefix=newprefix) elif isinstance(value, bool): into[prefix] = str(value).lower() else: @@ -116,10 +118,10 @@ def structured_objects(*fields): def wrapper(*args, **kw): for field in filter(kw.has_key, fields): - destructure_object(kw.pop(field), into=kw, prefix=field) + destructure_object(kw.pop(field), kw, prefix=field) return func(*args, **kw) - wrapper.__doc__ = "{0}\nObjects: {1}".format(func.__doc__, - ', '.join(fields)) + wrapper.__doc__ = "{0}\nObjects|dicts: {1}".format(func.__doc__, + ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator @@ -203,7 +205,7 @@ def boolean_arguments(*fields): def decorator(func): def wrapper(*args, **kw): - for field in filter(lambda x: isinstance(kw.get(x), bool), fields): + for field in [f for f in fields if isinstance(kw.get(f), bool)]: kw[field] = str(kw[field]).lower() return func(*args, **kw) wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__, @@ -256,11 +258,13 @@ class MWSConnection(AWSQueryConnection): def _required_auth_capability(self): return ['mws'] - def post_request(self, path, params, cls, body='', headers={}, isXML=True): + def post_request(self, path, params, cls, body='', headers=None, + isXML=True): """Make a POST request, optionally with a content body, and return the response, optionally as raw text. Modelled off of the inherited get_object/make_request flow. """ + headers = headers or {} request = self.build_base_http_request('POST', path, None, data=body, params=params, headers=headers, host=self.host) @@ -321,9 +325,10 @@ class MWSConnection(AWSQueryConnection): @structured_lists('MarketplaceIdList.Id') @requires(['FeedType']) @api_action('Feeds', 15, 120) - def submit_feed(self, path, response, headers={}, body='', **kw): + def submit_feed(self, path, response, headers=None, body='', **kw): """Uploads a feed for processing by Amazon MWS. """ + headers = headers or {} return self.post_request(path, kw, response, body=body, headers=headers) diff --git a/awx/lib/site-packages/boto/mws/response.py b/awx/lib/site-packages/boto/mws/response.py index 064f5d7f1f..0960e46e5f 100644 --- a/awx/lib/site-packages/boto/mws/response.py +++ b/awx/lib/site-packages/boto/mws/response.py @@ -609,7 +609,7 @@ class ProductCategory(ResponseElement): class GetProductCategoriesResult(ResponseElement): - Self = Element(ProductCategory) + Self = ElementList(ProductCategory) class GetProductCategoriesForSKUResult(GetProductCategoriesResult): diff --git a/awx/lib/site-packages/boto/opsworks/__init__.py b/awx/lib/site-packages/boto/opsworks/__init__.py index e69de29bb2..71bc720953 100644 --- a/awx/lib/site-packages/boto/opsworks/__init__.py +++ b/awx/lib/site-packages/boto/opsworks/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon Kinesis service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.opsworks.layer1 import OpsWorksConnection + return get_regions('opsworks', connection_cls=OpsWorksConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/provider.py b/awx/lib/site-packages/boto/provider.py index 6f77faaf64..2febdc9933 100644 --- a/awx/lib/site-packages/boto/provider.py +++ b/awx/lib/site-packages/boto/provider.py @@ -57,6 +57,7 @@ STORAGE_CLASS_HEADER_KEY = 'storage-class' MFA_HEADER_KEY = 'mfa-header' SERVER_SIDE_ENCRYPTION_KEY = 'server-side-encryption-header' VERSION_ID_HEADER_KEY = 'version-id-header' +RESTORE_HEADER_KEY = 'restore-header' STORAGE_COPY_ERROR = 'StorageCopyError' STORAGE_CREATE_ERROR = 'StorageCreateError' @@ -68,8 +69,10 @@ STORAGE_RESPONSE_ERROR = 'StorageResponseError' class Provider(object): CredentialMap = { - 'aws': ('aws_access_key_id', 'aws_secret_access_key'), - 'google': ('gs_access_key_id', 'gs_secret_access_key'), + 'aws': ('aws_access_key_id', 'aws_secret_access_key', + 'aws_security_token'), + 'google': ('gs_access_key_id', 'gs_secret_access_key', + None), } AclClassMap = { @@ -122,6 +125,7 @@ class Provider(object): VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id', STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class', MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa', + RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore', }, 'google': { HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX, @@ -144,6 +148,7 @@ class Provider(object): VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id', STORAGE_CLASS_HEADER_KEY: None, MFA_HEADER_KEY: None, + RESTORE_HEADER_KEY: None, } } @@ -165,18 +170,19 @@ class Provider(object): } def __init__(self, name, access_key=None, secret_key=None, - security_token=None): + security_token=None, profile_name=None): self.host = None self.port = None self.host_header = None self.access_key = access_key self.secret_key = secret_key self.security_token = security_token + self.profile_name = profile_name self.name = name self.acl_class = self.AclClassMap[self.name] self.canned_acls = self.CannedAclsMap[self.name] self._credential_expiry_time = None - self.get_credentials(access_key, secret_key) + self.get_credentials(access_key, secret_key, security_token, profile_name) self.configure_headers() self.configure_errors() # Allow config file to override default host and port. 
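
The provider.py changes here thread a new ``profile_name`` argument through ``Provider`` and extend ``CredentialMap`` with a security-token slot; the hunk that follows resolves keys in order: explicit arguments, environment variables, a named ``[profile ...]`` section, then ``[Credentials]``. A usage sketch under those rules (the profile name and the ``~/.boto`` contents are invented for illustration):

```python
# Sketch only: assumes a hypothetical ~/.boto file containing
#
#   [profile dev]
#   aws_access_key_id = AKIAEXAMPLEKEY
#   aws_secret_access_key = examplesecretkey
#
from boto.provider import Provider

# With no explicit keys and no matching environment variables set,
# the credential lookup falls through to the "profile dev" section.
provider = Provider('aws', profile_name='dev')
print provider.access_key    # -> 'AKIAEXAMPLEKEY'
```
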
@@ -239,14 +245,18 @@ class Provider(object): else: return False - def get_credentials(self, access_key=None, secret_key=None): - access_key_name, secret_key_name = self.CredentialMap[self.name] + def get_credentials(self, access_key=None, secret_key=None, + security_token=None, profile_name=None): + access_key_name, secret_key_name, security_token_name = self.CredentialMap[self.name] if access_key is not None: self.access_key = access_key boto.log.debug("Using access key provided by client.") elif access_key_name.upper() in os.environ: self.access_key = os.environ[access_key_name.upper()] boto.log.debug("Using access key found in environment variable.") + elif config.has_option("profile %s" % profile_name, access_key_name): + self.access_key = config.get("profile %s" % profile_name, access_key_name) + boto.log.debug("Using access key found in config file: profile %s." % profile_name) elif config.has_option('Credentials', access_key_name): self.access_key = config.get('Credentials', access_key_name) boto.log.debug("Using access key found in config file.") @@ -257,6 +267,9 @@ class Provider(object): elif secret_key_name.upper() in os.environ: self.secret_key = os.environ[secret_key_name.upper()] boto.log.debug("Using secret key found in environment variable.") + elif config.has_option("profile %s" % profile_name, secret_key_name): + self.secret_key = config.get("profile %s" % profile_name, secret_key_name) + boto.log.debug("Using secret key found in config file: profile %s." % profile_name) elif config.has_option('Credentials', secret_key_name): self.secret_key = config.get('Credentials', secret_key_name) boto.log.debug("Using secret key found in config file.") @@ -273,6 +286,24 @@ class Provider(object): keyring_name, self.access_key) boto.log.debug("Using secret key found in keyring.") + if security_token is not None: + self.security_token = security_token + boto.log.debug("Using security token provided by client.") + elif ((security_token_name is not None) and + (access_key is None) and (secret_key is None)): + # Only provide a token from the environment/config if the + # caller did not specify a key and secret. 
Otherwise an + # environment/config token could be paired with a + # different set of credentials provided by the caller + if security_token_name.upper() in os.environ: + self.security_token = os.environ[security_token_name.upper()] + boto.log.debug("Using security token found in environment" + " variable.") + elif config.has_option('Credentials', security_token_name): + self.security_token = config.get('Credentials', + security_token_name) + boto.log.debug("Using security token found in config file.") + if ((self._access_key is None or self._secret_key is None) and self.MetadataServiceSupport[self.name]): self._populate_keys_from_metadata_server() @@ -332,6 +363,7 @@ class Provider(object): self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY] self.version_id = header_info_map[VERSION_ID_HEADER_KEY] self.mfa_header = header_info_map[MFA_HEADER_KEY] + self.restore_header = header_info_map[RESTORE_HEADER_KEY] def configure_errors(self): error_map = self.ErrorMap[self.name] diff --git a/awx/lib/site-packages/boto/pyami/config.py b/awx/lib/site-packages/boto/pyami/config.py index 28b6f6d860..6669cc052e 100644 --- a/awx/lib/site-packages/boto/pyami/config.py +++ b/awx/lib/site-packages/boto/pyami/config.py @@ -42,7 +42,7 @@ BotoConfigLocations = [BotoConfigPath] UserConfigPath = os.path.join(expanduser('~'), '.boto') BotoConfigLocations.append(UserConfigPath) -# If there's a BOTO_CONFIG variable set, we load ONLY +# If there's a BOTO_CONFIG variable set, we load ONLY # that variable if 'BOTO_CONFIG' in os.environ: BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])] @@ -149,14 +149,14 @@ class Config(ConfigParser.SafeConfigParser): except: val = default return val - + def getint(self, section, name, default=0): try: val = ConfigParser.SafeConfigParser.getint(self, section, name) except: val = int(default) return val - + def getfloat(self, section, name, default=0.0): try: val = ConfigParser.SafeConfigParser.getfloat(self, section, name) @@ -174,13 +174,13 @@ class Config(ConfigParser.SafeConfigParser): else: val = default return val - + def setbool(self, section, name, value): if value: self.set(section, name, 'true') else: self.set(section, name, 'false') - + def dump(self): s = StringIO.StringIO() self.write(s) @@ -196,7 +196,7 @@ class Config(ConfigParser.SafeConfigParser): fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option) else: fp.write('%s = %s\n' % (option, self.get(section, option))) - + def dump_to_sdb(self, domain_name, item_name): from boto.compat import json sdb = boto.connect_sdb() @@ -223,7 +223,7 @@ class Config(ConfigParser.SafeConfigParser): d = json.loads(item[section]) for attr_name in d.keys(): attr_value = d[attr_name] - if attr_value == None: + if attr_value is None: attr_value = 'None' if isinstance(attr_value, bool): self.setbool(section, attr_name, attr_value) diff --git a/awx/lib/site-packages/boto/pyami/installers/ubuntu/ebs.py b/awx/lib/site-packages/boto/pyami/installers/ubuntu/ebs.py index 45f5dbbe59..34d635fcc4 100644 --- a/awx/lib/site-packages/boto/pyami/installers/ubuntu/ebs.py +++ b/awx/lib/site-packages/boto/pyami/installers/ubuntu/ebs.py @@ -219,7 +219,7 @@ class EBSInstaller(Installer): # Set up the backup cleanup script minute = boto.config.get('EBS', 'backup_cleanup_cron_minute') hour = boto.config.get('EBS', 'backup_cleanup_cron_hour') - if (minute != None) and (hour != None): + if (minute is not None) and (hour is not None): # Snapshot clean up can either be done via the manage module, or via the new tag based # snapshot code, if 
the snapshots have been tagged with the name of the associated
            # volume. Check for the presence of the new configuration flag, and use the appropriate
diff --git a/awx/lib/site-packages/boto/rds/__init__.py b/awx/lib/site-packages/boto/rds/__init__.py
index d08d445d45..8e8afa81d4 100644
--- a/awx/lib/site-packages/boto/rds/__init__.py
+++ b/awx/lib/site-packages/boto/rds/__init__.py
@@ -31,6 +31,9 @@ from boto.rds.event import Event
 from boto.rds.regioninfo import RDSRegionInfo
 from boto.rds.dbsubnetgroup import DBSubnetGroup
 from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
+from boto.regioninfo import get_regions
+from boto.rds.logfile import LogFile, LogFileObject
+
 
 def regions():
     """
@@ -39,27 +42,11 @@
     :rtype: list
     :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
     """
-    return [RDSRegionInfo(name='us-east-1',
-                          endpoint='rds.amazonaws.com'),
-            RDSRegionInfo(name='us-gov-west-1',
-                          endpoint='rds.us-gov-west-1.amazonaws.com'),
-            RDSRegionInfo(name='eu-west-1',
-                          endpoint='rds.eu-west-1.amazonaws.com'),
-            RDSRegionInfo(name='us-west-1',
-                          endpoint='rds.us-west-1.amazonaws.com'),
-            RDSRegionInfo(name='us-west-2',
-                          endpoint='rds.us-west-2.amazonaws.com'),
-            RDSRegionInfo(name='sa-east-1',
-                          endpoint='rds.sa-east-1.amazonaws.com'),
-            RDSRegionInfo(name='ap-northeast-1',
-                          endpoint='rds.ap-northeast-1.amazonaws.com'),
-            RDSRegionInfo(name='ap-southeast-1',
-                          endpoint='rds.ap-southeast-1.amazonaws.com'),
-            RDSRegionInfo(name='ap-southeast-2',
-                          endpoint='rds.ap-southeast-2.amazonaws.com'),
-            RDSRegionInfo(name='cn-north-1',
-                          endpoint='rds.cn-north-1.amazonaws.com.cn'),
-            ]
+    return get_regions(
+        'rds',
+        region_cls=RDSRegionInfo,
+        connection_cls=RDSConnection
+    )
 
 
 def connect_to_region(region_name, **kw_params):
@@ -94,7 +81,8 @@ class RDSConnection(AWSQueryConnection):
                  is_secure=True, port=None, proxy=None, proxy_port=None,
                  proxy_user=None, proxy_pass=None, debug=0,
                  https_connection_factory=None, region=None, path='/',
-                 security_token=None, validate_certs=True):
+                 security_token=None, validate_certs=True,
+                 profile_name=None):
         if not region:
             region = RDSRegionInfo(self, self.DefaultRegionName,
                                    self.DefaultRegionEndpoint)
@@ -106,7 +94,8 @@ class RDSConnection(AWSQueryConnection):
                                             self.region.endpoint, debug,
                                             https_connection_factory, path,
                                             security_token,
-                                            validate_certs=validate_certs)
+                                            validate_certs=validate_certs,
+                                            profile_name=profile_name)
 
     def _required_auth_capability(self):
         return ['hmac-v4']
@@ -1087,6 +1076,91 @@ class RDSConnection(AWSQueryConnection):
         return self.get_list('DescribeDBSnapshots', params,
                              [('DBSnapshot', DBSnapshot)])
 
+    def get_all_logs(self, dbinstance_id, max_records=None, marker=None, file_size=None, filename_contains=None, file_last_written=None):
+        """
+        Get all log files
+
+        :type dbinstance_id: str
+        :param dbinstance_id: The identifier of a DBInstance.
+
+        :type max_records: int
+        :param max_records: Number of log file names to return.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :type file_size: int
+        :param file_size: Filter results to files larger than this size in bytes.
+
+        :type filename_contains: str
+        :param filename_contains: Filter results to files with filename containing this string.
+
+        :type file_last_written: int
+        :param file_last_written: Filter results to files written after this time (POSIX timestamp).
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.logfile.LogFile`
+        """
+        params = {'DBInstanceIdentifier': dbinstance_id}
+
+        if file_size:
+            params['FileSize'] = file_size
+
+        if filename_contains:
+            params['FilenameContains'] = filename_contains
+
+        if file_last_written:
+            params['FileLastWritten'] = file_last_written
+
+        if marker:
+            params['Marker'] = marker
+
+        if max_records:
+            params['MaxRecords'] = max_records
+
+        return self.get_list('DescribeDBLogFiles', params,
+                             [('DescribeDBLogFilesDetails', LogFile)])
+
+    def get_log_file(self, dbinstance_id, log_file_name, marker=None, number_of_lines=None, max_records=None):
+        """
+        Download a log file from RDS
+
+        :type dbinstance_id: str
+        :param dbinstance_id: The identifier of a DBInstance.
+
+        :type log_file_name: str
+        :param log_file_name: The name of the log file to retrieve
+
+        :type marker: str
+        :param marker: A marker returned from a previous call to this method, or 0 to indicate the start of file. If
+            no marker is specified, this will fetch log lines from the end of file instead.
+
+        :type number_of_lines: int
+        :param number_of_lines: The maximum number of lines to be returned.
+        """
+
+        params = {
+            'DBInstanceIdentifier': dbinstance_id,
+            'LogFileName': log_file_name,
+        }
+
+        if marker:
+            params['Marker'] = marker
+
+        if number_of_lines:
+            params['NumberOfLines'] = number_of_lines
+
+        if max_records:
+            params['MaxRecords'] = max_records
+
+        logfile = self.get_object('DownloadDBLogFilePortion', params, LogFileObject)
+
+        if logfile:
+            logfile.log_filename = log_file_name
+            logfile.dbinstance_id = dbinstance_id
+
+        return logfile
+
     def create_dbsnapshot(self, snapshot_id, dbinstance_id):
         """
         Create a new DB snapshot.
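
A usage sketch for the two log helpers added above (the region, instance identifier, and log file name are placeholders):

```python
# Sketch only: 'us-east-1', 'mydbinstance' and 'error/mysql-error.log'
# are placeholder values.
import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

# DescribeDBLogFiles: list up to ten log files for the instance.
for log in conn.get_all_logs('mydbinstance', max_records=10):
    print log.log_filename, log.size

# DownloadDBLogFilePortion: with no marker, lines are fetched from the
# end of the file.
portion = conn.get_log_file('mydbinstance', 'error/mysql-error.log',
                            number_of_lines=100)
print portion.data
```
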
@@ -1384,11 +1458,11 @@ class RDSConnection(AWSQueryConnection):
         :return: A list of :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
         """
         params = dict()
-        if name != None:
+        if name is not None:
             params['DBSubnetGroupName'] = name
-        if max_records != None:
+        if max_records is not None:
             params['MaxRecords'] = max_records
-        if marker != None:
+        if marker is not None:
             params['Marker'] = marker
         return self.get_list('DescribeDBSubnetGroups', params,
                              [('DBSubnetGroup', DBSubnetGroup)])
@@ -1407,9 +1481,9 @@ class RDSConnection(AWSQueryConnection):
         :return: The modified DBSubnetGroup
         """
         params = {'DBSubnetGroupName': name}
-        if description != None:
+        if description is not None:
             params['DBSubnetGroupDescription'] = description
-        if subnet_ids != None:
+        if subnet_ids is not None:
             self.build_list_params(params, subnet_ids, 'SubnetIds.member')
         return self.get_object('ModifyDBSubnetGroup', params, DBSubnetGroup)
diff --git a/awx/lib/site-packages/boto/rds/dbsubnetgroup.py b/awx/lib/site-packages/boto/rds/dbsubnetgroup.py
index 4b9fc58059..4f6bde8924 100644
--- a/awx/lib/site-packages/boto/rds/dbsubnetgroup.py
+++ b/awx/lib/site-packages/boto/rds/dbsubnetgroup.py
@@ -40,7 +40,7 @@ class DBSubnetGroup(object):
         self.connection = connection
         self.name = name
         self.description = description
-        if subnet_ids != None:
+        if subnet_ids is not None:
             self.subnet_ids = subnet_ids
         else:
             self.subnet_ids = []
diff --git a/awx/lib/site-packages/boto/rds/logfile.py b/awx/lib/site-packages/boto/rds/logfile.py
new file mode 100644
index 0000000000..dd80a6ff82
--- /dev/null
+++ b/awx/lib/site-packages/boto/rds/logfile.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2014 Jumping Qu http://newrice.blogspot.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+ +class LogFile(object): + + def __init__(self, connection=None): + self.connection = connection + self.size = None + self.log_filename = None + self.last_written = None + + def __repr__(self): + #return '(%s, %s, %s)' % (self.logfilename, self.size, self.lastwritten) + return '%s' % (self.log_filename) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'LastWritten': + self.last_written = value + elif name == 'LogFileName': + self.log_filename = value + elif name == 'Size': + self.size = value + else: + setattr(self, name, value) + + +class LogFileObject(object): + def __init__(self, connection=None): + self.connection = connection + self.log_filename = None + + def __repr__(self): + return "LogFileObject: %s/%s" % (self.dbinstance_id, self.log_filename) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'LogFileData': + self.data = value + elif name == 'AdditionalDataPending': + self.additional_data_pending = value + elif name == 'Marker': + self.marker = value + else: + setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/rds/parametergroup.py b/awx/lib/site-packages/boto/rds/parametergroup.py index e52890cf73..ade3b807e7 100644 --- a/awx/lib/site-packages/boto/rds/parametergroup.py +++ b/awx/lib/site-packages/boto/rds/parametergroup.py @@ -133,7 +133,7 @@ class Parameter(object): d[prefix+'ApplyMethod'] = self.apply_method def _set_string_value(self, value): - if not isinstance(value, str) or isinstance(value, unicode): + if not isinstance(value, basestring): raise ValueError('value must be of type str') if self.allowed_values: choices = self.allowed_values.split(',') @@ -142,7 +142,7 @@ class Parameter(object): self._value = value def _set_integer_value(self, value): - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): value = int(value) if isinstance(value, int) or isinstance(value, long): if self.allowed_values: @@ -156,7 +156,7 @@ class Parameter(object): def _set_boolean_value(self, value): if isinstance(value, bool): self._value = value - elif isinstance(value, str) or isinstance(value, unicode): + elif isinstance(value, basestring): if value.lower() == 'true': self._value = True else: @@ -175,7 +175,7 @@ class Parameter(object): raise TypeError('unknown type (%s)' % self.type) def get_value(self): - if self._value == None: + if self._value is None: return self._value if self.type == 'string': return self._value diff --git a/awx/lib/site-packages/boto/rds/regioninfo.py b/awx/lib/site-packages/boto/rds/regioninfo.py index 376dc9f047..5019aca90f 100644 --- a/awx/lib/site-packages/boto/rds/regioninfo.py +++ b/awx/lib/site-packages/boto/rds/regioninfo.py @@ -26,7 +26,8 @@ from boto.regioninfo import RegionInfo class RDSRegionInfo(RegionInfo): - def __init__(self, connection=None, name=None, endpoint=None): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): from boto.rds import RDSConnection super(RDSRegionInfo, self).__init__(connection, name, endpoint, RDSConnection) diff --git a/awx/lib/site-packages/boto/rds2/__init__.py b/awx/lib/site-packages/boto/rds2/__init__.py new file mode 100644 index 0000000000..023a0baa95 --- /dev/null +++ b/awx/lib/site-packages/boto/rds2/__init__.py @@ -0,0 +1,53 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import get_regions
+
+
+def regions():
+    """
+    Get all available regions for the RDS service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    from boto.rds2.layer1 import RDSConnection
+    return get_regions('rds', connection_cls=RDSConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+    """
+    Given a valid region name, return a
+    :class:`boto.rds2.layer1.RDSConnection`.
+    Any additional parameters after the region_name are passed on to
+    the connect method of the region object.
+
+    :type region_name: str
+    :param region_name: The name of the region to connect to.
+
+    :rtype: :class:`boto.rds2.layer1.RDSConnection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+             name is given
+    """
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff --git a/awx/lib/site-packages/boto/rds2/exceptions.py b/awx/lib/site-packages/boto/rds2/exceptions.py
new file mode 100644
index 0000000000..be610b0171
--- /dev/null
+++ b/awx/lib/site-packages/boto/rds2/exceptions.py
@@ -0,0 +1,234 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
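As a quick sanity check of the new rds2 entry points above, here is a minimal sketch of connecting through connect_to_region() and catching one of the typed faults this module defines (the region name and instance identifier are hypothetical placeholders, and AWS credentials are assumed to be configured):

    import boto.rds2
    from boto.rds2 import exceptions

    conn = boto.rds2.connect_to_region('us-east-1')
    try:
        result = conn.describe_db_instances(
            db_instance_identifier='mydbinstance')
    except exceptions.DBInstanceNotFound:
        # Service faults are mapped onto the exception classes defined
        # below, via RDSConnection._faults in boto/rds2/layer1.py.
        result = None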
+# +from boto.exception import JSONResponseError + + +class InvalidSubnet(JSONResponseError): + pass + + +class DBParameterGroupQuotaExceeded(JSONResponseError): + pass + + +class DBSubnetGroupAlreadyExists(JSONResponseError): + pass + + +class DBSubnetGroupQuotaExceeded(JSONResponseError): + pass + + +class InstanceQuotaExceeded(JSONResponseError): + pass + + +class InvalidRestore(JSONResponseError): + pass + + +class InvalidDBParameterGroupState(JSONResponseError): + pass + + +class AuthorizationQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupAlreadyExists(JSONResponseError): + pass + + +class InsufficientDBInstanceCapacity(JSONResponseError): + pass + + +class ReservedDBInstanceQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupNotFound(JSONResponseError): + pass + + +class DBInstanceAlreadyExists(JSONResponseError): + pass + + +class ReservedDBInstanceNotFound(JSONResponseError): + pass + + +class DBSubnetGroupDoesNotCoverEnoughAZs(JSONResponseError): + pass + + +class InvalidDBSecurityGroupState(JSONResponseError): + pass + + +class InvalidVPCNetworkState(JSONResponseError): + pass + + +class ReservedDBInstancesOfferingNotFound(JSONResponseError): + pass + + +class SNSTopicArnNotFound(JSONResponseError): + pass + + +class SNSNoAuthorization(JSONResponseError): + pass + + +class SnapshotQuotaExceeded(JSONResponseError): + pass + + +class OptionGroupQuotaExceeded(JSONResponseError): + pass + + +class DBParameterGroupNotFound(JSONResponseError): + pass + + +class SNSInvalidTopic(JSONResponseError): + pass + + +class InvalidDBSubnetGroupState(JSONResponseError): + pass + + +class DBSubnetGroupNotFound(JSONResponseError): + pass + + +class InvalidOptionGroupState(JSONResponseError): + pass + + +class SourceNotFound(JSONResponseError): + pass + + +class SubscriptionCategoryNotFound(JSONResponseError): + pass + + +class EventSubscriptionQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupNotSupported(JSONResponseError): + pass + + +class InvalidEventSubscriptionState(JSONResponseError): + pass + + +class InvalidDBSubnetState(JSONResponseError): + pass + + +class InvalidDBSnapshotState(JSONResponseError): + pass + + +class SubscriptionAlreadyExist(JSONResponseError): + pass + + +class DBSecurityGroupQuotaExceeded(JSONResponseError): + pass + + +class ProvisionedIopsNotAvailableInAZ(JSONResponseError): + pass + + +class AuthorizationNotFound(JSONResponseError): + pass + + +class OptionGroupAlreadyExists(JSONResponseError): + pass + + +class SubscriptionNotFound(JSONResponseError): + pass + + +class DBUpgradeDependencyFailure(JSONResponseError): + pass + + +class PointInTimeRestoreNotEnabled(JSONResponseError): + pass + + +class AuthorizationAlreadyExists(JSONResponseError): + pass + + +class DBSubnetQuotaExceeded(JSONResponseError): + pass + + +class OptionGroupNotFound(JSONResponseError): + pass + + +class DBParameterGroupAlreadyExists(JSONResponseError): + pass + + +class DBInstanceNotFound(JSONResponseError): + pass + + +class ReservedDBInstanceAlreadyExists(JSONResponseError): + pass + + +class InvalidDBInstanceState(JSONResponseError): + pass + + +class DBSnapshotNotFound(JSONResponseError): + pass + + +class DBSnapshotAlreadyExists(JSONResponseError): + pass + + +class StorageQuotaExceeded(JSONResponseError): + pass + + +class SubnetAlreadyInUse(JSONResponseError): + pass diff --git a/awx/lib/site-packages/boto/rds2/layer1.py b/awx/lib/site-packages/boto/rds2/layer1.py new file mode 100644 index 0000000000..1e2ba53793 --- /dev/null +++ 
b/awx/lib/site-packages/boto/rds2/layer1.py @@ -0,0 +1,3774 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +try: + import json +except ImportError: + import simplejson as json + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.rds2 import exceptions + + +class RDSConnection(AWSQueryConnection): + """ + Amazon Relational Database Service + Amazon Relational Database Service (Amazon RDS) is a web service + that makes it easier to set up, operate, and scale a relational + database in the cloud. It provides cost-efficient, resizable + capacity for an industry-standard relational database and manages + common database administration tasks, freeing up developers to + focus on what makes their applications and businesses unique. + + Amazon RDS gives you access to the capabilities of a familiar + MySQL or Oracle database server. This means the code, + applications, and tools you already use today with your existing + MySQL or Oracle databases work with Amazon RDS without + modification. Amazon RDS automatically backs up your database and + maintains the database software that powers your DB instance. + Amazon RDS is flexible: you can scale your database instance's + compute resources and storage capacity to meet your application's + demand. As with all Amazon Web Services, there are no up-front + investments, and you pay only for the resources you use. + + This is the Amazon RDS API Reference . It contains a comprehensive + description of all Amazon RDS Query APIs and data types. Note that + this API is asynchronous and some actions may require polling to + determine when an action has been applied. See the parameter + description to determine if a change is applied immediately or on + the next instance reboot or during the maintenance window. For + more information on Amazon RDS concepts and usage scenarios, go to + the `Amazon RDS User Guide`_. 
+ """ + APIVersion = "2013-09-09" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidSubnet": exceptions.InvalidSubnet, + "DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded, + "DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists, + "DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded, + "InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded, + "InvalidRestore": exceptions.InvalidRestore, + "InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState, + "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded, + "DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists, + "InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity, + "ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded, + "DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound, + "DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists, + "ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound, + "DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs, + "InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState, + "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState, + "ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound, + "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound, + "SNSNoAuthorization": exceptions.SNSNoAuthorization, + "SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded, + "OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded, + "DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound, + "SNSInvalidTopic": exceptions.SNSInvalidTopic, + "InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState, + "DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound, + "InvalidOptionGroupState": exceptions.InvalidOptionGroupState, + "SourceNotFound": exceptions.SourceNotFound, + "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound, + "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded, + "DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported, + "InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState, + "InvalidDBSubnetState": exceptions.InvalidDBSubnetState, + "InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState, + "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist, + "DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded, + "ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ, + "AuthorizationNotFound": exceptions.AuthorizationNotFound, + "OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists, + "SubscriptionNotFound": exceptions.SubscriptionNotFound, + "DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure, + "PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled, + "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists, + "DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded, + "OptionGroupNotFound": exceptions.OptionGroupNotFound, + "DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists, + "DBInstanceNotFound": exceptions.DBInstanceNotFound, + "ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists, + "InvalidDBInstanceState": exceptions.InvalidDBInstanceState, + "DBSnapshotNotFound": exceptions.DBSnapshotNotFound, + 
"DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists, + "StorageQuotaExceeded": exceptions.StorageQuotaExceeded, + "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(RDSConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_source_identifier_to_subscription(self, subscription_name, + source_identifier): + """ + Adds a source identifier to an existing RDS event notification + subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to add a source identifier to. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source to be added. An identifier must + begin with a letter and must contain only ASCII letters, digits, + and hyphens; it cannot end with a hyphen or contain two consecutive + hyphens. + + Constraints: + + + + If the source type is a DB instance, then a `DBInstanceIdentifier` + must be supplied. + + If the source type is a DB security group, a `DBSecurityGroupName` + must be supplied. + + If the source type is a DB parameter group, a `DBParameterGroupName` + must be supplied. + + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be + supplied. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SourceIdentifier': source_identifier, + } + return self._make_request( + action='AddSourceIdentifierToSubscription', + verb='POST', + path='/', params=params) + + def add_tags_to_resource(self, resource_name, tags): + """ + Adds metadata tags to an Amazon RDS resource. These tags can + also be used with cost allocation reporting to track cost + associated with Amazon RDS resources, or used in Condition + statement in IAM policy for Amazon RDS. + + For an overview on tagging Amazon RDS resources, see `Tagging + Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource the tags will be added + to. This value is an Amazon Resource Name (ARN). For information + about creating an ARN, see ` Constructing an RDS Amazon Resource + Name (ARN)`_. + + :type tags: list + :param tags: The tags to be assigned to the Amazon RDS resource. + + """ + params = {'ResourceName': resource_name, } + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='AddTagsToResource', + verb='POST', + path='/', params=params) + + def authorize_db_security_group_ingress(self, db_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_id=None, + ec2_security_group_owner_id=None): + """ + Enables ingress to a DBSecurityGroup using one of two forms of + authorization. First, EC2 or VPC security groups can be added + to the DBSecurityGroup if the application using the database + is running on EC2 or VPC instances. Second, IP ranges are + available if the application accessing your database is + running on the Internet. Required parameters for this API are + one of CIDR range, EC2SecurityGroupId for VPC, or + (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or + EC2SecurityGroupId for non-VPC). 
+ You cannot authorize ingress from an EC2 security group in one + Region to an Amazon RDS DB instance in another. You cannot + authorize ingress from a VPC security group in one VPC to an + Amazon RDS DB instance in another. + For an overview of CIDR ranges, go to the `Wikipedia + Tutorial`_. + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to add + authorization to. + + :type cidrip: string + :param cidrip: The IP range to authorize. + + :type ec2_security_group_name: string + :param ec2_security_group_name: Name of the EC2 security group to + authorize. For VPC DB security groups, `EC2SecurityGroupId` must be + provided. Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_id: string + :param ec2_security_group_id: Id of the EC2 security group to + authorize. For VPC DB security groups, `EC2SecurityGroupId` must be + provided. Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: AWS Account Number of the owner of + the EC2 security group specified in the EC2SecurityGroupName + parameter. The AWS Access Key ID is not an acceptable value. For + VPC DB security groups, `EC2SecurityGroupId` must be provided. + Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + """ + params = {'DBSecurityGroupName': db_security_group_name, } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_id is not None: + params['EC2SecurityGroupId'] = ec2_security_group_id + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='AuthorizeDBSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def copy_db_snapshot(self, source_db_snapshot_identifier, + target_db_snapshot_identifier, tags=None): + """ + Copies the specified DBSnapshot. The source DBSnapshot must be + in the "available" state. + + :type source_db_snapshot_identifier: string + :param source_db_snapshot_identifier: The identifier for the source DB + snapshot. + Constraints: + + + + Must be the identifier for a valid system snapshot in the "available" + state. + + + Example: `rds:mydb-2012-04-02-00-01` + + :type target_db_snapshot_identifier: string + :param target_db_snapshot_identifier: The identifier for the copied + snapshot. + Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-db-snapshot` + + :type tags: list + :param tags: A list of tags. 
+ + """ + params = { + 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier, + 'TargetDBSnapshotIdentifier': target_db_snapshot_identifier, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CopyDBSnapshot', + verb='POST', + path='/', params=params) + + def create_db_instance(self, db_instance_identifier, allocated_storage, + db_instance_class, engine, master_username, + master_user_password, db_name=None, + db_security_groups=None, + vpc_security_group_ids=None, + availability_zone=None, db_subnet_group_name=None, + preferred_maintenance_window=None, + db_parameter_group_name=None, + backup_retention_period=None, + preferred_backup_window=None, port=None, + multi_az=None, engine_version=None, + auto_minor_version_upgrade=None, + license_model=None, iops=None, + option_group_name=None, character_set_name=None, + publicly_accessible=None, tags=None): + """ + Creates a new DB instance. + + :type db_name: string + :param db_name: The meaning of this parameter differs according to the + database engine you use. + **MySQL** + + The name of the database to create when the DB instance is created. If + this parameter is not specified, no database is created in the DB + instance. + + Constraints: + + + + Must contain 1 to 64 alphanumeric characters + + Cannot be a word reserved by the specified database engine + + + Type: String + + **Oracle** + + The Oracle System ID (SID) of the created DB instance. + + Default: `ORCL` + + Constraints: + + + + Cannot be longer than 8 characters + + + **SQL Server** + + Not applicable. Must be null. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier. This + parameter is stored as a lowercase string. + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 + for SQL Server). + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + + Example: `mydbinstance` + + :type allocated_storage: integer + :param allocated_storage: The amount of storage (in gigabytes) to be + initially allocated for the database instance. + **MySQL** + + Constraints: Must be an integer from 5 to 1024. + + Type: Integer + + **Oracle** + + Constraints: Must be an integer from 10 to 1024. + + **SQL Server** + + Constraints: Must be an integer from 200 to 1024 (Standard Edition and + Enterprise Edition) or from 30 to 1024 (Express Edition and Web + Edition) + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the DB + instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` + + :type engine: string + :param engine: The name of the database engine to be used for this + instance. + Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` | + `sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web` + + :type master_username: string + :param master_username: + The name of master user for the client DB instance. + + **MySQL** + + Constraints: + + + + Must be 1 to 16 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + + Type: String + + **Oracle** + + Constraints: + + + + Must be 1 to 30 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. 
+ + + **SQL Server** + + Constraints: + + + + Must be 1 to 128 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + :type master_user_password: string + :param master_user_password: The password for the master database user. + Can be any printable ASCII character except "/", '"', or "@". + Type: String + + **MySQL** + + Constraints: Must contain from 8 to 41 characters. + + **Oracle** + + Constraints: Must contain from 8 to 30 characters. + + **SQL Server** + + Constraints: Must contain from 8 to 128 characters. + + :type db_security_groups: list + :param db_security_groups: A list of DB security groups to associate + with this DB instance. + Default: The default DB security group for the database engine. + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of EC2 VPC security groups to + associate with this DB instance. + Default: The default EC2 VPC security group for the DB subnet group's + VPC. + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone in the endpoint's + region. + + Example: `us-east-1d` + + Constraint: The AvailabilityZone parameter cannot be specified if the + MultiAZ parameter is set to `True`. The specified Availability Zone + must be in the same region as the current endpoint. + + :type db_subnet_group_name: string + :param db_subnet_group_name: A DB subnet group to associate with this + DB instance. + If there is no DB subnet group, then it is a non-VPC DB instance. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur. + Format: `ddd:hh24:mi-ddd:hh24:mi` + + Default: A 30-minute window selected at random from an 8-hour block of + time per region, occurring on a random day of the week. To see the + time blocks available, see ` Adjusting the Preferred Maintenance + Window`_ in the Amazon RDS User Guide. + + Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + + Constraints: Minimum 30-minute window. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group to associate with this DB instance. + If this argument is omitted, the default DBParameterGroup for the + specified engine will be used. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days for which automated backups are retained. Setting + this parameter to a positive number enables backups. Setting this + parameter to 0 disables automated backups. + + Default: 1 + + Constraints: + + + + Must be a value from 0 to 8 + + Cannot be set to 0 if the DB instance is a master instance with read + replicas + + :type preferred_backup_window: string + :param preferred_backup_window: The daily time range during which + automated backups are created if automated backups are enabled, + using the `BackupRetentionPeriod` parameter. + Default: A 30-minute window selected at random from an 8-hour block of + time per region. See the Amazon RDS User Guide for the time blocks + for each region from which the default backup windows are assigned. + + Constraints: Must be in the format `hh24:mi-hh24:mi`. 
Times should be + Universal Time Coordinated (UTC). Must not conflict with the + preferred maintenance window. Must be at least 30 minutes. + + :type port: integer + :param port: The port number on which the database accepts connections. + **MySQL** + + Default: `3306` + + Valid Values: `1150-65535` + + Type: Integer + + **Oracle** + + Default: `1521` + + Valid Values: `1150-65535` + + **SQL Server** + + Default: `1433` + + Valid Values: `1150-65535` except for `1434` and `3389`. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + You cannot set the AvailabilityZone parameter if the MultiAZ + parameter is set to true. + + :type engine_version: string + :param engine_version: The version number of the database engine to + use. + **MySQL** + + Example: `5.1.42` + + Type: String + + **Oracle** + + Example: `11.2.0.2.v2` + + Type: String + + **SQL Server** + + Example: `10.50.2789.0.v1` + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor engine upgrades + will be applied automatically to the DB instance during the + maintenance window. + Default: `True` + + :type license_model: string + :param license_model: License model information for this DB instance. + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: Indicates that the DB instance should be + associated with the specified option group. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type character_set_name: string + :param character_set_name: For supported engines, indicates that the DB + instance should be associated with the specified CharacterSet. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type tags: list + :param tags: A list of tags. 
+ + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'AllocatedStorage': allocated_storage, + 'DBInstanceClass': db_instance_class, + 'Engine': engine, + 'MasterUsername': master_username, + 'MasterUserPassword': master_user_password, + } + if db_name is not None: + params['DBName'] = db_name + if db_security_groups is not None: + self.build_list_params(params, + db_security_groups, + 'DBSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + if port is not None: + params['Port'] = port + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if character_set_name is not None: + params['CharacterSetName'] = character_set_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBInstance', + verb='POST', + path='/', params=params) + + def create_db_instance_read_replica(self, db_instance_identifier, + source_db_instance_identifier, + db_instance_class=None, + availability_zone=None, port=None, + auto_minor_version_upgrade=None, + iops=None, option_group_name=None, + publicly_accessible=None, tags=None): + """ + Creates a DB instance that acts as a read replica of a source + DB instance. + + All read replica DB instances are created as Single-AZ + deployments with backups disabled. All other DB instance + attributes (including DB security groups and DB parameter + groups) are inherited from the source DB instance, except as + specified below. + + The source DB instance must have backup retention enabled. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier of the read + replica. This is the unique key that identifies a DB instance. This + parameter is stored as a lowercase string. + + :type source_db_instance_identifier: string + :param source_db_instance_identifier: The identifier of the DB instance + that will act as the source for the read replica. Each DB instance + can have up to five read replicas. + Constraints: Must be the identifier of an existing DB instance that is + not already a read replica DB instance. + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the read + replica. 
+ Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` + + Default: Inherits from the source DB instance. + + :type availability_zone: string + :param availability_zone: The Amazon EC2 Availability Zone that the + read replica will be created in. + Default: A random, system-chosen Availability Zone in the endpoint's + region. + + Example: `us-east-1d` + + :type port: integer + :param port: The port number that the DB instance uses for connections. + Default: Inherits from the source DB instance + + Valid Values: `1150-65535` + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor engine upgrades + will be applied automatically to the read replica during the + maintenance window. + Default: Inherits from the source DB instance + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + + :type option_group_name: string + :param option_group_name: The option group the DB instance will be + associated with. If omitted, the default option group for the + engine specified will be used. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'SourceDBInstanceIdentifier': source_db_instance_identifier, + } + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if port is not None: + params['Port'] = port + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBInstanceReadReplica', + verb='POST', + path='/', params=params) + + def create_db_parameter_group(self, db_parameter_group_name, + db_parameter_group_family, description, + tags=None): + """ + Creates a new DB parameter group. + + A DB parameter group is initially created with the default + parameters for the database engine used by the DB instance. To + provide custom values for any of the parameters, you must + modify the group after creating it using + ModifyDBParameterGroup . 
Once you've created a DB parameter + group, you need to associate it with your DB instance using + ModifyDBInstance . When you associate a new DB parameter group + with a running DB instance, you need to reboot the DB Instance + for the new DB parameter group and associated settings to take + effect. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + This value is stored as a lower-case string. + + :type db_parameter_group_family: string + :param db_parameter_group_family: The DB parameter group family name. A + DB parameter group can be associated with one and only one DB + parameter group family, and can be applied only to a DB instance + running a database engine and engine version compatible with that + DB parameter group family. + + :type description: string + :param description: The description for the DB parameter group. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBParameterGroupName': db_parameter_group_name, + 'DBParameterGroupFamily': db_parameter_group_family, + 'Description': description, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBParameterGroup', + verb='POST', + path='/', params=params) + + def create_db_security_group(self, db_security_group_name, + db_security_group_description, tags=None): + """ + Creates a new DB security group. DB security groups control + access to a DB instance. + + :type db_security_group_name: string + :param db_security_group_name: The name for the DB security group. This + value is stored as a lowercase string. + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + Must not be "Default" + + May not contain spaces + + + Example: `mysecuritygroup` + + :type db_security_group_description: string + :param db_security_group_description: The description for the DB + security group. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBSecurityGroupName': db_security_group_name, + 'DBSecurityGroupDescription': db_security_group_description, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBSecurityGroup', + verb='POST', + path='/', params=params) + + def create_db_snapshot(self, db_snapshot_identifier, + db_instance_identifier, tags=None): + """ + Creates a DBSnapshot. The source DBInstance must be in + "available" state. + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: The identifier for the DB snapshot. + Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-snapshot-id` + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This is the unique key that identifies a DB + instance. This parameter isn't case sensitive. 
+ + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBSnapshotIdentifier': db_snapshot_identifier, + 'DBInstanceIdentifier': db_instance_identifier, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBSnapshot', + verb='POST', + path='/', params=params) + + def create_db_subnet_group(self, db_subnet_group_name, + db_subnet_group_description, subnet_ids, + tags=None): + """ + Creates a new DB subnet group. DB subnet groups must contain + at least one subnet in at least two AZs in the region. + + :type db_subnet_group_name: string + :param db_subnet_group_name: The name for the DB subnet group. This + value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters or + hyphens. Must not be "Default". + + Example: `mySubnetgroup` + + :type db_subnet_group_description: string + :param db_subnet_group_description: The description for the DB subnet + group. + + :type subnet_ids: list + :param subnet_ids: The EC2 Subnet IDs for the DB subnet group. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBSubnetGroupName': db_subnet_group_name, + 'DBSubnetGroupDescription': db_subnet_group_description, + } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBSubnetGroup', + verb='POST', + path='/', params=params) + + def create_event_subscription(self, subscription_name, sns_topic_arn, + source_type=None, event_categories=None, + source_ids=None, enabled=None, tags=None): + """ + Creates an RDS event notification subscription. This action + requires a topic ARN (Amazon Resource Name) created by either + the RDS console, the SNS console, or the SNS API. To obtain an + ARN with SNS, you must create a topic in Amazon SNS and + subscribe to the topic. The ARN is displayed in the SNS + console. + + You can specify the type of source (SourceType) you want to be + notified of, provide a list of RDS sources (SourceIds) that + triggers the events, and provide a list of event categories + (EventCategories) for events you want to be notified of. For + example, you can specify SourceType = db-instance, SourceIds = + mydbinstance1, mydbinstance2 and EventCategories = + Availability, Backup. + + If you specify both the SourceType and SourceIds, such as + SourceType = db-instance and SourceIdentifier = myDBInstance1, + you will be notified of all the db-instance events for the + specified source. If you specify a SourceType but do not + specify a SourceIdentifier, you will receive notice of the + events for that source type for all your RDS sources. If you + do not specify either the SourceType nor the SourceIdentifier, + you will be notified of events generated from all RDS sources + belonging to your customer account. + + :type subscription_name: string + :param subscription_name: The name of the subscription. + Constraints: The name must be less than 255 characters. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic + created for event notification. 
The ARN is created by Amazon SNS
+        when you create a topic and subscribe to it.
+
+        :type source_type: string
+        :param source_type: The type of source that will be generating the
+            events. For example, if you want to be notified of events generated
+            by a DB instance, you would set this parameter to db-instance. If
+            this value is not specified, all events are returned.
+        Valid values: db-instance | db-parameter-group | db-security-group |
+            db-snapshot
+
+        :type event_categories: list
+        :param event_categories: A list of event categories for a SourceType
+            that you want to subscribe to. You can see a list of the categories
+            for a given SourceType in the `Events`_ topic in the Amazon RDS
+            User Guide or by using the **DescribeEventCategories** action.
+
+        :type source_ids: list
+        :param source_ids:
+        The list of identifiers of the event sources for which events will be
+            returned. If not specified, then all sources are included in the
+            response. An identifier must begin with a letter and must contain
+            only ASCII letters, digits, and hyphens; it cannot end with a
+            hyphen or contain two consecutive hyphens.
+
+        Constraints:
+
+
+        + If SourceIds are supplied, SourceType must also be provided.
+        + If the source type is a DB instance, then a `DBInstanceIdentifier`
+          must be supplied.
+        + If the source type is a DB security group, a `DBSecurityGroupName`
+          must be supplied.
+        + If the source type is a DB parameter group, a `DBParameterGroupName`
+          must be supplied.
+        + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
+          supplied.
+
+        :type enabled: boolean
+        :param enabled: A Boolean value; set to **true** to activate the
+            subscription, set to **false** to create the subscription but not
+            activate it.
+
+        :type tags: list
+        :param tags: A list of tags.
+
+        """
+        params = {
+            'SubscriptionName': subscription_name,
+            'SnsTopicArn': sns_topic_arn,
+        }
+        if source_type is not None:
+            params['SourceType'] = source_type
+        if event_categories is not None:
+            self.build_list_params(params,
+                                   event_categories,
+                                   'EventCategories.member')
+        if source_ids is not None:
+            self.build_list_params(params,
+                                   source_ids,
+                                   'SourceIds.member')
+        if enabled is not None:
+            params['Enabled'] = str(
+                enabled).lower()
+        if tags is not None:
+            self.build_complex_list_params(
+                params, tags,
+                'Tags.member',
+                ('Key', 'Value'))
+        return self._make_request(
+            action='CreateEventSubscription',
+            verb='POST',
+            path='/', params=params)
+
+    def create_option_group(self, option_group_name, engine_name,
+                            major_engine_version, option_group_description,
+                            tags=None):
+        """
+        Creates a new option group. You can create up to 20 option
+        groups.
+
+        :type option_group_name: string
+        :param option_group_name: Specifies the name of the option group to be
+            created.
+        Constraints:
+
+
+        + Must be 1 to 255 alphanumeric characters or hyphens
+        + First character must be a letter
+        + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+        Example: `myoptiongroup`
+
+        :type engine_name: string
+        :param engine_name: Specifies the name of the engine that this option
+            group should be associated with.
+
+        :type major_engine_version: string
+        :param major_engine_version: Specifies the major version of the engine
+            that this option group should be associated with.
+
+        :type option_group_description: string
+        :param option_group_description: The description of the option group.
+
+        :type tags: list
+        :param tags: A list of tags.
+ + """ + params = { + 'OptionGroupName': option_group_name, + 'EngineName': engine_name, + 'MajorEngineVersion': major_engine_version, + 'OptionGroupDescription': option_group_description, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateOptionGroup', + verb='POST', + path='/', params=params) + + def delete_db_instance(self, db_instance_identifier, + skip_final_snapshot=None, + final_db_snapshot_identifier=None): + """ + The DeleteDBInstance action deletes a previously provisioned + DB instance. A successful response from the web service + indicates the request was received correctly. When you delete + a DB instance, all automated backups for that instance are + deleted and cannot be recovered. Manual DB snapshots of the DB + instance to be deleted are not deleted. + + If a final DB snapshot is requested the status of the RDS + instance will be "deleting" until the DB snapshot is created. + The API action `DescribeDBInstance` is used to monitor the + status of this operation. The action cannot be canceled or + reverted once submitted. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier for the DB instance to be deleted. This + parameter isn't case sensitive. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type skip_final_snapshot: boolean + :param skip_final_snapshot: Determines whether a final DB snapshot is + created before the DB instance is deleted. If `True` is specified, + no DBSnapshot is created. If false is specified, a DB snapshot is + created before the DB instance is deleted. + The FinalDBSnapshotIdentifier parameter must be specified if + SkipFinalSnapshot is `False`. + + Default: `False` + + :type final_db_snapshot_identifier: string + :param final_db_snapshot_identifier: + The DBSnapshotIdentifier of the new DBSnapshot created when + SkipFinalSnapshot is set to `False`. + + Specifying this parameter and also setting the SkipFinalShapshot + parameter to true results in an error. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if skip_final_snapshot is not None: + params['SkipFinalSnapshot'] = str( + skip_final_snapshot).lower() + if final_db_snapshot_identifier is not None: + params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier + return self._make_request( + action='DeleteDBInstance', + verb='POST', + path='/', params=params) + + def delete_db_parameter_group(self, db_parameter_group_name): + """ + Deletes a specified DBParameterGroup. The DBParameterGroup + cannot be associated with any RDS instances to be deleted. + The specified DB parameter group cannot be associated with any + DB instances. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. 
+ + Constraints: + + + + Must be the name of an existing DB parameter group + + You cannot delete a default DB parameter group + + Cannot be associated with any DB instances + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + return self._make_request( + action='DeleteDBParameterGroup', + verb='POST', + path='/', params=params) + + def delete_db_security_group(self, db_security_group_name): + """ + Deletes a DB security group. + The specified DB security group must not be associated with + any DB instances. + + :type db_security_group_name: string + :param db_security_group_name: + The name of the DB security group to delete. + + You cannot delete the default DB security group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + Must not be "Default" + + May not contain spaces + + """ + params = {'DBSecurityGroupName': db_security_group_name, } + return self._make_request( + action='DeleteDBSecurityGroup', + verb='POST', + path='/', params=params) + + def delete_db_snapshot(self, db_snapshot_identifier): + """ + Deletes a DBSnapshot. + The DBSnapshot must be in the `available` state to be deleted. + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: The DBSnapshot identifier. + Constraints: Must be the name of an existing DB snapshot in the + `available` state. + + """ + params = {'DBSnapshotIdentifier': db_snapshot_identifier, } + return self._make_request( + action='DeleteDBSnapshot', + verb='POST', + path='/', params=params) + + def delete_db_subnet_group(self, db_subnet_group_name): + """ + Deletes a DB subnet group. + The specified database subnet group must not be associated + with any DB instances. + + :type db_subnet_group_name: string + :param db_subnet_group_name: + The name of the database subnet group to delete. + + You cannot delete the default subnet group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + """ + params = {'DBSubnetGroupName': db_subnet_group_name, } + return self._make_request( + action='DeleteDBSubnetGroup', + verb='POST', + path='/', params=params) + + def delete_event_subscription(self, subscription_name): + """ + Deletes an RDS event notification subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to delete. + + """ + params = {'SubscriptionName': subscription_name, } + return self._make_request( + action='DeleteEventSubscription', + verb='POST', + path='/', params=params) + + def delete_option_group(self, option_group_name): + """ + Deletes an existing option group. + + :type option_group_name: string + :param option_group_name: + The name of the option group to be deleted. + + You cannot delete default option groups. + + """ + params = {'OptionGroupName': option_group_name, } + return self._make_request( + action='DeleteOptionGroup', + verb='POST', + path='/', params=params) + + def describe_db_engine_versions(self, engine=None, engine_version=None, + db_parameter_group_family=None, + max_records=None, marker=None, + default_only=None, + list_supported_character_sets=None): + """ + Returns a list of the available DB engines. + + :type engine: string + :param engine: The database engine to return. 
+ + :type engine_version: string + :param engine_version: The database engine version to return. + Example: `5.1.49` + + :type db_parameter_group_family: string + :param db_parameter_group_family: + The name of a specific DB parameter group family to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + :type default_only: boolean + :param default_only: Indicates that only the default version of the + specified engine or engine and major version combination is + returned. + + :type list_supported_character_sets: boolean + :param list_supported_character_sets: If this parameter is specified, + and if the requested engine supports the CharacterSetName parameter + for CreateDBInstance, the response includes a list of supported + character sets for each engine version. + + """ + params = {} + if engine is not None: + params['Engine'] = engine + if engine_version is not None: + params['EngineVersion'] = engine_version + if db_parameter_group_family is not None: + params['DBParameterGroupFamily'] = db_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + if default_only is not None: + params['DefaultOnly'] = str( + default_only).lower() + if list_supported_character_sets is not None: + params['ListSupportedCharacterSets'] = str( + list_supported_character_sets).lower() + return self._make_request( + action='DescribeDBEngineVersions', + verb='POST', + path='/', params=params) + + def describe_db_instances(self, db_instance_identifier=None, + filters=None, max_records=None, marker=None): + """ + Returns information about provisioned RDS instances. This API + supports pagination. + + :type db_instance_identifier: string + :param db_instance_identifier: + The user-supplied instance identifier. If this parameter is specified, + information from only the specific DB instance is returned. This + parameter isn't case sensitive. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBInstances request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords` . 
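A short sketch of the describe_db_engine_versions call defined above; the engine name is illustrative and the connection helper is assumed:

import boto.rds2

conn = boto.rds2.connect_to_region('us-east-1')  # hypothetical region
# default_only=True is serialized to the string 'true', mirroring the
# str(...).lower() handling in the method body above.
versions = conn.describe_db_engine_versions(engine='mysql',
                                            default_only=True)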
+ + """ + params = {} + if db_instance_identifier is not None: + params['DBInstanceIdentifier'] = db_instance_identifier + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBInstances', + verb='POST', + path='/', params=params) + + def describe_db_log_files(self, db_instance_identifier, + filename_contains=None, file_last_written=None, + file_size=None, max_records=None, marker=None): + """ + Returns a list of DB log files for the DB instance. + + :type db_instance_identifier: string + :param db_instance_identifier: + The customer-assigned name of the DB instance that contains the log + files you want to list. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filename_contains: string + :param filename_contains: Filters the available log files for log file + names that contain the specified string. + + :type file_last_written: long + :param file_last_written: Filters the available log files for files + written since the specified date, in POSIX timestamp format. + + :type file_size: long + :param file_size: Filters the available log files for files larger than + the specified size. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified MaxRecords + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + + :type marker: string + :param marker: The pagination token provided in the previous request. + If this parameter is specified the response includes only records + beyond the marker, up to MaxRecords. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if filename_contains is not None: + params['FilenameContains'] = filename_contains + if file_last_written is not None: + params['FileLastWritten'] = file_last_written + if file_size is not None: + params['FileSize'] = file_size + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBLogFiles', + verb='POST', + path='/', params=params) + + def describe_db_parameter_groups(self, db_parameter_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of `DBParameterGroup` descriptions. If a + `DBParameterGroupName` is specified, the list will contain + only the description of the specified DB parameter group. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of a specific DB parameter group to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. 
+ Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBParameterGroups` request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. + + """ + params = {} + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBParameterGroups', + verb='POST', + path='/', params=params) + + def describe_db_parameters(self, db_parameter_group_name, source=None, + max_records=None, marker=None): + """ + Returns the detailed parameter list for a particular DB + parameter group. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of a specific DB parameter group to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type source: string + :param source: The parameter types to return. + Default: All parameter types returned + + Valid Values: `user | system | engine-default` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBParameters` request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + if source is not None: + params['Source'] = source + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBParameters', + verb='POST', + path='/', params=params) + + def describe_db_security_groups(self, db_security_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of `DBSecurityGroup` descriptions. If a + `DBSecurityGroupName` is specified, the list will contain only + the descriptions of the specified DB security group. + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to + return details for. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBSecurityGroups request. If this parameter is specified, + the response includes only records beyond the marker, up to the + value specified by `MaxRecords`. 
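A sketch of describe_db_parameters restricted to one source type; the group name is hypothetical:

import boto.rds2

conn = boto.rds2.connect_to_region('us-east-1')  # hypothetical region
# Other valid sources per the docstring: 'system', 'engine-default'.
resp = conn.describe_db_parameters('mydbparametergroup', source='user')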
+ + """ + params = {} + if db_security_group_name is not None: + params['DBSecurityGroupName'] = db_security_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSecurityGroups', + verb='POST', + path='/', params=params) + + def describe_db_snapshots(self, db_instance_identifier=None, + db_snapshot_identifier=None, + snapshot_type=None, filters=None, + max_records=None, marker=None): + """ + Returns information about DB snapshots. This API supports + pagination. + + :type db_instance_identifier: string + :param db_instance_identifier: + A DB instance identifier to retrieve the list of DB snapshots for. + Cannot be used in conjunction with `DBSnapshotIdentifier`. This + parameter is not case sensitive. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: + A specific DB snapshot identifier to describe. Cannot be used in + conjunction with `DBInstanceIdentifier`. This value is stored as a + lowercase string. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + If this is the identifier of an automated snapshot, the + `SnapshotType` parameter must also be specified. + + :type snapshot_type: string + :param snapshot_type: The type of snapshots that will be returned. + Values can be "automated" or "manual." If not specified, the + returned results will include all snapshots types. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBSnapshots` request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {} + if db_instance_identifier is not None: + params['DBInstanceIdentifier'] = db_instance_identifier + if db_snapshot_identifier is not None: + params['DBSnapshotIdentifier'] = db_snapshot_identifier + if snapshot_type is not None: + params['SnapshotType'] = snapshot_type + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSnapshots', + verb='POST', + path='/', params=params) + + def describe_db_subnet_groups(self, db_subnet_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of DBSubnetGroup descriptions. If a + DBSubnetGroupName is specified, the list will contain only the + descriptions of the specified DBSubnetGroup. + + For an overview of CIDR ranges, go to the `Wikipedia + Tutorial`_. 
+ + :type db_subnet_group_name: string + :param db_subnet_group_name: The name of the DB subnet group to return + details for. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBSubnetGroups request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {} + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_engine_default_parameters(self, db_parameter_group_family, + max_records=None, marker=None): + """ + Returns the default engine and system parameter information + for the specified database engine. + + :type db_parameter_group_family: string + :param db_parameter_group_family: The name of the DB parameter group + family. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeEngineDefaultParameters` request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. + + """ + params = { + 'DBParameterGroupFamily': db_parameter_group_family, + } + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEngineDefaultParameters', + verb='POST', + path='/', params=params) + + def describe_event_categories(self, source_type=None): + """ + Displays a list of categories for all event source types, or, + if specified, for a specified source type. You can see a list + of the event categories and source types in the ` Events`_ + topic in the Amazon RDS User Guide. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. + Valid values: db-instance | db-parameter-group | db-security-group | + db-snapshot + + """ + params = {} + if source_type is not None: + params['SourceType'] = source_type + return self._make_request( + action='DescribeEventCategories', + verb='POST', + path='/', params=params) + + def describe_event_subscriptions(self, subscription_name=None, + filters=None, max_records=None, + marker=None): + """ + Lists all the subscription descriptions for a customer + account. The description for a subscription includes + SubscriptionName, SNSTopicARN, CustomerID, SourceType, + SourceID, CreationTime, and Status. 
+ + If you specify a SubscriptionName, lists the description for + that subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to describe. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOrderableDBInstanceOptions request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords` . + + """ + params = {} + if subscription_name is not None: + params['SubscriptionName'] = subscription_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEventSubscriptions', + verb='POST', + path='/', params=params) + + def describe_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, duration=None, + event_categories=None, max_records=None, marker=None): + """ + Returns events related to DB instances, DB security groups, DB + snapshots, and DB parameter groups for the past 14 days. + Events specific to a particular DB instance, DB security + group, database snapshot, or DB parameter group can be + obtained by providing the name as a parameter. By default, the + past hour of events are returned. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source for which events will be returned. + If not specified, then all sources are included in the response. + + Constraints: + + + + If SourceIdentifier is supplied, SourceType must also be provided. + + If the source type is `DBInstance`, then a `DBInstanceIdentifier` + must be supplied. + + If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must + be supplied. + + If the source type is `DBParameterGroup`, a `DBParameterGroupName` + must be supplied. + + If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be + supplied. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type source_type: string + :param source_type: The event source to retrieve events for. If no + value is specified, all events are returned. + + :type start_time: timestamp + :param start_time: The beginning of the time interval to retrieve + events for, specified in ISO 8601 format. For more information + about ISO 8601, go to the `ISO8601 Wikipedia page.`_ + Example: 2009-07-08T18:00Z + + :type end_time: timestamp + :param end_time: The end of the time interval for which to retrieve + events, specified in ISO 8601 format. For more information about + ISO 8601, go to the `ISO8601 Wikipedia page.`_ + Example: 2009-07-08T18:00Z + + :type duration: integer + :param duration: The number of minutes to retrieve events for. + Default: 60 + + :type event_categories: list + :param event_categories: A list of event categories that trigger + notifications for a event notification subscription. 
+ + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeEvents request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {} + if source_identifier is not None: + params['SourceIdentifier'] = source_identifier + if source_type is not None: + params['SourceType'] = source_type + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if duration is not None: + params['Duration'] = duration + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEvents', + verb='POST', + path='/', params=params) + + def describe_option_group_options(self, engine_name, + major_engine_version=None, + max_records=None, marker=None): + """ + Describes all available options. + + :type engine_name: string + :param engine_name: A required parameter. Options available for the + given Engine name will be described. + + :type major_engine_version: string + :param major_engine_version: If specified, filters the results to + include only options for the specified major engine version. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + """ + params = {'EngineName': engine_name, } + if major_engine_version is not None: + params['MajorEngineVersion'] = major_engine_version + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeOptionGroupOptions', + verb='POST', + path='/', params=params) + + def describe_option_groups(self, option_group_name=None, filters=None, + marker=None, max_records=None, + engine_name=None, major_engine_version=None): + """ + Describes the available option groups. + + :type option_group_name: string + :param option_group_name: The name of the option group to describe. + Cannot be supplied together with EngineName or MajorEngineVersion. + + :type filters: list + :param filters: + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOptionGroups request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. 
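A sketch of the describe_events call above, using the ISO 8601 form shown in its docstring; the identifier and time window are hypothetical:

import boto.rds2

conn = boto.rds2.connect_to_region('us-east-1')  # hypothetical region
events = conn.describe_events(source_identifier='mydbinstance',
                              source_type='db-instance',
                              start_time='2014-04-01T00:00Z',
                              end_time='2014-04-02T00:00Z')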
If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type engine_name: string + :param engine_name: Filters the list of option groups to only include + groups associated with a specific database engine. + + :type major_engine_version: string + :param major_engine_version: Filters the list of option groups to only + include groups associated with a specific database engine version. + If specified, then EngineName must also be specified. + + """ + params = {} + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if marker is not None: + params['Marker'] = marker + if max_records is not None: + params['MaxRecords'] = max_records + if engine_name is not None: + params['EngineName'] = engine_name + if major_engine_version is not None: + params['MajorEngineVersion'] = major_engine_version + return self._make_request( + action='DescribeOptionGroups', + verb='POST', + path='/', params=params) + + def describe_orderable_db_instance_options(self, engine, + engine_version=None, + db_instance_class=None, + license_model=None, vpc=None, + max_records=None, marker=None): + """ + Returns a list of orderable DB instance options for the + specified engine. + + :type engine: string + :param engine: The name of the engine to retrieve DB instance options + for. + + :type engine_version: string + :param engine_version: The engine version filter value. Specify this + parameter to show only the available offerings matching the + specified engine version. + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only the available offerings matching the + specified DB instance class. + + :type license_model: string + :param license_model: The license model filter value. Specify this + parameter to show only the available offerings matching the + specified license model. + + :type vpc: boolean + :param vpc: The VPC filter value. Specify this parameter to show only + the available VPC or non-VPC offerings. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOrderableDBInstanceOptions request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords` . 
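Since OptionGroupName cannot be combined with EngineName or MajorEngineVersion (per the docstring above), a sketch that filters by engine instead; the engine name and version are illustrative:

import boto.rds2

conn = boto.rds2.connect_to_region('us-east-1')  # hypothetical region
groups = conn.describe_option_groups(engine_name='oracle-ee',
                                     major_engine_version='11.2')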
+ + """ + params = {'Engine': engine, } + if engine_version is not None: + params['EngineVersion'] = engine_version + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if license_model is not None: + params['LicenseModel'] = license_model + if vpc is not None: + params['Vpc'] = str( + vpc).lower() + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeOrderableDBInstanceOptions', + verb='POST', + path='/', params=params) + + def describe_reserved_db_instances(self, reserved_db_instance_id=None, + reserved_db_instances_offering_id=None, + db_instance_class=None, duration=None, + product_description=None, + offering_type=None, multi_az=None, + filters=None, max_records=None, + marker=None): + """ + Returns information about reserved DB instances for this + account, or about a specified reserved DB instance. + + :type reserved_db_instance_id: string + :param reserved_db_instance_id: The reserved DB instance identifier + filter value. Specify this parameter to show only the reservation + that matches the specified reservation ID. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The offering identifier + filter value. Specify this parameter to show only purchased + reservations matching the specified offering identifier. + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only those reservations matching the + specified DB instances class. + + :type duration: string + :param duration: The duration filter value, specified in years or + seconds. Specify this parameter to show only reservations for this + duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: The product description filter value. + Specify this parameter to show only those reservations matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Specify this + parameter to show only the available offerings matching the + specified offering type. + Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type multi_az: boolean + :param multi_az: The Multi-AZ filter value. Specify this parameter to + show only those reservations matching the specified Multi-AZ + parameter. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. 
+ + """ + params = {} + if reserved_db_instance_id is not None: + params['ReservedDBInstanceId'] = reserved_db_instance_id + if reserved_db_instances_offering_id is not None: + params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedDBInstances', + verb='POST', + path='/', params=params) + + def describe_reserved_db_instances_offerings(self, + reserved_db_instances_offering_id=None, + db_instance_class=None, + duration=None, + product_description=None, + offering_type=None, + multi_az=None, + max_records=None, + marker=None): + """ + Lists available reserved DB instance offerings. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The offering identifier + filter value. Specify this parameter to show only the available + offering that matches the specified reservation identifier. + Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706` + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only the available offerings matching the + specified DB instance class. + + :type duration: string + :param duration: Duration filter value, specified in years or seconds. + Specify this parameter to show only reservations for this duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: Product description filter value. Specify + this parameter to show only the available offerings matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Specify this + parameter to show only the available offerings matching the + specified offering type. + Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type multi_az: boolean + :param multi_az: The Multi-AZ filter value. Specify this parameter to + show only the available offerings matching the specified Multi-AZ + parameter. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. 
+ + """ + params = {} + if reserved_db_instances_offering_id is not None: + params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedDBInstancesOfferings', + verb='POST', + path='/', params=params) + + def download_db_log_file_portion(self, db_instance_identifier, + log_file_name, marker=None, + number_of_lines=None): + """ + Downloads the last line of the specified log file. + + :type db_instance_identifier: string + :param db_instance_identifier: + The customer-assigned name of the DB instance that contains the log + files you want to list. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type log_file_name: string + :param log_file_name: The name of the log file to be downloaded. + + :type marker: string + :param marker: The pagination token provided in the previous request. + If this parameter is specified the response includes only records + beyond the marker, up to MaxRecords. + + :type number_of_lines: integer + :param number_of_lines: The number of lines remaining to be downloaded. + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'LogFileName': log_file_name, + } + if marker is not None: + params['Marker'] = marker + if number_of_lines is not None: + params['NumberOfLines'] = number_of_lines + return self._make_request( + action='DownloadDBLogFilePortion', + verb='POST', + path='/', params=params) + + def list_tags_for_resource(self, resource_name): + """ + Lists all tags on an Amazon RDS resource. + + For an overview on tagging an Amazon RDS resource, see + `Tagging Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource with tags to be listed. + This value is an Amazon Resource Name (ARN). For information about + creating an ARN, see ` Constructing an RDS Amazon Resource Name + (ARN)`_. + + """ + params = {'ResourceName': resource_name, } + return self._make_request( + action='ListTagsForResource', + verb='POST', + path='/', params=params) + + def modify_db_instance(self, db_instance_identifier, + allocated_storage=None, db_instance_class=None, + db_security_groups=None, + vpc_security_group_ids=None, + apply_immediately=None, master_user_password=None, + db_parameter_group_name=None, + backup_retention_period=None, + preferred_backup_window=None, + preferred_maintenance_window=None, multi_az=None, + engine_version=None, + allow_major_version_upgrade=None, + auto_minor_version_upgrade=None, iops=None, + option_group_name=None, + new_db_instance_identifier=None): + """ + Modify settings for a DB instance. You can change one or more + database configuration parameters by specifying these + parameters and the new values in the request. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This value is stored as a lowercase string. 
+ + Constraints: + + + + Must be the identifier for an existing DB instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type allocated_storage: integer + :param allocated_storage: The new storage capacity of the RDS instance. + Changing this parameter does not result in an outage and the change + is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + **MySQL** + + Default: Uses existing setting + + Valid Values: 5-1024 + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + Type: Integer + + **Oracle** + + Default: Uses existing setting + + Valid Values: 10-1024 + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + **SQL Server** + + Cannot be modified. + + If you choose to migrate your DB instance from using standard storage + to using Provisioned IOPS, or from using Provisioned IOPS to using + standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage + size, storage type (standard or Provisioned IOPS), amount of IOPS + provisioned (if any), and the number of prior scale storage + operations. Typical migration times are under 24 hours, but the + process can take up to several days in some cases. During the + migration, the DB instance will be available for use, but may + experience performance degradation. While the migration takes + place, nightly backups for the instance will be suspended. No other + Amazon RDS operations can take place for the instance, including + modifying the instance, rebooting the instance, deleting the + instance, creating a read replica for the instance, and creating a + DB snapshot of the instance. + + :type db_instance_class: string + :param db_instance_class: The new compute and memory capacity of the DB + instance. To determine the instance classes that are available for + a particular DB engine, use the DescribeOrderableDBInstanceOptions + action. + Passing a value for this parameter causes an outage during the change + and is applied during the next maintenance window, unless the + `ApplyImmediately` parameter is specified as `True` for this + request. + + Default: Uses existing setting + + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + :type db_security_groups: list + :param db_security_groups: + A list of DB security groups to authorize on this DB instance. Changing + this parameter does not result in an outage and the change is + asynchronously applied as soon as possible. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: + A list of EC2 VPC security groups to authorize on this DB instance. + This change is asynchronously applied as soon as possible. 
+ + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type apply_immediately: boolean + :param apply_immediately: Specifies whether or not the modifications in + this request and any pending modifications are asynchronously + applied as soon as possible, regardless of the + `PreferredMaintenanceWindow` setting for the DB instance. + If this parameter is passed as `False`, changes to the DB instance are + applied on the next call to RebootDBInstance, the next maintenance + reboot, or the next failure reboot, whichever occurs first. See + each parameter to determine when a change is applied. + + Default: `False` + + :type master_user_password: string + :param master_user_password: + The new password for the DB instance master user. Can be any printable + ASCII character except "/", '"', or "@". + + Changing this parameter does not result in an outage and the change is + asynchronously applied as soon as possible. Between the time of the + request and the completion of the request, the `MasterUserPassword` + element exists in the `PendingModifiedValues` element of the + operation response. + + Default: Uses existing setting + + Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 + alphanumeric characters (Oracle), or 8 to 128 alphanumeric + characters (SQL Server). + + Amazon RDS API actions never return the password, so this action + provides a way to regain access to a master instance user if the + password is lost. + + :type db_parameter_group_name: string + :param db_parameter_group_name: The name of the DB parameter group to + apply to this DB instance. Changing this parameter does not result + in an outage and the change is applied during the next maintenance + window unless the `ApplyImmediately` parameter is set to `True` for + this request. + Default: Uses existing setting + + Constraints: The DB parameter group must be in the same DB parameter + group family as this DB instance. + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days to retain automated backups. Setting this parameter + to a positive number enables backups. Setting this parameter to 0 + disables automated backups. + + Changing this parameter can result in an outage if you change from 0 to + a non-zero value or from a non-zero value to 0. These changes are + applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. If + you change the parameter from one non-zero value to another non- + zero value, the change is asynchronously applied as soon as + possible. + + Default: Uses existing setting + + Constraints: + + + + Must be a value from 0 to 8 + + Cannot be set to 0 if the DB instance is a master instance with read + replicas or if the DB instance is a read replica + + :type preferred_backup_window: string + :param preferred_backup_window: + The daily time range during which automated backups are created if + automated backups are enabled, as determined by the + `BackupRetentionPeriod`. Changing this parameter does not result in + an outage and the change is asynchronously applied as soon as + possible. 
+ + Constraints: + + + + Must be in the format hh24:mi-hh24:mi + + Times should be Universal Time Coordinated (UTC) + + Must not conflict with the preferred maintenance window + + Must be at least 30 minutes + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur, which may result in an + outage. Changing this parameter does not result in an outage, + except in the following situation, and the change is asynchronously + applied as soon as possible. If there are pending actions that + cause a reboot, and the maintenance window is changed to include + the current time, then changing this parameter will cause a reboot + of the DB instance. If moving this window to the current time, + there must be at least 30 minutes between the current time and end + of the window to ensure pending changes are applied. + Default: Uses existing setting + + Format: ddd:hh24:mi-ddd:hh24:mi + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Must be at least 30 minutes + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Changing this parameter does not result in an outage and the change + is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + Constraints: Cannot be specified if the DB instance is a read replica. + + :type engine_version: string + :param engine_version: The version number of the database engine to + upgrade to. Changing this parameter results in an outage and the + change is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + For major version upgrades, if a non-default DB parameter group is + currently in use, a new DB parameter group in the DB parameter + group family for the new engine version must be specified. The new + DB parameter group can be the default for that DB parameter group + family. + + Example: `5.1.42` + + :type allow_major_version_upgrade: boolean + :param allow_major_version_upgrade: Indicates that major version + upgrades are allowed. Changing this parameter does not result in an + outage and the change is asynchronously applied as soon as + possible. + Constraints: This parameter must be set to true when specifying a value + for the EngineVersion parameter that is a different major version + than the DB instance's current version. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. Changing this parameter does not result in + an outage except in the following case and the change is + asynchronously applied as soon as possible. An outage will result + if this parameter is set to `True` during the maintenance window, + and a newer minor version is available, and RDS has enabled auto + patching for that engine version. + + :type iops: integer + :param iops: The new Provisioned IOPS (I/O operations per second) value + for the RDS instance. Changing this parameter does not result in an + outage and the change is applied during the next maintenance window + unless the `ApplyImmediately` parameter is set to `True` for this + request. + Default: Uses existing setting + + Constraints: Value supplied must be at least 10% greater than the + current value. 
Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + Type: Integer + + If you choose to migrate your DB instance from using standard storage + to using Provisioned IOPS, or from using Provisioned IOPS to using + standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage + size, storage type (standard or Provisioned IOPS), amount of IOPS + provisioned (if any), and the number of prior scale storage + operations. Typical migration times are under 24 hours, but the + process can take up to several days in some cases. During the + migration, the DB instance will be available for use, but may + experience performance degradation. While the migration takes + place, nightly backups for the instance will be suspended. No other + Amazon RDS operations can take place for the instance, including + modifying the instance, rebooting the instance, deleting the + instance, creating a read replica for the instance, and creating a + DB snapshot of the instance. + + :type option_group_name: string + :param option_group_name: Indicates that the DB instance should be + associated with the specified option group. Changing this parameter + does not result in an outage except in the following case and the + change is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. If + the parameter change results in an option group that enables OEM, + this change can cause a brief (sub-second) period during which new + connections are rejected but existing connections are not + interrupted. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type new_db_instance_identifier: string + :param new_db_instance_identifier: + The new DB instance identifier for the DB instance when renaming a DB + Instance. This value is stored as a lowercase string. 
+ + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if allocated_storage is not None: + params['AllocatedStorage'] = allocated_storage + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if db_security_groups is not None: + self.build_list_params(params, + db_security_groups, + 'DBSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + if master_user_password is not None: + params['MasterUserPassword'] = master_user_password + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if allow_major_version_upgrade is not None: + params['AllowMajorVersionUpgrade'] = str( + allow_major_version_upgrade).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if new_db_instance_identifier is not None: + params['NewDBInstanceIdentifier'] = new_db_instance_identifier + return self._make_request( + action='ModifyDBInstance', + verb='POST', + path='/', params=params) + + def modify_db_parameter_group(self, db_parameter_group_name, parameters): + """ + Modifies the parameters of a DB parameter group. To modify + more than one parameter, submit a list of the following: + `ParameterName`, `ParameterValue`, and `ApplyMethod`. A + maximum of 20 parameters can be modified in a single request. + + The `apply-immediate` method can be used only for dynamic + parameters; the `pending-reboot` method can be used with MySQL + and Oracle DB instances for either dynamic or static + parameters. For Microsoft SQL Server DB instances, the + `pending-reboot` method can be used only for static + parameters. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be the name of an existing DB parameter group + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type parameters: list + :param parameters: + An array of parameter names, values, and the apply method for the + parameter update. At least one parameter name, value, and apply + method must be supplied; subsequent arguments are optional. A + maximum of 20 parameters may be modified in a single request. + + Valid Values (for the application method): `immediate | pending-reboot` + + You can use the immediate value with dynamic parameters only. 
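A sketch tying together the ApplyImmediately semantics above; identifiers and values are hypothetical, chosen to respect the 10% storage-increase and 0-8 retention constraints from the docstring:

import boto.rds2

conn = boto.rds2.connect_to_region('us-east-1')  # hypothetical region
# apply_immediately=True applies the changes now instead of waiting
# for the next maintenance window; it is lowered to the string 'true'.
conn.modify_db_instance('mydbinstance',
                        allocated_storage=110,  # >= 10% over an old 100
                        backup_retention_period=7,
                        apply_immediately=True)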
You can + use the pending-reboot value for both dynamic and static + parameters, and changes are applied when DB instance reboots. + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod')) + return self._make_request( + action='ModifyDBParameterGroup', + verb='POST', + path='/', params=params) + + def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids, + db_subnet_group_description=None): + """ + Modifies an existing DB subnet group. DB subnet groups must + contain at least one subnet in at least two AZs in the region. + + :type db_subnet_group_name: string + :param db_subnet_group_name: The name for the DB subnet group. This + value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters or + hyphens. Must not be "Default". + + Example: `mySubnetgroup` + + :type db_subnet_group_description: string + :param db_subnet_group_description: The description for the DB subnet + group. + + :type subnet_ids: list + :param subnet_ids: The EC2 subnet IDs for the DB subnet group. + + """ + params = {'DBSubnetGroupName': db_subnet_group_name, } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + if db_subnet_group_description is not None: + params['DBSubnetGroupDescription'] = db_subnet_group_description + return self._make_request( + action='ModifyDBSubnetGroup', + verb='POST', + path='/', params=params) + + def modify_event_subscription(self, subscription_name, + sns_topic_arn=None, source_type=None, + event_categories=None, enabled=None): + """ + Modifies an existing RDS event notification subscription. Note + that you cannot modify the source identifiers using this call; + to change source identifiers for a subscription, use the + AddSourceIdentifierToSubscription and + RemoveSourceIdentifierFromSubscription calls. + + You can see a list of the event categories for a given + SourceType in the `Events`_ topic in the Amazon RDS User Guide + or by using the **DescribeEventCategories** action. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic + created for event notification. The ARN is created by Amazon SNS + when you create a topic and subscribe to it. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. For example, if you want to be notified of events generated + by a DB instance, you would set this parameter to db-instance. if + this value is not specified, all events are returned. + Valid values: db-instance | db-parameter-group | db-security-group | + db-snapshot + + :type event_categories: list + :param event_categories: A list of event categories for a SourceType + that you want to subscribe to. You can see a list of the categories + for a given SourceType in the `Events`_ topic in the Amazon RDS + User Guide or by using the **DescribeEventCategories** action. + + :type enabled: boolean + :param enabled: A Boolean value; set to **true** to activate the + subscription. 
+ + """ + params = {'SubscriptionName': subscription_name, } + if sns_topic_arn is not None: + params['SnsTopicArn'] = sns_topic_arn + if source_type is not None: + params['SourceType'] = source_type + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if enabled is not None: + params['Enabled'] = str( + enabled).lower() + return self._make_request( + action='ModifyEventSubscription', + verb='POST', + path='/', params=params) + + def modify_option_group(self, option_group_name, options_to_include=None, + options_to_remove=None, apply_immediately=None): + """ + Modifies an existing option group. + + :type option_group_name: string + :param option_group_name: The name of the option group to be modified. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type options_to_include: list + :param options_to_include: Options in this list are added to the option + group or, if already present, the specified configuration is used + to update the existing configuration. + + :type options_to_remove: list + :param options_to_remove: Options in this list are removed from the + option group. + + :type apply_immediately: boolean + :param apply_immediately: Indicates whether the changes should be + applied immediately, or during the next maintenance window for each + instance associated with the option group. + + """ + params = {'OptionGroupName': option_group_name, } + if options_to_include is not None: + self.build_complex_list_params( + params, options_to_include, + 'OptionsToInclude.member', + ('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')) + if options_to_remove is not None: + self.build_list_params(params, + options_to_remove, + 'OptionsToRemove.member') + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + return self._make_request( + action='ModifyOptionGroup', + verb='POST', + path='/', params=params) + + def promote_read_replica(self, db_instance_identifier, + backup_retention_period=None, + preferred_backup_window=None): + """ + Promotes a read replica DB instance to a standalone DB + instance. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier. This value + is stored as a lowercase string. + Constraints: + + + + Must be the identifier for an existing read replica DB instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: mydbinstance + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days to retain automated backups. Setting this parameter + to a positive number enables backups. Setting this parameter to 0 + disables automated backups. + + Default: 1 + + Constraints: + + + + Must be a value from 0 to 8 + + :type preferred_backup_window: string + :param preferred_backup_window: The daily time range during which + automated backups are created if automated backups are enabled, + using the `BackupRetentionPeriod` parameter. + Default: A 30-minute window selected at random from an 8-hour block of + time per region. See the Amazon RDS User Guide for the time blocks + for each region from which the default backup windows are assigned. 
+ + Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be + Universal Time Coordinated (UTC). Must not conflict with the + preferred maintenance window. Must be at least 30 minutes. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + return self._make_request( + action='PromoteReadReplica', + verb='POST', + path='/', params=params) + + def purchase_reserved_db_instances_offering(self, + reserved_db_instances_offering_id, + reserved_db_instance_id=None, + db_instance_count=None, + tags=None): + """ + Purchases a reserved DB instance offering. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The ID of the Reserved DB + instance offering to purchase. + Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + + :type reserved_db_instance_id: string + :param reserved_db_instance_id: Customer-specified identifier to track + this reservation. + Example: myreservationID + + :type db_instance_count: integer + :param db_instance_count: The number of instances to reserve. + Default: `1` + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id, + } + if reserved_db_instance_id is not None: + params['ReservedDBInstanceId'] = reserved_db_instance_id + if db_instance_count is not None: + params['DBInstanceCount'] = db_instance_count + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='PurchaseReservedDBInstancesOffering', + verb='POST', + path='/', params=params) + + def reboot_db_instance(self, db_instance_identifier, force_failover=None): + """ + Rebooting a DB instance restarts the database engine service. + A reboot also applies to the DB instance any modifications to + the associated DB parameter group that were pending. Rebooting + a DB instance results in a momentary outage of the instance, + during which the DB instance status is set to rebooting. If + the RDS instance is configured for MultiAZ, it is possible + that the reboot will be conducted through a failover. An + Amazon RDS event is created when the reboot is completed. + + If your DB instance is deployed in multiple Availability + Zones, you can force a failover from one AZ to the other + during the reboot. You might force a failover to test the + availability of your DB instance deployment or to restore + operations to the original AZ after a failover occurs. + + The time required to reboot is a function of the specific + database engine's crash recovery process. To improve the + reboot time, we recommend that you reduce database activities + as much as possible during the reboot process to reduce + rollback activity for in-transit transactions. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This parameter is stored as a lowercase + string. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type force_failover: boolean + :param force_failover: When `True`, the reboot will be conducted + through a MultiAZ failover. 
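A recurring convention in these request builders: optional booleans such as `ApplyImmediately`, `MultiAZ`, and the `ForceFailover` flag documented here are marshalled with `str(value).lower()`, so the query API receives the literal strings `true`/`false`, while `None` means the parameter is omitted. A minimal sketch of the pattern (the helper name is illustrative, not part of boto):

def set_bool_param(params, key, value):
    # None means "omit the parameter"; the service then applies its default.
    if value is not None:
        params[key] = str(value).lower()   # True -> 'true', False -> 'false'

params = {'DBInstanceIdentifier': 'mydbinstance'}
set_bool_param(params, 'ForceFailover', True)
# params == {'DBInstanceIdentifier': 'mydbinstance', 'ForceFailover': 'true'}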
+ Constraint: You cannot specify `True` if the instance is not configured + for MultiAZ. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if force_failover is not None: + params['ForceFailover'] = str( + force_failover).lower() + return self._make_request( + action='RebootDBInstance', + verb='POST', + path='/', params=params) + + def remove_source_identifier_from_subscription(self, subscription_name, + source_identifier): + """ + Removes a source identifier from an existing RDS event + notification subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to remove a source identifier from. + + :type source_identifier: string + :param source_identifier: The source identifier to be removed from the + subscription, such as the **DB instance identifier** for a DB + instance or the name of a security group. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SourceIdentifier': source_identifier, + } + return self._make_request( + action='RemoveSourceIdentifierFromSubscription', + verb='POST', + path='/', params=params) + + def remove_tags_from_resource(self, resource_name, tag_keys): + """ + Removes metadata tags from an Amazon RDS resource. + + For an overview on tagging an Amazon RDS resource, see + `Tagging Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource the tags will be removed + from. This value is an Amazon Resource Name (ARN). For information + about creating an ARN, see ` Constructing an RDS Amazon Resource + Name (ARN)`_. + + :type tag_keys: list + :param tag_keys: The tag key (name) of the tag to be removed. + + """ + params = {'ResourceName': resource_name, } + self.build_list_params(params, + tag_keys, + 'TagKeys.member') + return self._make_request( + action='RemoveTagsFromResource', + verb='POST', + path='/', params=params) + + def reset_db_parameter_group(self, db_parameter_group_name, + reset_all_parameters=None, parameters=None): + """ + Modifies the parameters of a DB parameter group to the + engine/system default value. To reset specific parameters + submit a list of the following: `ParameterName` and + `ApplyMethod`. To reset the entire DB parameter group, specify + the `DBParameterGroup` name and `ResetAllParameters` + parameters. When resetting the entire group, dynamic + parameters are updated immediately and static parameters are + set to `pending-reboot` to take effect on the next DB instance + restart or `RebootDBInstance` request. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type reset_all_parameters: boolean + :param reset_all_parameters: Specifies whether ( `True`) or not ( + `False`) to reset all parameters in the DB parameter group to + default values. + Default: `True` + + :type parameters: list + :param parameters: An array of parameter names, values, and the apply + method for the parameter update. At least one parameter name, + value, and apply method must be supplied; subsequent arguments are + optional. A maximum of 20 parameters may be modified in a single + request. + **MySQL** + + Valid Values (for Apply method): `immediate` | `pending-reboot` + + You can use the immediate value with dynamic parameters only. 
You can
+            use the `pending-reboot` value for both dynamic and static
+            parameters, and changes are applied when DB instance reboots.
+
+        **Oracle**
+
+        Valid Values (for Apply method): `pending-reboot`
+
+        """
+        params = {'DBParameterGroupName': db_parameter_group_name, }
+        if reset_all_parameters is not None:
+            params['ResetAllParameters'] = str(
+                reset_all_parameters).lower()
+        if parameters is not None:
+            self.build_complex_list_params(
+                params, parameters,
+                'Parameters.member',
+                ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
+        return self._make_request(
+            action='ResetDBParameterGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
+                                             db_snapshot_identifier,
+                                             db_instance_class=None,
+                                             port=None,
+                                             availability_zone=None,
+                                             db_subnet_group_name=None,
+                                             multi_az=None,
+                                             publicly_accessible=None,
+                                             auto_minor_version_upgrade=None,
+                                             license_model=None,
+                                             db_name=None, engine=None,
+                                             iops=None,
+                                             option_group_name=None,
+                                             tags=None):
+        """
+        Creates a new DB instance from a DB snapshot. The target
+        database is created from the source database restore point
+        with the same configuration as the original source database,
+        except that the new RDS instance is created with the default
+        security group.
+
+        :type db_instance_identifier: string
+        :param db_instance_identifier:
+            Name of the DB instance to create from the DB snapshot. This
+            parameter isn't case sensitive.
+
+        Constraints:
+
+
+        + Must contain from 1 to 63 alphanumeric characters or hyphens
+        + First character must be a letter
+        + Cannot end with a hyphen or contain two consecutive hyphens
+
+        :type db_snapshot_identifier: string
+        :param db_snapshot_identifier: The identifier for the DB snapshot to
+            restore from.
+        Constraints:
+
+
+        + Must contain from 1 to 255 alphanumeric characters or hyphens
+        + First character must be a letter
+        + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+        Example: `my-snapshot-id`
+
+        :type db_instance_class: string
+        :param db_instance_class: The compute and memory capacity of the Amazon
+            RDS DB instance.
+        Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
+            db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
+
+        :type port: integer
+        :param port: The port number on which the database accepts connections.
+        Default: The same port as the original DB instance
+
+        Constraints: Value must be `1150-65535`
+
+        :type availability_zone: string
+        :param availability_zone: The EC2 Availability Zone that the database
+            instance will be created in.
+        Default: A random, system-chosen Availability Zone.
+
+        Constraint: You cannot specify the AvailabilityZone parameter if the
+            MultiAZ parameter is set to `True`.
+
+        Example: `us-east-1a`
+
+        :type db_subnet_group_name: string
+        :param db_subnet_group_name: The DB subnet group name to use for the
+            new instance.
+
+        :type multi_az: boolean
+        :param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
+        Constraint: You cannot specify the AvailabilityZone parameter if the
+            MultiAZ parameter is set to `True`.
+
+        :type publicly_accessible: boolean
+        :param publicly_accessible: Specifies the accessibility options for the
+            DB instance. A value of true specifies an Internet-facing instance
+            with a publicly resolvable DNS name, which resolves to a public IP
+            address.
A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. + + :type license_model: string + :param license_model: License model information for the restored DB + instance. + Default: Same as source. + + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type db_name: string + :param db_name: + The database name for the restored DB instance. + + + This parameter doesn't apply to the MySQL engine. + + :type engine: string + :param engine: The database engine to use for the new instance. + Default: The same as source + + Constraint: Must be compatible with the engine of the source + + Example: `oracle-ee` + + :type iops: integer + :param iops: Specifies the amount of provisioned IOPS for the DB + instance, expressed in I/O operations per second. If this parameter + is not specified, the IOPS value will be taken from the backup. If + this parameter is set to 0, the new instance will be converted to a + non-PIOPS instance, which will take additional time, though your DB + instance will be available for connections before the conversion + starts. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: The name of the option group to be used for + the restored DB instance. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type tags: list + :param tags: A list of tags. 
+ + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'DBSnapshotIdentifier': db_snapshot_identifier, + } + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if db_name is not None: + params['DBName'] = db_name + if engine is not None: + params['Engine'] = engine + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='RestoreDBInstanceFromDBSnapshot', + verb='POST', + path='/', params=params) + + def restore_db_instance_to_point_in_time(self, + source_db_instance_identifier, + target_db_instance_identifier, + restore_time=None, + use_latest_restorable_time=None, + db_instance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None, + multi_az=None, + publicly_accessible=None, + auto_minor_version_upgrade=None, + license_model=None, + db_name=None, engine=None, + iops=None, + option_group_name=None, + tags=None): + """ + Restores a DB instance to an arbitrary point-in-time. Users + can restore to any point in time before the + latestRestorableTime for up to backupRetentionPeriod days. The + target database is created from the source database with the + same configuration as the original database except that the DB + instance is created with the default DB security group. + + :type source_db_instance_identifier: string + :param source_db_instance_identifier: + The identifier of the source DB instance from which to restore. + + Constraints: + + + + Must be the identifier of an existing database instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type target_db_instance_identifier: string + :param target_db_instance_identifier: + The name of the new database instance to be created. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type restore_time: timestamp + :param restore_time: The date and time to restore from. + Valid Values: Value must be a UTC time + + Constraints: + + + + Must be before the latest restorable time for the DB instance + + Cannot be specified if UseLatestRestorableTime parameter is true + + + Example: `2009-09-07T23:45:00Z` + + :type use_latest_restorable_time: boolean + :param use_latest_restorable_time: Specifies whether ( `True`) or not ( + `False`) the DB instance is restored from the latest backup time. + Default: `False` + + Constraints: Cannot be specified if RestoreTime parameter is provided. 
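To make the `RestoreTime`/`UseLatestRestorableTime` exclusivity concrete, a hedged usage sketch (the remaining parameters of this docstring continue below; `conn` is assumed to be an RDS connection from this module, and the identifiers are illustrative):

# Restore to an explicit UTC timestamp ...
conn.restore_db_instance_to_point_in_time(
    'sourcedb', 'restored-db-0907',
    restore_time='2009-09-07T23:45:00Z')

# ... or to the latest restorable time, but never both at once.
conn.restore_db_instance_to_point_in_time(
    'sourcedb', 'restored-db-latest',
    use_latest_restorable_time=True)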
+ + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the Amazon + RDS DB instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + Default: The same DBInstanceClass as the original DB instance. + + :type port: integer + :param port: The port number on which the database accepts connections. + Constraints: Value must be `1150-65535` + + Default: The same port as the original DB instance. + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone. + + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to true. + + Example: `us-east-1a` + + :type db_subnet_group_name: string + :param db_subnet_group_name: The DB subnet group name to use for the + new instance. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to `True`. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. + + :type license_model: string + :param license_model: License model information for the restored DB + instance. + Default: Same as source. + + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type db_name: string + :param db_name: + The database name for the restored DB instance. + + + This parameter is not used for the MySQL engine. + + :type engine: string + :param engine: The database engine to use for the new instance. + Default: The same as source + + Constraint: Must be compatible with the engine of the source + + Example: `oracle-ee` + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: The name of the option group to be used for + the restored DB instance. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type tags: list + :param tags: A list of tags. 
+ + """ + params = { + 'SourceDBInstanceIdentifier': source_db_instance_identifier, + 'TargetDBInstanceIdentifier': target_db_instance_identifier, + } + if restore_time is not None: + params['RestoreTime'] = restore_time + if use_latest_restorable_time is not None: + params['UseLatestRestorableTime'] = str( + use_latest_restorable_time).lower() + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if db_name is not None: + params['DBName'] = db_name + if engine is not None: + params['Engine'] = engine + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='RestoreDBInstanceToPointInTime', + verb='POST', + path='/', params=params) + + def revoke_db_security_group_ingress(self, db_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_id=None, + ec2_security_group_owner_id=None): + """ + Revokes ingress from a DBSecurityGroup for previously + authorized IP ranges or EC2 or VPC Security Groups. Required + parameters for this API are one of CIDRIP, EC2SecurityGroupId + for VPC, or (EC2SecurityGroupOwnerId and either + EC2SecurityGroupName or EC2SecurityGroupId). + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to + revoke ingress from. + + :type cidrip: string + :param cidrip: The IP range to revoke access from. Must be a valid CIDR + range. If `CIDRIP` is specified, `EC2SecurityGroupName`, + `EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be + provided. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group to + revoke access from. For VPC DB security groups, + `EC2SecurityGroupId` must be provided. Otherwise, + EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or + `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_id: string + :param ec2_security_group_id: The id of the EC2 security group to + revoke access from. For VPC DB security groups, + `EC2SecurityGroupId` must be provided. Otherwise, + EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or + `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS Account Number of the owner + of the EC2 security group specified in the `EC2SecurityGroupName` + parameter. The AWS Access Key ID is not an acceptable value. For + VPC DB security groups, `EC2SecurityGroupId` must be provided. + Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. 
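The three mutually exclusive identification styles described above, as hedged usage sketches (the implementation follows below; group names, ids, and account numbers are illustrative):

# 1. Classic DB security group, by CIDR range:
conn.revoke_db_security_group_ingress('mygroup', cidrip='203.0.113.0/24')

# 2. VPC security group, by EC2 security group id:
conn.revoke_db_security_group_ingress(
    'mygroup', ec2_security_group_id='sg-12345678')

# 3. EC2-Classic security group, by name plus owner account:
conn.revoke_db_security_group_ingress(
    'mygroup',
    ec2_security_group_name='web-sg',
    ec2_security_group_owner_id='123456789012')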
+ + """ + params = {'DBSecurityGroupName': db_security_group_name, } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_id is not None: + params['EC2SecurityGroupId'] = ec2_security_group_id + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='RevokeDBSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/awx/lib/site-packages/boto/redshift/__init__.py b/awx/lib/site-packages/boto/redshift/__init__.py index 1019e895a5..f98ececd75 100644 --- a/awx/lib/site-packages/boto/redshift/__init__.py +++ b/awx/lib/site-packages/boto/redshift/__init__.py @@ -20,7 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,27 +31,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.redshift.layer1 import RedshiftConnection - cls = RedshiftConnection - return [ - RegionInfo(name='us-east-1', - endpoint='redshift.us-east-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='us-west-2', - endpoint='redshift.us-west-2.amazonaws.com', - connection_cls=cls), - RegionInfo(name='eu-west-1', - endpoint='redshift.eu-west-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-northeast-1', - endpoint='redshift.ap-northeast-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-southeast-1', - endpoint='redshift.ap-southeast-1.amazonaws.com', - connection_cls=cls), - RegionInfo(name='ap-southeast-2', - endpoint='redshift.ap-southeast-2.amazonaws.com', - connection_cls=cls), - ] + return get_regions('redshift', connection_cls=RedshiftConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/regioninfo.py b/awx/lib/site-packages/boto/regioninfo.py index 6e936b3793..29ebb1e30b 100644 --- a/awx/lib/site-packages/boto/regioninfo.py +++ b/awx/lib/site-packages/boto/regioninfo.py @@ -20,6 +20,131 @@ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. +from __future__ import with_statement +import os + +import boto +from boto.compat import json +from boto.exception import BotoClientError + + +def load_endpoint_json(path): + """ + Loads a given JSON file & returns it. + + :param path: The path to the JSON file + :type path: string + + :returns: The loaded data + """ + with open(path, 'r') as endpoints_file: + return json.load(endpoints_file) + + +def merge_endpoints(defaults, additions): + """ + Given an existing set of endpoint data, this will deep-update it with + any similarly structured data in the additions. 
+
+    :param defaults: The existing endpoints data
+    :type defaults: dict
+
+    :param additions: The additional endpoints data
+    :type additions: dict
+
+    :returns: The modified endpoints data
+    :rtype: dict
+    """
+    # We can't just do a ``defaults.update(...)`` here, as that could
+    # *overwrite* regions if present in both.
+    # We'll iterate instead, essentially doing a deeper merge.
+    for service, region_info in additions.items():
+        # Set the default, if not present, to an empty dict.
+        defaults.setdefault(service, {})
+        defaults[service].update(region_info)
+
+    return defaults
+
+
+def load_regions():
+    """
+    Actually load the region/endpoint information from the JSON files.
+
+    By default, this loads from the default included ``boto/endpoints.json``
+    file.
+
+    Users can override/extend this by supplying either a ``BOTO_ENDPOINTS``
+    environment variable or a ``endpoints_path`` config variable, either of
+    which should be an absolute path to the user's JSON file.
+
+    :returns: The endpoints data
+    :rtype: dict
+    """
+    # Load the defaults first.
+    endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
+    additional_path = None
+
+    # Try the ENV var. If not, check the config file.
+    if os.environ.get('BOTO_ENDPOINTS'):
+        additional_path = os.environ['BOTO_ENDPOINTS']
+    elif boto.config.get('boto', 'endpoints_path'):
+        additional_path = boto.config.get('boto', 'endpoints_path')
+
+    # If there's a file provided, we'll load it & additively merge it into
+    # the endpoints.
+    if additional_path:
+        additional = load_endpoint_json(additional_path)
+        endpoints = merge_endpoints(endpoints, additional)
+
+    return endpoints
+
+
+def get_regions(service_name, region_cls=None, connection_cls=None):
+    """
+    Given a service name (like ``ec2``), returns a list of ``RegionInfo``
+    objects for that service.
+
+    This leverages the ``endpoints.json`` file (+ optional user overrides) to
+    configure/construct all the objects.
+
+    :param service_name: The name of the service to construct the ``RegionInfo``
+        objects for. Ex: ``ec2``, ``s3``, ``sns``, etc.
+    :type service_name: string
+
+    :param region_cls: (Optional) The class to use when constructing. By
+        default, this is ``RegionInfo``.
+    :type region_cls: class
+
+    :param connection_cls: (Optional) The connection class for the
+        ``RegionInfo`` object. Providing this allows the ``connect`` method on
+        the ``RegionInfo`` to work. Default is ``None`` (no connection).
+    :type connection_cls: class
+
+    :returns: A list of configured ``RegionInfo`` objects
+    :rtype: list
+    """
+    endpoints = load_regions()
+
+    if service_name not in endpoints:
+        raise BotoClientError(
+            "Service '%s' not found in endpoints." % service_name
+        )
+
+    if region_cls is None:
+        region_cls = RegionInfo
+
+    region_objs = []
+
+    for region_name, endpoint in endpoints.get(service_name, {}).items():
+        region_objs.append(
+            region_cls(
+                name=region_name,
+                endpoint=endpoint,
+                connection_cls=connection_cls
+            )
+        )
+
+    return region_objs
 
 
 class RegionInfo(object):
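Before moving on to the new request logger, a sketch of how the endpoint loading added above is meant to be extended. The override path and JSON content are hypothetical; the mechanism (a `BOTO_ENDPOINTS` environment variable deep-merged over the bundled `boto/endpoints.json`) is the one implemented by `load_regions`/`merge_endpoints`:

# /etc/boto/extra_endpoints.json, same shape as boto/endpoints.json:
#     {"rds": {"us-test-1": "rds.us-test-1.example.com"}}

import os
os.environ['BOTO_ENDPOINTS'] = '/etc/boto/extra_endpoints.json'

from boto.regioninfo import get_regions, load_regions

endpoints = load_regions()    # defaults deep-merged with the override file
regions = get_regions('rds')  # now also includes the extra region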
diff --git a/awx/lib/site-packages/boto/requestlog.py b/awx/lib/site-packages/boto/requestlog.py
new file mode 100644
index 0000000000..5f1c255168
--- /dev/null
+++ b/awx/lib/site-packages/boto/requestlog.py
@@ -0,0 +1,40 @@
+
+from datetime import datetime
+from threading import Thread
+import Queue
+import sys
+
+from boto.utils import RequestHook
+
+class RequestLogger(RequestHook):
+    """
+    This class implements a request logger that uses a single thread to
+    write to a log file.
+    """
+    def __init__(self, filename='/tmp/request_log.csv'):
+        self.request_log_file = open(filename, 'w')
+        self.request_log_queue = Queue.Queue(100)
+        Thread(target=self._request_log_worker).start()
+
+
+    def handle_request_data(self, request, response, error=False):
+        size = 0 if error else response.getheader('Content-Length')
+        now = datetime.now()
+        time = now.strftime('%Y-%m-%d %H:%M:%S')
+        td = (now - request.start_time)
+        duration = (td.microseconds + long(td.seconds + td.days*24*3600) * 1e6) / 1e6
+
+        # write output including timestamp, status code, response time, response size, request action
+        self.request_log_queue.put("'%s', '%s', '%s', '%s', '%s'\n" % (time, response.status, duration, size, request.params['Action']))
+
+
+    def _request_log_worker(self):
+        while True:
+            try:
+                item = self.request_log_queue.get(True)
+                self.request_log_file.write(item)
+                self.request_log_file.flush()
+                self.request_log_queue.task_done()
+            except:
+                import traceback; traceback.print_exc(file=sys.stdout)
+
diff --git a/awx/lib/site-packages/boto/resultset.py b/awx/lib/site-packages/boto/resultset.py
index f89ddbc032..830525820c 100644
--- a/awx/lib/site-packages/boto/resultset.py
+++ b/awx/lib/site-packages/boto/resultset.py
@@ -117,6 +117,11 @@ class ResultSet(list):
             self.append(value)
         elif name == 'NextToken':
             self.next_token = value
+        elif name == 'nextToken':
+            self.next_token = value
+            # Code exists which expects nextToken to be available, so we
+            # set it here to remain backwards-compatible.
+            self.nextToken = value
         elif name == 'BoxUsage':
             try:
                 connection.box_usage += float(value)
diff --git a/awx/lib/site-packages/boto/roboto/param.py b/awx/lib/site-packages/boto/roboto/param.py
index d4ddbd9f1b..ed3e6be9b9 100644
--- a/awx/lib/site-packages/boto/roboto/param.py
+++ b/awx/lib/site-packages/boto/roboto/param.py
@@ -67,7 +67,7 @@ class Converter(object):
         except:
             raise ValidationException(param, '')
 
-class Param(object):
+class Param(Converter):
 
     def __init__(self, name=None, ptype='string', optional=True,
                  short_name=None, long_name=None, doc='',
@@ -142,6 +142,6 @@
         :param value: The value to convert.  This should always
                       be a string.
         """
-        return super(Param, self).convert(value)
+        return super(Param, self).convert(self, value)
 
diff --git a/awx/lib/site-packages/boto/route53/__init__.py b/awx/lib/site-packages/boto/route53/__init__.py
index 3546d25d36..7b131f921d 100644
--- a/awx/lib/site-packages/boto/route53/__init__.py
+++ b/awx/lib/site-packages/boto/route53/__init__.py
@@ -23,8 +23,8 @@
 
 # this is here for backward compatibility
 # originally, the Route53Connection class was defined here
-from connection import Route53Connection
-from boto.regioninfo import RegionInfo
+from boto.route53.connection import Route53Connection
+from boto.regioninfo import RegionInfo, get_regions
 
 
 class Route53RegionInfo(RegionInfo):
@@ -51,10 +51,22 @@ def regions():
     :rtype: list
     :return: A list of :class:`boto.regioninfo.RegionInfo` instances
     """
-    return [Route53RegionInfo(name='universal',
-                              endpoint='route53.amazonaws.com',
-                              connection_cls=Route53Connection)
-            ]
+    regions = get_regions(
+        'route53',
+        region_cls=Route53RegionInfo,
+        connection_cls=Route53Connection
+    )
+
+    # For historical reasons, we had a "universal" endpoint as well.
+    regions.append(
+        Route53RegionInfo(
+            name='universal',
+            endpoint='route53.amazonaws.com',
+            connection_cls=Route53Connection
+        )
+    )
+
+    return regions
 
 
 def connect_to_region(region_name, **kw_params):
diff --git a/awx/lib/site-packages/boto/route53/connection.py b/awx/lib/site-packages/boto/route53/connection.py
index 398ff87097..7f45c778ce 100644
--- a/awx/lib/site-packages/boto/route53/connection.py
+++ b/awx/lib/site-packages/boto/route53/connection.py
@@ -54,22 +54,24 @@ class Route53Connection(AWSAuthConnection):
     DefaultHost = 'route53.amazonaws.com'
     """The default Route53 API endpoint to connect to."""
 
-    Version = '2012-02-29'
+    Version = '2013-04-01'
     """Route53 API version."""
 
-    XMLNameSpace = 'https://route53.amazonaws.com/doc/2012-02-29/'
+    XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/'
     """XML schema for this Route53 API version."""
 
     def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                  port=None, proxy=None, proxy_port=None,
                  host=DefaultHost, debug=0, security_token=None,
-                 validate_certs=True, https_connection_factory=None):
+                 validate_certs=True, https_connection_factory=None,
+                 profile_name=None):
         super(Route53Connection, self).__init__(host, aws_access_key_id,
                                                 aws_secret_access_key,
                                                 True, port, proxy, proxy_port,
                                                 debug=debug,
                                                 security_token=security_token,
                                                 validate_certs=validate_certs,
-                                                https_connection_factory=https_connection_factory)
+                                                https_connection_factory=https_connection_factory,
+                                                profile_name=profile_name)
 
     def _required_auth_capability(self):
         return ['route53']
@@ -224,6 +226,101 @@ class Route53Connection(AWSAuthConnection):
         h.parse(body)
         return e
 
+    # Health checks
+
+    POSTHCXMLBody = """<CreateHealthCheckRequest xmlns="%(xmlns)s">
+        <CallerReference>%(caller_ref)s</CallerReference>
+        %(health_check)s
+    </CreateHealthCheckRequest>"""
+
+    def create_health_check(self, health_check, caller_ref=None):
+        """
+        Create a new Health Check
+
+        :type health_check: HealthCheck
+        :param health_check: HealthCheck object
+
+        :type caller_ref: str
+        :param caller_ref: A unique string that identifies the request
+            and that allows failed CreateHealthCheckRequest requests to be retried
+            without the risk of executing the operation twice. If you don't
+            provide a value for this, boto will generate a Type 4 UUID and
+            use that.
+ + """ + if caller_ref is None: + caller_ref = str(uuid.uuid4()) + uri = '/%s/healthcheck' % self.Version + params = {'xmlns': self.XMLNameSpace, + 'caller_ref': caller_ref, + 'health_check': health_check.to_xml() + } + xml_body = self.POSTHCXMLBody % params + response = self.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body) + body = response.read() + boto.log.debug(body) + if response.status == 201: + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + else: + raise exception.DNSServerError(response.status, response.reason, body) + + def get_list_health_checks(self, maxitems=None, marker=None): + """ + Return a list of health checks + + :type maxitems: int + :param maxitems: Maximum number of items to return + + :type marker: str + :param marker: marker to get next set of items to list + + """ + + params = {} + if maxitems is not None: + params['maxitems'] = maxitems + if marker is not None: + params['marker'] = marker + + uri = '/%s/healthcheck' % (self.Version, ) + response = self.make_request('GET', uri, params=params) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element(list_marker='HealthChecks', item_marker=('HealthCheck',)) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def delete_health_check(self, health_check_id): + """ + Delete a health check + + :type health_check_id: str + :param health_check_id: ID of the health check to delete + + """ + uri = '/%s/healthcheck/%s' % (self.Version, health_check_id) + response = self.make_request('DELETE', uri) + body = response.read() + boto.log.debug(body) + if response.status not in (200, 204): + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + # Resource Record Sets def get_all_rrsets(self, hosted_zone_id, type=None, diff --git a/awx/lib/site-packages/boto/route53/healthcheck.py b/awx/lib/site-packages/boto/route53/healthcheck.py new file mode 100644 index 0000000000..059d208b4b --- /dev/null +++ b/awx/lib/site-packages/boto/route53/healthcheck.py @@ -0,0 +1,128 @@ +# Copyright (c) 2014 Tellybug, Matt Millar +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+
+
+"""
+From http://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHealthCheck.html
+
+POST /2013-04-01/healthcheck HTTP/1.1
+
+<?xml version="1.0" encoding="UTF-8"?>
+<CreateHealthCheckRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+   <CallerReference>unique description</CallerReference>
+   <HealthCheckConfig>
+      <IPAddress>IP address of the endpoint to check</IPAddress>
+      <Port>port on the endpoint to check</Port>
+      <Type>HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP</Type>
+      <ResourcePath>path of the file that
+         you want Amazon Route 53 to request</ResourcePath>
+      <FullyQualifiedDomainName>domain name of the
+         endpoint to check</FullyQualifiedDomainName>
+      <SearchString>if Type is HTTP_STR_MATCH or HTTPS_STR_MATCH,
+         the string to search for in the response body
+         from the specified resource</SearchString>
+   </HealthCheckConfig>
+</CreateHealthCheckRequest>
+"""
+
+
+class HealthCheck(object):
+    """An individual health check"""
+
+    POSTXMLBody = """
+        <HealthCheckConfig>
+            <IPAddress>%(ip_addr)s</IPAddress>
+            <Port>%(port)s</Port>
+            <Type>%(type)s</Type>
+            <ResourcePath>%(resource_path)s</ResourcePath>
+            %(fqdn_part)s
+            %(string_match_part)s
+            %(request_interval)s
+        </HealthCheckConfig>
+    """
+
+    XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""
+
+    XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""
+
+    XMLRequestIntervalPart = """<RequestInterval>%(request_interval)d</RequestInterval>"""
+
+    valid_request_intervals = (10, 30)
+
+    def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30):
+        """
+        HealthCheck object
+
+        :type ip_addr: str
+        :param ip_addr: IP Address
+
+        :type port: int
+        :param port: Port to check
+
+        :type hc_type: str
+        :param hc_type: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
+
+        :type resource_path: str
+        :param resource_path: Path to check
+
+        :type fqdn: str
+        :param fqdn: domain name of the endpoint to check
+
+        :type string_match: str
+        :param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource
+
+        :type request_interval: int
+        :param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
+
+        """
+        self.ip_addr = ip_addr
+        self.port = port
+        self.hc_type = hc_type
+        self.resource_path = resource_path
+        self.fqdn = fqdn
+        self.string_match = string_match
+
+        if request_interval in self.valid_request_intervals:
+            self.request_interval = request_interval
+        else:
+            raise AttributeError(
+                "Valid values for request_interval are: %s" %
+                ",".join(str(i) for i in self.valid_request_intervals))
+
+    def to_xml(self):
+        params = {
+            'ip_addr': self.ip_addr,
+            'port': self.port,
+            'type': self.hc_type,
+            'resource_path': self.resource_path,
+            'fqdn_part': "",
+            'string_match_part': "",
+            'request_interval': (self.XMLRequestIntervalPart %
+                                 {'request_interval': self.request_interval}),
+        }
+        if self.fqdn is not None:
+            params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn}
+
+        if self.string_match is not None:
+            params['string_match_part'] = self.XMLStringMatchPart % {'string_match': self.string_match}
+
+        return self.POSTXMLBody % params
diff --git a/awx/lib/site-packages/boto/route53/record.py b/awx/lib/site-packages/boto/route53/record.py
index 17f38b94c8..e04e009e27 100644
--- a/awx/lib/site-packages/boto/route53/record.py
+++ b/awx/lib/site-packages/boto/route53/record.py
@@ -35,7 +35,7 @@ class ResourceRecordSets(ResultSet):
     """
     ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
-    <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
+    <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
             <ChangeBatch>
                 <Comment>%(comment)s</Comment>
                 <Changes>%(changes)s</Changes>
@@ -66,12 +66,13 @@ class ResourceRecordSets(ResultSet):
 
     def add_change(self, action, name, type, ttl=600,
                    alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
-                   weight=None, region=None):
+                   weight=None, region=None, alias_evaluate_target_health=None,
+                   health_check=None):
         """
         Add a change request to the set.
+
        :type action: str
-        :param action: The action to perform ('CREATE'|'DELETE')
+        :param action: The action to perform ('CREATE'|'DELETE'|'UPSERT')
 
         :type name: str
         :param name: The name of the domain you want to perform the action on.
@@ -118,11 +119,22 @@ class ResourceRecordSets(ResultSet):
             record sets that have the same combination of DNS name and type,
             a value that determines which region this should be associated with
             for the latency-based routing
+
+        :type alias_evaluate_target_health: Boolean
+        :param alias_evaluate_target_health: *Required for alias resource record
+            sets* Indicates whether this Resource Record Set should respect the
+            health status of any health checks associated with the ALIAS target
+            record which it is linked to.
+
+        :type health_check: str
+        :param health_check: Health check to associate with this record
         """
         change = Record(name, type, ttl,
                         alias_hosted_zone_id=alias_hosted_zone_id,
                         alias_dns_name=alias_dns_name, identifier=identifier,
-                        weight=weight, region=region)
+                        weight=weight, region=region,
+                        alias_evaluate_target_health=alias_evaluate_target_health,
+                        health_check=health_check)
         self.changes.append([action, change])
         return change
 
@@ -178,11 +190,14 @@ class ResourceRecordSets(ResultSet):
 
 class Record(object):
     """An individual ResourceRecordSet"""
+    HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
+
     XMLBody = """<ResourceRecordSet>
         <Name>%(name)s</Name>
         <Type>%(type)s</Type>
         %(weight)s
         %(body)s
+        %(health_check)s
     </ResourceRecordSet>"""
 
     WRRBody = """
@@ -206,19 +221,22 @@ class Record(object):
     """
 
     AliasBody = """<AliasTarget>
-        <HostedZoneId>%s</HostedZoneId>
-        <DNSName>%s</DNSName>
+        <HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
+        <DNSName>%(dns_name)s</DNSName>
+        %(eval_target_health)s
     </AliasTarget>"""
+
+    EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
 
     def __init__(self, name=None, type=None, ttl=600, resource_records=None,
                  alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
-                 weight=None, region=None):
+                 weight=None, region=None, alias_evaluate_target_health=None,
+                 health_check=None):
         self.name = name
         self.type = type
         self.ttl = ttl
-        if resource_records == None:
+        if resource_records is None:
             resource_records = []
         self.resource_records = resource_records
         self.alias_hosted_zone_id = alias_hosted_zone_id
@@ -226,6 +244,8 @@ class Record(object):
         self.identifier = identifier
         self.weight = weight
         self.region = region
+        self.alias_evaluate_target_health = alias_evaluate_target_health
+        self.health_check = health_check
 
     def __repr__(self):
         return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
@@ -234,16 +254,25 @@ class Record(object):
         """Add a resource record value"""
         self.resource_records.append(value)
 
-    def set_alias(self, alias_hosted_zone_id, alias_dns_name):
+    def set_alias(self, alias_hosted_zone_id, alias_dns_name,
+                  alias_evaluate_target_health=False):
         """Make this an alias resource record set"""
         self.alias_hosted_zone_id = alias_hosted_zone_id
         self.alias_dns_name = alias_dns_name
+        self.alias_evaluate_target_health = alias_evaluate_target_health
 
     def to_xml(self):
         """Spit this resource record set out as XML"""
-        if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
+        if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
             # Use alias
-            body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name)
+            if self.alias_evaluate_target_health is not None:
+                eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
+            else:
+                eval_target_health = ""
+
+            body = self.AliasBody % { "hosted_zone_id": self.alias_hosted_zone_id,
+                                      "dns_name": self.alias_dns_name,
+                                      "eval_target_health": eval_target_health }
         else:
             # Use resource record(s)
             records = ""
@@ -258,33 +287,40 @@ class Record(object):
weight = "" - if self.identifier != None and self.weight != None: + if self.identifier is not None and self.weight is not None: weight = self.WRRBody % {"identifier": self.identifier, "weight": self.weight} - elif self.identifier != None and self.region != None: + elif self.identifier is not None and self.region is not None: weight = self.RRRBody % {"identifier": self.identifier, "region": self.region} + health_check = "" + if self.health_check is not None: + health_check = self.HealthCheckBody % (self.health_check) + params = { "name": self.name, "type": self.type, "weight": weight, "body": body, + "health_check": health_check } return self.XMLBody % params def to_print(self): rr = "" - if self.alias_hosted_zone_id != None and self.alias_dns_name != None: + if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None: # Show alias rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name + if self.alias_evaluate_target_health is not None: + rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health else: # Show resource record(s) rr = ",".join(self.resource_records) - if self.identifier != None and self.weight != None: + if self.identifier is not None and self.weight is not None: rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight) - elif self.identifier != None and self.region != None: + elif self.identifier is not None and self.region is not None: rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region) return rr @@ -304,6 +340,8 @@ class Record(object): self.alias_dns_name = value elif name == 'SetIdentifier': self.identifier = value + elif name == 'EvaluateTargetHealth': + self.alias_evaluate_target_health = value elif name == 'Weight': self.weight = value elif name == 'Region': diff --git a/awx/lib/site-packages/boto/route53/zone.py b/awx/lib/site-packages/boto/route53/zone.py index 75cefd48ae..bb6907da37 100644 --- a/awx/lib/site-packages/boto/route53/zone.py +++ b/awx/lib/site-packages/boto/route53/zone.py @@ -34,8 +34,8 @@ class Zone(object): """ A Route53 Zone. - :ivar Route53Connection route53connection - :ivar str Id: The ID of the hosted zone. + :ivar route53connection: A :class:`boto.route53.connection.Route53Connection` connection + :ivar id: The ID of the hosted zone """ def __init__(self, route53connection, zone_dict): self.route53connection = route53connection diff --git a/awx/lib/site-packages/boto/s3/__init__.py b/awx/lib/site-packages/boto/s3/__init__.py index 49a73ea7f4..271c104752 100644 --- a/awx/lib/site-packages/boto/s3/__init__.py +++ b/awx/lib/site-packages/boto/s3/__init__.py @@ -22,7 +22,7 @@ # IN THE SOFTWARE. 
# -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions class S3RegionInfo(RegionInfo): @@ -50,37 +50,11 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from .connection import S3Connection - return [S3RegionInfo(name='us-east-1', - endpoint='s3.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='us-gov-west-1', - endpoint='s3-us-gov-west-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='us-west-1', - endpoint='s3-us-west-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='us-west-2', - endpoint='s3-us-west-2.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='ap-northeast-1', - endpoint='s3-ap-northeast-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='ap-southeast-1', - endpoint='s3-ap-southeast-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='ap-southeast-2', - endpoint='s3-ap-southeast-2.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='eu-west-1', - endpoint='s3-eu-west-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='sa-east-1', - endpoint='s3-sa-east-1.amazonaws.com', - connection_cls=S3Connection), - S3RegionInfo(name='cn-north-1', - endpoint='s3.cn-north-1.amazonaws.com.cn', - connection_cls=S3Connection), - ] + return get_regions( + 's3', + region_cls=S3RegionInfo, + connection_cls=S3Connection + ) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/s3/bucket.py b/awx/lib/site-packages/boto/s3/bucket.py index a14fb8a72a..ed40970388 100644 --- a/awx/lib/site-packages/boto/s3/bucket.py +++ b/awx/lib/site-packages/boto/s3/bucket.py @@ -143,24 +143,46 @@ class Bucket(object): return self.get_key(key_name, headers=headers) def get_key(self, key_name, headers=None, version_id=None, - response_headers=None): + response_headers=None, validate=True): """ Check to see if a particular key exists within the bucket. This method uses a HEAD request to check for the existance of the key. Returns: An instance of a Key object or None - :type key_name: string :param key_name: The name of the key to retrieve + :type key_name: string + + :param headers: The headers to send when retrieving the key + :type headers: dict + + :param version_id: + :type version_id: string - :type response_headers: dict :param response_headers: A dictionary containing HTTP headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details. + :type response_headers: dict + + :param validate: Verifies whether the key exists. If ``False``, this + will not hit the service, constructing an in-memory object. + Default is ``True``. + :type validate: bool :rtype: :class:`boto.s3.key.Key` :returns: A Key object from this bucket. """ + if validate is False: + if headers or version_id or response_headers: + raise BotoClientError( + "When providing 'validate=False', no other params " + \ + "are allowed." + ) + + # This leans on the default behavior of ``new_key`` (not hitting + # the service). If that changes, that behavior should migrate here. + return self.new_key(key_name) + query_args_l = [] if version_id: query_args_l.append('versionId=%s' % version_id) @@ -545,6 +567,7 @@ class Bucket(object): list only if they have an upload ID lexicographically greater than the specified upload_id_marker. 
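A quick hedged sketch of the new `delimiter`/`prefix` filtering for multipart-upload listings, whose parameter documentation continues below (bucket and key names are illustrative):

bucket = conn.get_bucket('my-bucket', validate=False)

# Group in-progress uploads like folders: only keys under photos/2014/,
# collapsed at the next '/' into CommonPrefixes entries.
uploads = bucket.get_all_multipart_uploads(prefix='photos/2014/',
                                           delimiter='/')
for item in uploads:
    print item  # MultiPartUpload objects, plus Prefix entries for the groups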
+ :type encoding_type: string :param encoding_type: Requests Amazon S3 to encode the response and specifies the encoding method to use. @@ -555,14 +578,31 @@ class Bucket(object): encode the keys in the response. Valid options: ``url`` - :type encoding_type: string + + :type delimiter: string + :param delimiter: Character you use to group keys. + All keys that contain the same string between the prefix, if + specified, and the first occurrence of the delimiter after the + prefix are grouped under a single result element, CommonPrefixes. + If you don't specify the prefix parameter, then the substring + starts at the beginning of the key. The keys that are grouped + under CommonPrefixes result element are not returned elsewhere + in the response. + + :type prefix: string + :param prefix: Lists in-progress uploads only for those keys that + begin with the specified prefix. You can use prefixes to separate + a bucket into different grouping of keys. (You can think of using + prefix to make groups in the same way you'd use a folder in a + file system.) :rtype: ResultSet :return: The result from S3 listing the uploads requested """ self.validate_kwarg_names(params, ['max_uploads', 'key_marker', - 'upload_id_marker', 'encoding_type']) + 'upload_id_marker', 'encoding_type', + 'delimiter', 'prefix']) return self._get_all([('Upload', MultiPartUpload), ('CommonPrefixes', Prefix)], 'uploads', headers, **params) diff --git a/awx/lib/site-packages/boto/s3/connection.py b/awx/lib/site-packages/boto/s3/connection.py index 4a15809077..d6b3b52f68 100644 --- a/awx/lib/site-packages/boto/s3/connection.py +++ b/awx/lib/site-packages/boto/s3/connection.py @@ -148,6 +148,16 @@ class Location(object): CNNorth1 = 'cn-north-1' +class NoHostProvided(object): + # An identifying object to help determine whether the user provided a + # ``host`` or not. Never instantiated. + pass + + +class HostRequiredError(BotoClientError): + pass + + class S3Connection(AWSAuthConnection): DefaultHost = boto.config.get('s3', 'host', 's3.amazonaws.com') @@ -157,12 +167,16 @@ class S3Connection(AWSAuthConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, - host=DefaultHost, debug=0, https_connection_factory=None, + host=NoHostProvided, debug=0, https_connection_factory=None, calling_format=DefaultCallingFormat, path='/', provider='aws', bucket_class=Bucket, security_token=None, suppress_consec_slashes=True, anon=False, - validate_certs=None): - if isinstance(calling_format, str): + validate_certs=None, profile_name=None): + no_host_provided = False + if host is NoHostProvided: + no_host_provided = True + host = self.DefaultHost + if isinstance(calling_format, basestring): calling_format=boto.utils.find_class(calling_format)() self.calling_format = calling_format self.bucket_class = bucket_class @@ -173,7 +187,14 @@ class S3Connection(AWSAuthConnection): debug=debug, https_connection_factory=https_connection_factory, path=path, provider=provider, security_token=security_token, suppress_consec_slashes=suppress_consec_slashes, - validate_certs=validate_certs) + validate_certs=validate_certs, profile_name=profile_name) + # We need to delay until after the call to ``super`` before checking + # to see if SigV4 is in use. + if no_host_provided: + if 'hmac-v4-s3' in self._required_auth_capability(): + raise HostRequiredError( + "When using SigV4, you must specify a 'host' parameter." 
+ ) @detect_potential_s3sigv4 def _required_auth_capability(self): @@ -270,9 +291,9 @@ class S3Connection(AWSAuthConnection): """ - if fields == None: + if fields is None: fields = [] - if conditions == None: + if conditions is None: conditions = [] expiration = time.gmtime(int(time.time() + expires_in)) @@ -418,6 +439,23 @@ class S3Connection(AWSAuthConnection): ``S3Connection.lookup`` method, which will either return a valid bucket or ``None``. + If ``validate=False`` is passed, no request is made to the service (no + charge/communication delay). This is only safe to do if you are **sure** + the bucket exists. + + If the default ``validate=True`` is passed, a request is made to the + service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched + a list of keys (but with a max limit set to ``0``, always returning an empty + list) in the bucket (& included better error messages), at an + increased expense. As of Boto v2.25.0, this now performs a HEAD request + (less expensive but worse error messages). + + If you were relying on parsing the error message before, you should call + something like:: + + bucket = conn.get_bucket('', validate=False) + bucket.get_all_keys(maxkeys=0) + :type bucket_name: string :param bucket_name: The name of the bucket @@ -426,13 +464,58 @@ class S3Connection(AWSAuthConnection): AWS. :type validate: boolean - :param validate: If ``True``, it will try to fetch all keys within the - given bucket. (Default: ``True``) + :param validate: If ``True``, it will try to verify the bucket exists + on the service-side. (Default: ``True``) """ - bucket = self.bucket_class(self, bucket_name) if validate: - bucket.get_all_keys(headers, maxkeys=0) - return bucket + return self.head_bucket(bucket_name, headers=headers) + else: + return self.bucket_class(self, bucket_name) + + def head_bucket(self, bucket_name, headers=None): + """ + Determines if a bucket exists by name. + + If the bucket does not exist, an ``S3ResponseError`` will be raised. + + :type bucket_name: string + :param bucket_name: The name of the bucket + + :type headers: dict + :param headers: Additional headers to pass along with the request to + AWS. + + :returns: A object + """ + response = self.make_request('HEAD', bucket_name, headers=headers) + body = response.read() + if response.status == 200: + return self.bucket_class(self, bucket_name) + elif response.status == 403: + # For backward-compatibility, we'll populate part of the exception + # with the most-common default. + err = self.provider.storage_response_error( + response.status, + response.reason, + body + ) + err.error_code = 'AccessDenied' + err.error_message = 'Access Denied' + raise err + elif response.status == 404: + # For backward-compatibility, we'll populate part of the exception + # with the most-common default. 
+ err = self.provider.storage_response_error( + response.status, + response.reason, + body + ) + err.error_code = 'NoSuchBucket' + err.error_message = 'The specified bucket does not exist' + raise err + else: + raise self.provider.storage_response_error( + response.status, response.reason, body) def lookup(self, bucket_name, validate=True, headers=None): """ diff --git a/awx/lib/site-packages/boto/s3/key.py b/awx/lib/site-packages/boto/s3/key.py index 0849584d46..ba20c41aac 100644 --- a/awx/lib/site-packages/boto/s3/key.py +++ b/awx/lib/site-packages/boto/s3/key.py @@ -217,7 +217,8 @@ class Key(object): self.delete_marker = False def handle_restore_headers(self, response): - header = response.getheader('x-amz-restore') + provider = self.bucket.connection.provider + header = response.getheader(provider.restore_header) if header is None: return parts = header.split(',', 1) @@ -258,7 +259,7 @@ class Key(object): with the stored object in the response. See http://goo.gl/EWOPb for details. """ - if self.resp == None: + if self.resp is None: self.mode = 'r' provider = self.bucket.connection.provider @@ -299,6 +300,7 @@ class Key(object): self.content_disposition = value self.handle_version_headers(self.resp) self.handle_encryption_headers(self.resp) + self.handle_restore_headers(self.resp) self.handle_addl_headers(self.resp.getheaders()) def open_write(self, headers=None, override_num_retries=None): @@ -538,19 +540,19 @@ class Key(object): # convenience methods for setting/getting ACL def set_acl(self, acl_str, headers=None): - if self.bucket != None: + if self.bucket is not None: self.bucket.set_acl(acl_str, self.name, headers=headers) def get_acl(self, headers=None): - if self.bucket != None: + if self.bucket is not None: return self.bucket.get_acl(self.name, headers=headers) def get_xml_acl(self, headers=None): - if self.bucket != None: + if self.bucket is not None: return self.bucket.get_xml_acl(self.name, headers=headers) def set_xml_acl(self, acl_str, headers=None): - if self.bucket != None: + if self.bucket is not None: return self.bucket.set_xml_acl(acl_str, self.name, headers=headers) def set_canned_acl(self, acl_str, headers=None): @@ -882,7 +884,7 @@ class Key(object): 'Content-Type', headers) elif self.path: self.content_type = mimetypes.guess_type(self.path)[0] - if self.content_type == None: + if self.content_type is None: self.content_type = self.DefaultContentType headers['Content-Type'] = self.content_type else: @@ -1060,7 +1062,7 @@ class Key(object): if provider.storage_class_header: headers[provider.storage_class_header] = self.storage_class - if self.bucket != None: + if self.bucket is not None: if not replace: if self.bucket.lookup(self.name): return @@ -1194,7 +1196,7 @@ class Key(object): # What if different providers provide different classes? if hasattr(fp, 'name'): self.path = fp.name - if self.bucket != None: + if self.bucket is not None: if not md5 and provider.supports_chunked_transfer(): # defer md5 calculation to on the fly and # we don't know anything about size yet. @@ -1233,7 +1235,7 @@ class Key(object): self.md5 = md5[0] self.base64md5 = md5[1] - if self.name == None: + if self.name is None: self.name = self.md5 if not replace: if self.bucket.lookup(self.name): @@ -1416,6 +1418,14 @@ class Key(object): headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details. + + :type version_id: str + :param version_id: The ID of a particular version of the object. 
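The reworked ``get_bucket``/``head_bucket`` pair, sketched from the calling side; the bucket name is a placeholder and the error handling is illustrative:

    import boto
    from boto.exception import S3ResponseError

    conn = boto.connect_s3()
    try:
        # validate=True (the default) now issues a HEAD request via
        # head_bucket() instead of listing zero keys; a missing bucket
        # surfaces as S3ResponseError with error_code 'NoSuchBucket'.
        bucket = conn.get_bucket('example-bucket')
    except S3ResponseError as e:
        print('%s: %s' % (e.error_code, e.error_message))

    # When the bucket is known to exist, skip the round trip entirely.
    bucket = conn.get_bucket('example-bucket', validate=False)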
+ If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. """ self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb, torrent=torrent, version_id=version_id, @@ -1573,8 +1583,16 @@ class Key(object): headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details. + + :type version_id: str + :param version_id: The ID of a particular version of the object. + If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. """ - if self.bucket != None: + if self.bucket is not None: if res_download_handler: res_download_handler.get_file(self, fp, headers, cb, num_cb, torrent=torrent, @@ -1629,6 +1647,14 @@ class Key(object): headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details. + + :type version_id: str + :param version_id: The ID of a particular version of the object. + If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. """ try: with open(filename, 'wb') as fp: @@ -1641,7 +1667,7 @@ class Key(object): os.remove(filename) raise # if last_modified date was sent from s3, try to set file's timestamp - if self.last_modified != None: + if self.last_modified is not None: try: modified_tuple = rfc822.parsedate_tz(self.last_modified) modified_stamp = int(rfc822.mktime_tz(modified_tuple)) @@ -1687,6 +1713,14 @@ class Key(object): with the stored object in the response. See http://goo.gl/EWOPb for details. + :type version_id: str + :param version_id: The ID of a particular version of the object. + If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. + :rtype: string :returns: The contents of the file as a string """ diff --git a/awx/lib/site-packages/boto/s3/lifecycle.py b/awx/lib/site-packages/boto/s3/lifecycle.py index 58126e6d5b..dadc1d3293 100644 --- a/awx/lib/site-packages/boto/s3/lifecycle.py +++ b/awx/lib/site-packages/boto/s3/lifecycle.py @@ -23,16 +23,18 @@ class Rule(object): """ - A Lifcycle rule for an S3 bucket. + A Lifecycle rule for an S3 bucket. :ivar id: Unique identifier for the rule. The value cannot be longer - than 255 characters. + than 255 characters. This value is optional. The server will + generate a unique value for the rule if no value is provided. :ivar prefix: Prefix identifying one or more objects to which the - rule applies. + rule applies. If prefix is not provided, Boto generates a default + prefix which will match all objects. - :ivar status: If Enabled, the rule is currently being applied. - If Disabled, the rule is not currently being applied. + :ivar status: If 'Enabled', the rule is currently being applied. 
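The documented ``version_id`` behavior, sketched against a hypothetical version-enabled bucket (the version ID below is a placeholder):

    import boto

    conn = boto.connect_s3()
    bucket = conn.get_bucket('example-versioned-bucket')
    key = bucket.get_key('reports/2014-03.csv')

    # Fetch one specific version explicitly.
    key.get_contents_to_filename('/tmp/2014-03.csv',
                                 version_id='3HL4kqtJlcpXroDTDm')

    # With no explicit version_id, the Key's own attribute is used;
    # reset it to None to always fetch the latest version.
    key.version_id = None
    latest = key.get_contents_as_string()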
+ If 'Disabled', the rule is not currently being applied. :ivar expiration: An instance of `Expiration`. This indicates the lifetime of the objects that are subject to the rule. @@ -44,7 +46,7 @@ class Rule(object): def __init__(self, id=None, prefix=None, status=None, expiration=None, transition=None): self.id = id - self.prefix = prefix + self.prefix = '' if prefix is None else prefix self.status = status if isinstance(expiration, (int, long)): # retain backwards compatibility??? @@ -78,7 +80,8 @@ class Rule(object): def to_xml(self): s = '' - s += '%s' % self.id + if self.id is not None: + s += '%s' % self.id s += '%s' % self.prefix s += '%s' % self.status if self.expiration is not None: @@ -199,7 +202,8 @@ class Lifecycle(list): s += '' return s - def add_rule(self, id, prefix, status, expiration, transition=None): + def add_rule(self, id=None, prefix='', status='Enabled', + expiration=None, transition=None): """ Add a rule to this Lifecycle configuration. This only adds the rule to the local copy. To install the new rule(s) on @@ -208,7 +212,8 @@ class Lifecycle(list): :type id: str :param id: Unique identifier for the rule. The value cannot be longer - than 255 characters. + than 255 characters. This value is optional. The server will + generate a unique value for the rule if no value is provided. :type prefix: str :iparam prefix: Prefix identifying one or more objects to which the diff --git a/awx/lib/site-packages/boto/s3/resumable_download_handler.py b/awx/lib/site-packages/boto/s3/resumable_download_handler.py index cf182791fc..56e0ce3e8c 100644 --- a/awx/lib/site-packages/boto/s3/resumable_download_handler.py +++ b/awx/lib/site-packages/boto/s3/resumable_download_handler.py @@ -140,7 +140,7 @@ class ResumableDownloadHandler(object): # is attempted on an object), but warn user for other errors. if e.errno != errno.ENOENT: # Will restart because - # self.etag_value_for_current_download == None. + # self.etag_value_for_current_download is None. print('Couldn\'t read URI tracker file (%s): %s. Restarting ' 'download from scratch.' 
% (self.tracker_file_name, e.strerror)) diff --git a/awx/lib/site-packages/boto/sdb/__init__.py b/awx/lib/site-packages/boto/sdb/__init__.py index bebc15221c..6cb30050cd 100644 --- a/awx/lib/site-packages/boto/sdb/__init__.py +++ b/awx/lib/site-packages/boto/sdb/__init__.py @@ -21,6 +21,7 @@ # from .regioninfo import SDBRegionInfo +from boto.regioninfo import get_regions def regions(): @@ -30,23 +31,10 @@ def regions(): :rtype: list :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances """ - return [SDBRegionInfo(name='us-east-1', - endpoint='sdb.amazonaws.com'), - SDBRegionInfo(name='eu-west-1', - endpoint='sdb.eu-west-1.amazonaws.com'), - SDBRegionInfo(name='us-west-1', - endpoint='sdb.us-west-1.amazonaws.com'), - SDBRegionInfo(name='sa-east-1', - endpoint='sdb.sa-east-1.amazonaws.com'), - SDBRegionInfo(name='us-west-2', - endpoint='sdb.us-west-2.amazonaws.com'), - SDBRegionInfo(name='ap-northeast-1', - endpoint='sdb.ap-northeast-1.amazonaws.com'), - SDBRegionInfo(name='ap-southeast-1', - endpoint='sdb.ap-southeast-1.amazonaws.com'), - SDBRegionInfo(name='ap-southeast-2', - endpoint='sdb.ap-southeast-2.amazonaws.com') - ] + return get_regions( + 'sdb', + region_cls=SDBRegionInfo + ) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/sdb/connection.py b/awx/lib/site-packages/boto/sdb/connection.py index 04212df81d..c7370b6745 100644 --- a/awx/lib/site-packages/boto/sdb/connection.py +++ b/awx/lib/site-packages/boto/sdb/connection.py @@ -86,7 +86,8 @@ class SDBConnection(AWSQueryConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - converter=None, security_token=None, validate_certs=True): + converter=None, security_token=None, validate_certs=True, + profile_name=None): """ For any keywords that aren't documented, refer to the parent class, :py:class:`boto.connection.AWSAuthConnection`. 
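How the SimpleDB region list is now resolved, plus the newly threaded ``profile_name`` keyword (the profile name is hypothetical):

    import boto.sdb

    # regions() now delegates to boto.regioninfo.get_regions(), so
    # endpoints come from boto's shared endpoint data rather than a
    # hard-coded list.
    for region in boto.sdb.regions():
        print('%s -> %s' % (region.name, region.endpoint))

    # profile_name is forwarded to the connection, selecting a named
    # credential profile instead of the default keys.
    conn = boto.sdb.connect_to_region('us-east-1', profile_name='dev')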
You can avoid having @@ -118,7 +119,8 @@ class SDBConnection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) self.box_usage = 0.0 self.converter = converter self.item_cls = Item @@ -493,7 +495,7 @@ class SDBConnection(AWSQueryConnection): response = self.make_request('GetAttributes', params) body = response.read() if response.status == 200: - if item == None: + if item is None: item = self.item_cls(domain, item_name) h = handler.XmlHandler(item, self) xml.sax.parseString(body, h) diff --git a/awx/lib/site-packages/boto/sdb/db/key.py b/awx/lib/site-packages/boto/sdb/db/key.py index f630d398a3..6ac47a68aa 100644 --- a/awx/lib/site-packages/boto/sdb/db/key.py +++ b/awx/lib/site-packages/boto/sdb/db/key.py @@ -50,7 +50,7 @@ class Key(object): return self.id def has_id_or_name(self): - return self.id != None + return self.id is not None def parent(self): raise NotImplementedError("Key parents are not currently supported") diff --git a/awx/lib/site-packages/boto/sdb/db/manager/sdbmanager.py b/awx/lib/site-packages/boto/sdb/db/manager/sdbmanager.py index fd9777deb6..2613ff0867 100644 --- a/awx/lib/site-packages/boto/sdb/db/manager/sdbmanager.py +++ b/awx/lib/site-packages/boto/sdb/db/manager/sdbmanager.py @@ -107,7 +107,7 @@ class SDBConverter(object): def encode_map(self, prop, value): import urllib - if value == None: + if value is None: return None if not isinstance(value, dict): raise ValueError('Expected a dict value, got %s' % type(value)) @@ -117,7 +117,7 @@ class SDBConverter(object): if self.model_class in item_type.mro(): item_type = self.model_class encoded_value = self.encode(item_type, value[key]) - if encoded_value != None: + if encoded_value is not None: new_value.append('%s:%s' % (urllib.quote(key), encoded_value)) return new_value @@ -136,7 +136,7 @@ class SDBConverter(object): item_type = getattr(prop, "item_type") dec_val = {} for val in value: - if val != None: + if val is not None: k, v = self.decode_map_element(item_type, val) try: k = int(k) @@ -264,7 +264,7 @@ class SDBConverter(object): return float(mantissa + 'e' + exponent) def encode_datetime(self, value): - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): return value if isinstance(value, datetime): return value.strftime(ISO8601) @@ -289,7 +289,7 @@ class SDBConverter(object): return None def encode_date(self, value): - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): return value return value.isoformat() @@ -322,7 +322,7 @@ class SDBConverter(object): def encode_reference(self, value): if value in (None, 'None', '', ' '): return None - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): return value else: return value.id @@ -335,7 +335,7 @@ class SDBConverter(object): def encode_blob(self, value): if not value: return None - if isinstance(value, str): + if isinstance(value, basestring): return value if not value.id: @@ -351,7 +351,7 @@ class SDBConverter(object): else: raise SDBPersistenceError("Invalid Blob ID: %s" % value.id) - if value.value != None: + if value.value is not None: key.set_contents_from_string(value.value) return value.id @@ -415,7 +415,7 @@ class SDBManager(object): self.converter = SDBConverter(self) self._sdb = None self._domain = None - if consistent == None and hasattr(cls, "__consistent__"): + if consistent is 
None and hasattr(cls, "__consistent__"): consistent = cls.__consistent__ self.consistent = consistent @@ -456,7 +456,7 @@ class SDBManager(object): yield obj def encode_value(self, prop, value): - if value == None: + if value is None: return None if not prop: return str(value) @@ -544,7 +544,7 @@ class SDBManager(object): name = 'itemName()' if name != "itemName()": name = '`%s`' % name - if val == None: + if val is None: if op in ('is', '='): return "%(name)s is null" % {"name": name} elif op in ('is not', '!='): @@ -581,11 +581,11 @@ class SDBManager(object): order_by_filtered = True query_parts.append("(%s)" % select) - if isinstance(filters, str) or isinstance(filters, unicode): + if isinstance(filters, basestring): query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__) if order_by in ["__id__", "itemName()"]: query += " ORDER BY itemName() %s" % order_by_method - elif order_by != None: + elif order_by is not None: query += " ORDER BY `%s` %s" % (order_by, order_by_method) return query @@ -667,7 +667,7 @@ class SDBManager(object): value = self.encode_value(property, value) if value == []: value = None - if value == None: + if value is None: del_attrs.append(property.name) continue attrs[property.name] = value diff --git a/awx/lib/site-packages/boto/sdb/db/manager/xmlmanager.py b/awx/lib/site-packages/boto/sdb/db/manager/xmlmanager.py index c4cc5d3e7c..2cfcd13278 100644 --- a/awx/lib/site-packages/boto/sdb/db/manager/xmlmanager.py +++ b/awx/lib/site-packages/boto/sdb/db/manager/xmlmanager.py @@ -145,9 +145,9 @@ class XMLConverter(object): return None def encode_reference(self, value): - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): return value - if value == None: + if value is None: return '' else: val_node = self.manager.doc.createElement("object") @@ -296,7 +296,7 @@ class XMLManager(object): prop = obj.find_property(prop_name) value = self.decode_value(prop, prop_node) value = prop.make_value_from_datastore(value) - if value != None: + if value is not None: try: setattr(obj, prop.name, value) except: @@ -321,7 +321,7 @@ class XMLManager(object): prop = cls.find_property(prop_name) value = self.decode_value(prop, prop_node) value = prop.make_value_from_datastore(value) - if value != None: + if value is not None: props[prop.name] = value return (cls, props, id) @@ -466,7 +466,7 @@ class XMLManager(object): return doc def unmarshal_object(self, fp, cls=None, id=None): - if isinstance(fp, str) or isinstance(fp, unicode): + if isinstance(fp, basestring): doc = parseString(fp) else: doc = parse(fp) @@ -477,7 +477,7 @@ class XMLManager(object): Same as unmarshalling an object, except it returns from "get_props_from_doc" """ - if isinstance(fp, str) or isinstance(fp, unicode): + if isinstance(fp, basestring): doc = parseString(fp) else: doc = parse(fp) diff --git a/awx/lib/site-packages/boto/sdb/db/model.py b/awx/lib/site-packages/boto/sdb/db/model.py index 3d9a6b5afb..9e589d523f 100644 --- a/awx/lib/site-packages/boto/sdb/db/model.py +++ b/awx/lib/site-packages/boto/sdb/db/model.py @@ -270,7 +270,7 @@ class Model(object): return cls for sc in cls.__sub_classes__: r = sc.find_subclass(name) - if r != None: + if r is not None: return r class Expando(Model): diff --git a/awx/lib/site-packages/boto/sdb/db/property.py b/awx/lib/site-packages/boto/sdb/db/property.py index 44dab47d71..7488c2c30b 100644 --- a/awx/lib/site-packages/boto/sdb/db/property.py +++ b/awx/lib/site-packages/boto/sdb/db/property.py @@ -85,7 +85,7 @@ class 
Property(object): return self.default def validate(self, value): - if self.required and value == None: + if self.required and value is None: raise ValueError('%s is a required property' % self.name) if self.choices and value and not value in self.choices: raise ValueError('%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name)) @@ -111,9 +111,9 @@ class Property(object): def validate_string(value): - if value == None: + if value is None: return - elif isinstance(value, str) or isinstance(value, unicode): + elif isinstance(value, basestring): if len(value) > 1024: raise ValueError('Length of value greater than maxlength') else: @@ -144,7 +144,7 @@ class TextProperty(Property): def validate(self, value): value = super(TextProperty, self).validate(value) - if not isinstance(value, str) and not isinstance(value, unicode): + if not isinstance(value, basestring): raise TypeError('Expecting Text, got %s' % type(value)) if self.max_length and len(value) > self.max_length: raise ValueError('Length of value greater than maxlength %s' % self.max_length) @@ -328,7 +328,7 @@ class IntegerProperty(Property): return value is None def __set__(self, obj, value): - if value == "" or value == None: + if value == "" or value is None: value = 0 return super(IntegerProperty, self).__set__(obj, value) @@ -408,7 +408,7 @@ class DateTimeProperty(Property): return super(DateTimeProperty, self).default_value() def validate(self, value): - if value == None: + if value is None: return if isinstance(value, datetime.date): return value @@ -441,7 +441,7 @@ class DateProperty(Property): def validate(self, value): value = super(DateProperty, self).validate(value) - if value == None: + if value is None: return if not isinstance(value, self.data_type): raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) @@ -493,7 +493,7 @@ class ReferenceProperty(Property): # If the value is still the UUID for the referenced object, we need to create # the object now that is the attribute has actually been accessed. 
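Why the sweep from ``== None`` to ``is None`` throughout these modules matters: equality can be overridden, identity cannot. A self-contained illustration:

    class AlwaysEqual(object):
        def __eq__(self, other):
            return True

    v = AlwaysEqual()
    print(v == None)   # True  -- a custom __eq__ can lie
    print(v is None)   # False -- the identity test does what was intended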
This lazy # instantiation saves unnecessary roundtrips to SimpleDB - if isinstance(value, str) or isinstance(value, unicode): + if isinstance(value, basestring): value = self.reference_class(value) setattr(obj, self.name, value) return value @@ -501,7 +501,7 @@ class ReferenceProperty(Property): def __set__(self, obj, value): """Don't allow this object to be associated to itself This causes bad things to happen""" - if value != None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)): + if value is not None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)): raise ValueError("Can not associate an object with itself!") return super(ReferenceProperty, self).__set__(obj, value) @@ -533,11 +533,11 @@ class ReferenceProperty(Property): def validate(self, value): if self.validator: self.validator(value) - if self.required and value == None: + if self.required and value is None: raise ValueError('%s is a required property' % self.name) if value == self.default_value(): return - if not isinstance(value, str) and not isinstance(value, unicode): + if not isinstance(value, basestring): self.check_instance(value) @@ -658,7 +658,7 @@ class ListProperty(Property): item_type = self.item_type if isinstance(value, item_type): value = [value] - elif value == None: # Override to allow them to set this to "None" to remove everything + elif value is None: # Override to allow them to set this to "None" to remove everything value = [] return super(ListProperty, self).__set__(obj, value) diff --git a/awx/lib/site-packages/boto/sdb/db/query.py b/awx/lib/site-packages/boto/sdb/db/query.py index 31b71aa03b..bd1a41dd90 100644 --- a/awx/lib/site-packages/boto/sdb/db/query.py +++ b/awx/lib/site-packages/boto/sdb/db/query.py @@ -39,7 +39,7 @@ class Query(object): return iter(self.manager.query(self)) def next(self): - if self.__local_iter__ == None: + if self.__local_iter__ is None: self.__local_iter__ = self.__iter__() return self.__local_iter__.next() diff --git a/awx/lib/site-packages/boto/sdb/db/sequence.py b/awx/lib/site-packages/boto/sdb/db/sequence.py index 121512f208..70540c52f1 100644 --- a/awx/lib/site-packages/boto/sdb/db/sequence.py +++ b/awx/lib/site-packages/boto/sdb/db/sequence.py @@ -59,7 +59,7 @@ class SequenceGenerator(object): # If they pass us in a string that's not at least # the lenght of our sequence, then return the # first element in our sequence - if val == None or len(val) < self.sequence_length: + if val is None or len(val) < self.sequence_length: return self.sequence_string[0] last_value = val[-self.sequence_length:] if (not self.rollover) and (last_value == self.last_item): @@ -79,21 +79,21 @@ class SequenceGenerator(object): # Simple Sequence Functions # def increment_by_one(cv=None, lv=None): - if cv == None: + if cv is None: return 0 return cv + 1 def double(cv=None, lv=None): - if cv == None: + if cv is None: return 1 return cv * 2 def fib(cv=1, lv=0): """The fibonacci sequence, this incrementer uses the last value""" - if cv == None: + if cv is None: cv = 1 - if lv == None: + if lv is None: lv = 0 return cv + lv @@ -136,17 +136,17 @@ class Sequence(object): self.last_value = None self.domain_name = domain_name self.id = id - if init_val == None: + if init_val is None: init_val = fnc(init_val) - if self.id == None: + if self.id is None: import uuid self.id = str(uuid.uuid4()) self.item_type = type(fnc(None)) self.timestamp = None # Allow us to pass in a full name to a function - if isinstance(fnc, str): + if isinstance(fnc, basestring): from 
boto.utils import find_class fnc = find_class(fnc) self.fnc = fnc @@ -162,7 +162,7 @@ class Sequence(object): expected_value = [] new_val = {} new_val['timestamp'] = now - if self._value != None: + if self._value is not None: new_val['last_value'] = self._value expected_value = ['current_value', str(self._value)] new_val['current_value'] = val @@ -184,7 +184,7 @@ class Sequence(object): self.timestamp = val['timestamp'] if 'current_value' in val: self._value = self.item_type(val['current_value']) - if "last_value" in val and val['last_value'] != None: + if "last_value" in val and val['last_value'] is not None: self.last_value = self.item_type(val['last_value']) return self._value diff --git a/awx/lib/site-packages/boto/sdb/item.py b/awx/lib/site-packages/boto/sdb/item.py index 999c7f0b31..a742d80ca8 100644 --- a/awx/lib/site-packages/boto/sdb/item.py +++ b/awx/lib/site-packages/boto/sdb/item.py @@ -123,7 +123,7 @@ class Item(dict): if replace: del_attrs = [] for name in self: - if self[name] == None: + if self[name] is None: del_attrs.append(name) if len(del_attrs) > 0: self.domain.delete_attributes(self.name, del_attrs) diff --git a/awx/lib/site-packages/boto/sdb/queryresultset.py b/awx/lib/site-packages/boto/sdb/queryresultset.py index f943949fe3..9ff0ae2f56 100644 --- a/awx/lib/site-packages/boto/sdb/queryresultset.py +++ b/awx/lib/site-packages/boto/sdb/queryresultset.py @@ -33,7 +33,7 @@ def query_lister(domain, query='', max_items=None, attr_names=None): yield item num_results += 1 next_token = rs.next_token - more_results = next_token != None + more_results = next_token is not None class QueryResultSet(object): @@ -59,7 +59,7 @@ def select_lister(domain, query='', max_items=None): yield item num_results += 1 next_token = rs.next_token - more_results = next_token != None + more_results = next_token is not None class SelectResultSet(object): @@ -86,7 +86,7 @@ class SelectResultSet(object): self.next_token = rs.next_token if self.max_items and num_results >= self.max_items: raise StopIteration - more_results = self.next_token != None + more_results = self.next_token is not None def next(self): return self.__iter__().next() diff --git a/awx/lib/site-packages/boto/sdb/regioninfo.py b/awx/lib/site-packages/boto/sdb/regioninfo.py index 769be555ee..cb0211e161 100644 --- a/awx/lib/site-packages/boto/sdb/regioninfo.py +++ b/awx/lib/site-packages/boto/sdb/regioninfo.py @@ -26,7 +26,8 @@ from boto.regioninfo import RegionInfo class SDBRegionInfo(RegionInfo): - def __init__(self, connection=None, name=None, endpoint=None): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): from boto.sdb.connection import SDBConnection super(SDBRegionInfo, self).__init__(connection, name, endpoint, SDBConnection) diff --git a/awx/lib/site-packages/boto/services/message.py b/awx/lib/site-packages/boto/services/message.py index 79f6d19f66..31f37019fc 100644 --- a/awx/lib/site-packages/boto/services/message.py +++ b/awx/lib/site-packages/boto/services/message.py @@ -34,7 +34,7 @@ class ServiceMessage(MHMessage): self['OriginalLocation'] = t[0] self['OriginalFileName'] = t[1] mime_type = mimetypes.guess_type(t[1])[0] - if mime_type == None: + if mime_type is None: mime_type = 'application/octet-stream' self['Content-Type'] = mime_type s = os.stat(key.path) diff --git a/awx/lib/site-packages/boto/ses/__init__.py b/awx/lib/site-packages/boto/ses/__init__.py index b3d03ae3d5..81d4206d79 100644 --- a/awx/lib/site-packages/boto/ses/__init__.py +++ 
b/awx/lib/site-packages/boto/ses/__init__.py @@ -21,7 +21,7 @@ # IN THE SOFTWARE. from connection import SESConnection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,9 +31,7 @@ def regions(): :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [RegionInfo(name='us-east-1', - endpoint='email.us-east-1.amazonaws.com', - connection_cls=SESConnection)] + return get_regions('ses', connection_cls=SESConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/ses/connection.py b/awx/lib/site-packages/boto/ses/connection.py index 5425ef3515..df115232d8 100644 --- a/awx/lib/site-packages/boto/ses/connection.py +++ b/awx/lib/site-packages/boto/ses/connection.py @@ -42,7 +42,7 @@ class SESConnection(AWSAuthConnection): is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) @@ -53,7 +53,8 @@ class SESConnection(AWSAuthConnection): proxy_user, proxy_pass, debug, https_connection_factory, path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _required_auth_capability(self): return ['ses'] diff --git a/awx/lib/site-packages/boto/sns/__init__.py b/awx/lib/site-packages/boto/sns/__init__.py index 4764a94f17..1517f5f18a 100644 --- a/awx/lib/site-packages/boto/sns/__init__.py +++ b/awx/lib/site-packages/boto/sns/__init__.py @@ -23,7 +23,7 @@ # this is here for backward compatibility # originally, the SNSConnection class was defined here from connection import SNSConnection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -33,37 +33,7 @@ def regions(): :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [RegionInfo(name='us-east-1', - endpoint='sns.us-east-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='eu-west-1', - endpoint='sns.eu-west-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='us-gov-west-1', - endpoint='sns.us-gov-west-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='us-west-1', - endpoint='sns.us-west-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='sa-east-1', - endpoint='sns.sa-east-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='us-west-2', - endpoint='sns.us-west-2.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='ap-northeast-1', - endpoint='sns.ap-northeast-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='ap-southeast-1', - endpoint='sns.ap-southeast-1.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='ap-southeast-2', - endpoint='sns.ap-southeast-2.amazonaws.com', - connection_cls=SNSConnection), - RegionInfo(name='cn-north-1', - endpoint=' sns.cn-north-1.amazonaws.com.cn', - connection_cls=SNSConnection), - ] + return get_regions('sns', connection_cls=SNSConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/sns/connection.py b/awx/lib/site-packages/boto/sns/connection.py index 1d459311fe..c98793f5bd 100644 --- a/awx/lib/site-packages/boto/sns/connection.py +++ 
b/awx/lib/site-packages/boto/sns/connection.py @@ -48,15 +48,18 @@ class SNSConnection(AWSQueryConnection): requests, and handling error responses. For a list of available SDKs, go to `Tools for Amazon Web Services`_. """ - DefaultRegionName = 'us-east-1' - DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com' - APIVersion = '2010-03-31' + DefaultRegionName = boto.config.get('Boto', 'sns_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'sns_region_endpoint', + 'sns.us-east-1.amazonaws.com') + APIVersion = boto.config.get('Boto', 'sns_version', '2010-03-31') + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', - security_token=None, validate_certs=True): + security_token=None, validate_certs=True, + profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, @@ -69,7 +72,8 @@ class SNSConnection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + profile_name=profile_name) def _build_dict_as_list_params(self, params, dictionary, name): """ @@ -264,7 +268,7 @@ class SNSConnection(AWSQueryConnection): :type protocol: string :param protocol: The protocol used to communicate with the subscriber. Current choices are: - email|email-json|http|https|sqs|sms + email|email-json|http|https|sqs|sms|application :type endpoint: string :param endpoint: The location of the endpoint for @@ -274,7 +278,10 @@ class SNSConnection(AWSQueryConnection): * For http, this would be a URL beginning with http * For https, this would be a URL beginning with https * For sqs, this would be the ARN of an SQS Queue - * For sms, this would be a phone number of an SMS-enabled device + * For sms, this would be a phone number of an + SMS-enabled device + * For application, the endpoint is the EndpointArn + of a mobile app and device. 
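A sketch of subscribing a mobile endpoint with the newly documented ``application`` protocol; both ARNs are placeholders:

    import boto.sns

    conn = boto.sns.connect_to_region('us-east-1')
    conn.subscribe(
        topic='arn:aws:sns:us-east-1:123456789012:alerts',
        protocol='application',
        # For 'application', the endpoint is the EndpointArn of a
        # registered mobile app and device.
        endpoint='arn:aws:sns:us-east-1:123456789012:endpoint/GCM/MyApp/abc123')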
""" params = {'TopicArn': topic, 'Protocol': protocol, diff --git a/awx/lib/site-packages/boto/sqs/__init__.py b/awx/lib/site-packages/boto/sqs/__init__.py index 35e6c2bdec..526a34cc73 100644 --- a/awx/lib/site-packages/boto/sqs/__init__.py +++ b/awx/lib/site-packages/boto/sqs/__init__.py @@ -21,6 +21,7 @@ # from regioninfo import SQSRegionInfo +from boto.regioninfo import get_regions def regions(): @@ -30,27 +31,10 @@ def regions(): :rtype: list :return: A list of :class:`boto.sqs.regioninfo.RegionInfo` """ - return [SQSRegionInfo(name='us-east-1', - endpoint='queue.amazonaws.com'), - SQSRegionInfo(name='us-gov-west-1', - endpoint='sqs.us-gov-west-1.amazonaws.com'), - SQSRegionInfo(name='eu-west-1', - endpoint='eu-west-1.queue.amazonaws.com'), - SQSRegionInfo(name='us-west-1', - endpoint='us-west-1.queue.amazonaws.com'), - SQSRegionInfo(name='us-west-2', - endpoint='us-west-2.queue.amazonaws.com'), - SQSRegionInfo(name='sa-east-1', - endpoint='sa-east-1.queue.amazonaws.com'), - SQSRegionInfo(name='ap-northeast-1', - endpoint='ap-northeast-1.queue.amazonaws.com'), - SQSRegionInfo(name='ap-southeast-1', - endpoint='ap-southeast-1.queue.amazonaws.com'), - SQSRegionInfo(name='ap-southeast-2', - endpoint='ap-southeast-2.queue.amazonaws.com'), - SQSRegionInfo(name='cn-north-1', - endpoint='sqs.cn-north-1.amazonaws.com.cn'), - ] + return get_regions( + 'sqs', + region_cls=SQSRegionInfo + ) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/sqs/connection.py b/awx/lib/site-packages/boto/sqs/connection.py index f403d639cc..8fc69306bf 100644 --- a/awx/lib/site-packages/boto/sqs/connection.py +++ b/awx/lib/site-packages/boto/sqs/connection.py @@ -19,6 +19,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. +import boto from boto.connection import AWSQueryConnection from boto.sqs.regioninfo import SQSRegionInfo from boto.sqs.queue import Queue @@ -32,9 +33,10 @@ class SQSConnection(AWSQueryConnection): """ A Connection to the SQS Service. """ - DefaultRegionName = 'us-east-1' - DefaultRegionEndpoint = 'queue.amazonaws.com' - APIVersion = '2012-11-05' + DefaultRegionName = boto.config.get('Boto', 'sqs_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'sqs_region_endpoint', + 'queue.amazonaws.com') + APIVersion = boto.config.get('Boto', 'sqs_version', '2012-11-05') DefaultContentType = 'text/plain' ResponseError = SQSError AuthServiceName = 'sqs' @@ -113,7 +115,7 @@ class SQSConnection(AWSQueryConnection): Gets one or all attributes of a Queue :type queue: A Queue object - :param queue: The SQS queue to be deleted + :param queue: The SQS queue to get attributes for :type attribute: str :type attribute: The specific attribute requested. If not @@ -127,6 +129,7 @@ class SQSConnection(AWSQueryConnection): * LastModifiedTimestamp * Policy * ReceiveMessageWaitTimeSeconds + * RedrivePolicy :rtype: :class:`boto.sqs.attributes.Attributes` :return: An Attributes object containing request value(s). @@ -357,6 +360,19 @@ class SQSConnection(AWSQueryConnection): lookup = get_queue + def get_dead_letter_source_queues(self, queue): + """ + Retrieves the dead letter source queues for a given queue. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The queue for which to get DL source queues + :rtype: list + :returns: A list of :py:class:`boto.sqs.queue.Queue` instances. 
+ """ + params = {'QueueUrl': queue.url} + return self.get_list('ListDeadLetterSourceQueues', params, + [('QueueUrl', Queue)]) + # # Permissions methods # diff --git a/awx/lib/site-packages/boto/sqs/connection2.py b/awx/lib/site-packages/boto/sqs/connection2.py new file mode 100644 index 0000000000..f1eb9fadb1 --- /dev/null +++ b/awx/lib/site-packages/boto/sqs/connection2.py @@ -0,0 +1,868 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +try: + import json +except ImportError: + import simplejson as json + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.sqs import exceptions + + +class SQSConnection(AWSQueryConnection): + """ + Welcome to the Amazon Simple Queue Service API Reference . This + section describes who should read this guide, how the guide is + organized, and other resources related to the Amazon Simple Queue + Service (Amazon SQS). + + Amazon SQS offers reliable and scalable hosted queues for storing + messages as they travel between computers. By using Amazon SQS, + you can move data between distributed components of your + applications that perform different tasks without losing messages + or requiring each component to be always available. + + Helpful Links: + + + `Current WSDL (2012-11-05)`_ + + `Making API Requests`_ + + `Amazon SQS product page`_ + + `Regions and Endpoints`_ + + + + We also provide SDKs that enable you to access Amazon SQS from + your preferred programming language. The SDKs contain + functionality that automatically takes care of tasks such as: + + + + + Cryptographically signing your service requests + + Retrying requests + + Handling error responses + + + + For a list of available SDKs, go to `Tools for Amazon Web + Services`_. 
+ """ + APIVersion = "2012-11-05" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "sqs.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "QueueDoesNotExist": exceptions.QueueDoesNotExist, + "BatchEntryIdsNotDistinct": exceptions.BatchEntryIdsNotDistinct, + "EmptyBatchRequest": exceptions.EmptyBatchRequest, + "OverLimit": exceptions.OverLimit, + "QueueNameExists": exceptions.QueueNameExists, + "InvalidMessageContents": exceptions.InvalidMessageContents, + "TooManyEntriesInBatchRequest": exceptions.TooManyEntriesInBatchRequest, + "QueueDeletedRecently": exceptions.QueueDeletedRecently, + "InvalidBatchEntryId": exceptions.InvalidBatchEntryId, + "BatchRequestTooLong": exceptions.BatchRequestTooLong, + "InvalidIdFormat": exceptions.InvalidIdFormat, + "ReceiptHandleIsInvalid": exceptions.ReceiptHandleIsInvalid, + "InvalidAttributeName": exceptions.InvalidAttributeName, + "MessageNotInflight": exceptions.MessageNotInflight, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(SQSConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_permission(self, queue_url, label, aws_account_ids, actions): + """ + Adds a permission to a queue for a specific `principal`_. This + allows for sharing access to the queue. + + When you create a queue, you have full control access rights + for the queue. Only you (as owner of the queue) can grant or + deny permissions to the queue. For more information about + these permissions, see `Shared Queues`_ in the Amazon SQS + Developer Guide . + + `AddPermission` writes an Amazon SQS-generated policy. If you + want to write your own policy, use SetQueueAttributes to + upload your policy. For more information about writing your + own policy, see `Using The Access Policy Language`_ in the + Amazon SQS Developer Guide . + Some API actions take lists of parameters. These lists are + specified using the `param.n` notation. Values of `n` are + integers starting from 1. For example, a parameter list with + two elements looks like this: + `&Attribute.1=this` + + `&Attribute.2=that` + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type label: string + :param label: The unique identification of the permission you're + setting (e.g., `AliceSendMessage`). Constraints: Maximum 80 + characters; alphanumeric characters, hyphens (-), and underscores + (_) are allowed. + + :type aws_account_ids: list + :param aws_account_ids: The AWS account number of the `principal`_ who + will be given permission. The principal must have an AWS account, + but does not need to be signed up for Amazon SQS. For information + about locating the AWS account identification, see `Your AWS + Identifiers`_ in the Amazon SQS Developer Guide . + + :type actions: list + :param actions: The action the client wants to allow for the specified + principal. The following are valid values: `* | SendMessage | + ReceiveMessage | DeleteMessage | ChangeMessageVisibility | + GetQueueAttributes | GetQueueUrl`. For more information about these + actions, see `Understanding Permissions`_ in the Amazon SQS + Developer Guide . 
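Instantiating the new query-style client from ``connection2.py`` and granting cross-account send access; the queue URL and account IDs are placeholders:

    from boto.sqs.connection2 import SQSConnection

    conn = SQSConnection()   # region defaults to us-east-1
    queue_url = 'https://sqs.us-east-1.amazonaws.com/123456789012/example-queue'
    conn.add_permission(queue_url,
                        label='AliceSendMessage',
                        aws_account_ids=['111122223333'],
                        actions=['SendMessage'])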
+ Specifying `SendMessage`, `DeleteMessage`, or `ChangeMessageVisibility` + for the `ActionName.n` also grants permissions for the + corresponding batch versions of those actions: `SendMessageBatch`, + `DeleteMessageBatch`, and `ChangeMessageVisibilityBatch`. + + """ + params = {'QueueUrl': queue_url, 'Label': label, } + self.build_list_params(params, + aws_account_ids, + 'AWSAccountIds.member') + self.build_list_params(params, + actions, + 'Actions.member') + return self._make_request( + action='AddPermission', + verb='POST', + path='/', params=params) + + def change_message_visibility(self, queue_url, receipt_handle, + visibility_timeout): + """ + Changes the visibility timeout of a specified message in a + queue to a new value. The maximum allowed timeout value you + can set the value to is 12 hours. This means you can't extend + the timeout of a message in an existing queue to more than a + total visibility timeout of 12 hours. (For more information + visibility timeout, see `Visibility Timeout`_ in the Amazon + SQS Developer Guide .) + + For example, let's say you have a message and its default + message visibility timeout is 30 minutes. You could call + `ChangeMessageVisiblity` with a value of two hours and the + effective timeout would be two hours and 30 minutes. When that + time comes near you could again extend the time out by calling + ChangeMessageVisiblity, but this time the maximum allowed + timeout would be 9 hours and 30 minutes. + If you attempt to set the `VisibilityTimeout` to an amount + more than the maximum time left, Amazon SQS returns an error. + It will not automatically recalculate and increase the timeout + to the maximum time remaining. Unlike with a queue, when you + change the visibility timeout for a specific message, that + timeout value is applied immediately but is not saved in + memory for that message. If you don't delete a message after + it is received, the visibility timeout for the message the + next time it is received reverts to the original timeout + value, not the value you set with the + `ChangeMessageVisibility` action. + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type receipt_handle: string + :param receipt_handle: The receipt handle associated with the message + whose visibility timeout should be changed. This parameter is + returned by the ReceiveMessage action. + + :type visibility_timeout: integer + :param visibility_timeout: The new value (in seconds - from 0 to 43200 + - maximum 12 hours) for the message's visibility timeout. + + """ + params = { + 'QueueUrl': queue_url, + 'ReceiptHandle': receipt_handle, + 'VisibilityTimeout': visibility_timeout, + } + return self._make_request( + action='ChangeMessageVisibility', + verb='POST', + path='/', params=params) + + def change_message_visibility_batch(self, queue_url, entries): + """ + Changes the visibility timeout of multiple messages. This is a + batch version of ChangeMessageVisibility. The result of the + action on each message is reported individually in the + response. You can send up to 10 ChangeMessageVisibility + requests with each `ChangeMessageVisibilityBatch` action. + Because the batch request can result in a combination of + successful and unsuccessful actions, you should check for + batch errors even when the call returns an HTTP status code of + 200. Some API actions take lists of parameters. These lists + are specified using the `param.n` notation. Values of `n` are + integers starting from 1. 
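Extending a message's timeout with the ``change_message_visibility`` signature above; the receipt handle is a placeholder obtained from a prior ``receive_message`` call:

    from boto.sqs.connection2 import SQSConnection

    conn = SQSConnection()
    conn.change_message_visibility(
        queue_url='https://sqs.us-east-1.amazonaws.com/123456789012/example-queue',
        receipt_handle='AQEBzbVv-placeholder-handle',
        visibility_timeout=7200)   # seconds; the maximum is 43200 (12 hours)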
For example, a parameter list with + two elements looks like this: + `&Attribute.1=this` + + `&Attribute.2=that` + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type entries: list + :param entries: A list of receipt handles of the messages for which the + visibility timeout must be changed. + + """ + params = {'QueueUrl': queue_url, } + self.build_complex_list_params( + params, entries, + 'Entries.member', + ('Id', 'ReceiptHandle', 'VisibilityTimeout')) + return self._make_request( + action='ChangeMessageVisibilityBatch', + verb='POST', + path='/', params=params) + + def create_queue(self, queue_name, attributes=None): + """ + Creates a new queue, or returns the URL of an existing one. + When you request `CreateQueue`, you provide a name for the + queue. To successfully create a new queue, you must provide a + name that is unique within the scope of your own queues. + + If you delete a queue, you must wait at least 60 seconds + before creating a queue with the same name. + + You may pass one or more attributes in the request. If you do + not provide a value for any attribute, the queue will have the + default value for that attribute. Permitted attributes are the + same that can be set using SetQueueAttributes. + + Use GetQueueUrl to get a queue's URL. GetQueueUrl requires + only the `QueueName` parameter. + + If you provide the name of an existing queue, along with the + exact names and values of all the queue's attributes, + `CreateQueue` returns the queue URL for the existing queue. If + the queue name, attribute names, or attribute values do not + match an existing queue, `CreateQueue` returns an error. + Some API actions take lists of parameters. These lists are + specified using the `param.n` notation. Values of `n` are + integers starting from 1. For example, a parameter list with + two elements looks like this: + `&Attribute.1=this` + + `&Attribute.2=that` + + :type queue_name: string + :param queue_name: The name for the queue to be created. + + :type attributes: map + :param attributes: A map of attributes with their corresponding values. + The following lists the names, descriptions, and values of the special + request parameters the `CreateQueue` action uses: + + + + + `DelaySeconds` - The time in seconds that the delivery of all + messages in the queue will be delayed. An integer from 0 to 900 (15 + minutes). The default for this attribute is 0 (zero). + + `MaximumMessageSize` - The limit of how many bytes a message can + contain before Amazon SQS rejects it. An integer from 1024 bytes (1 + KiB) up to 262144 bytes (256 KiB). The default for this attribute + is 262144 (256 KiB). + + `MessageRetentionPeriod` - The number of seconds Amazon SQS retains a + message. Integer representing seconds, from 60 (1 minute) to + 1209600 (14 days). The default for this attribute is 345600 (4 + days). + + `Policy` - The queue's policy. A valid form-url-encoded policy. For + more information about policy structure, see `Basic Policy + Structure`_ in the Amazon SQS Developer Guide . For more + information about form-url-encoding, see `http://www.w3.org/MarkUp + /html-spec/html-spec_8.html#SEC8.2.1`_. + + `ReceiveMessageWaitTimeSeconds` - The time for which a ReceiveMessage + call will wait for a message to arrive. An integer from 0 to 20 + (seconds). The default for this attribute is 0. + + `VisibilityTimeout` - The visibility timeout for the queue. An + integer from 0 to 43200 (12 hours). The default for this attribute + is 30. 
For more information about visibility timeout, see + `Visibility Timeout`_ in the Amazon SQS Developer Guide . + + """ + params = {'QueueName': queue_name, } + if attributes is not None: + params['Attributes'] = attributes + return self._make_request( + action='CreateQueue', + verb='POST', + path='/', params=params) + + def delete_message(self, queue_url, receipt_handle): + """ + Deletes the specified message from the specified queue. You + specify the message by using the message's `receipt handle` + and not the `message ID` you received when you sent the + message. Even if the message is locked by another reader due + to the visibility timeout setting, it is still deleted from + the queue. If you leave a message in the queue for longer than + the queue's configured retention period, Amazon SQS + automatically deletes it. + + The receipt handle is associated with a specific instance of + receiving the message. If you receive a message more than + once, the receipt handle you get each time you receive the + message is different. When you request `DeleteMessage`, if you + don't provide the most recently received receipt handle for + the message, the request will still succeed, but the message + might not be deleted. + + It is possible you will receive a message even after you have + deleted it. This might happen on rare occasions if one of the + servers storing a copy of the message is unavailable when you + request to delete the message. The copy remains on the server + and might be returned to you again on a subsequent receive + request. You should create your system to be idempotent so + that receiving a particular message more than once is not a + problem. + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type receipt_handle: string + :param receipt_handle: The receipt handle associated with the message + to delete. + + """ + params = { + 'QueueUrl': queue_url, + 'ReceiptHandle': receipt_handle, + } + return self._make_request( + action='DeleteMessage', + verb='POST', + path='/', params=params) + + def delete_message_batch(self, queue_url, entries): + """ + Deletes multiple messages. This is a batch version of + DeleteMessage. The result of the delete action on each message + is reported individually in the response. + + Because the batch request can result in a combination of + successful and unsuccessful actions, you should check for + batch errors even when the call returns an HTTP status code of + 200. + Some API actions take lists of parameters. These lists are + specified using the `param.n` notation. Values of `n` are + integers starting from 1. For example, a parameter list with + two elements looks like this: + `&Attribute.1=this` + + `&Attribute.2=that` + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type entries: list + :param entries: A list of receipt handles for the messages to be + deleted. + + """ + params = {'QueueUrl': queue_url, } + self.build_complex_list_params( + params, entries, + 'Entries.member', + ('Id', 'ReceiptHandle')) + return self._make_request( + action='DeleteMessageBatch', + verb='POST', + path='/', params=params) + + def delete_queue(self, queue_url): + """ + Deletes the queue specified by the **queue URL**, regardless + of whether the queue is empty. If the specified queue does not + exist, Amazon SQS returns a successful response. + + Use `DeleteQueue` with care; once you delete your queue, any + messages in the queue are no longer available. 
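Creating a queue with explicit attributes and batch-deleting messages; IDs and receipt handles are placeholders, and each entry is an ``(Id, ReceiptHandle)`` tuple to match the ``build_complex_list_params`` call above:

    from boto.sqs.connection2 import SQSConnection

    conn = SQSConnection()
    conn.create_queue('example-queue',
                      attributes={'VisibilityTimeout': '60',
                                  'MessageRetentionPeriod': '86400'})

    queue_url = 'https://sqs.us-east-1.amazonaws.com/123456789012/example-queue'
    entries = [('msg-1', 'AQEB-placeholder-handle-1'),
               ('msg-2', 'AQEB-placeholder-handle-2')]
    conn.delete_message_batch(queue_url, entries)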
+ + When you delete a queue, the deletion process takes up to 60 + seconds. Requests you send involving that queue during the 60 + seconds might succeed. For example, a SendMessage request + might succeed, but after the 60 seconds, the queue and that + message you sent no longer exist. Also, when you delete a + queue, you must wait at least 60 seconds before creating a + queue with the same name. + + We reserve the right to delete queues that have had no + activity for more than 30 days. For more information, see `How + Amazon SQS Queues Work`_ in the Amazon SQS Developer Guide . + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + """ + params = {'QueueUrl': queue_url, } + return self._make_request( + action='DeleteQueue', + verb='POST', + path='/', params=params) + + def get_queue_attributes(self, queue_url, attribute_names=None): + """ + Gets attributes for the specified queue. The following + attributes are supported: + + + `All` - returns all values. + + `ApproximateNumberOfMessages` - returns the approximate + number of visible messages in a queue. For more information, + see `Resources Required to Process Messages`_ in the Amazon + SQS Developer Guide . + + `ApproximateNumberOfMessagesNotVisible` - returns the + approximate number of messages that are not timed-out and not + deleted. For more information, see `Resources Required to + Process Messages`_ in the Amazon SQS Developer Guide . + + `VisibilityTimeout` - returns the visibility timeout for the + queue. For more information about visibility timeout, see + `Visibility Timeout`_ in the Amazon SQS Developer Guide . + + `CreatedTimestamp` - returns the time when the queue was + created (epoch time in seconds). + + `LastModifiedTimestamp` - returns the time when the queue + was last changed (epoch time in seconds). + + `Policy` - returns the queue's policy. + + `MaximumMessageSize` - returns the limit of how many bytes a + message can contain before Amazon SQS rejects it. + + `MessageRetentionPeriod` - returns the number of seconds + Amazon SQS retains a message. + + `QueueArn` - returns the queue's Amazon resource name (ARN). + + `ApproximateNumberOfMessagesDelayed` - returns the + approximate number of messages that are pending to be added to + the queue. + + `DelaySeconds` - returns the default delay on the queue in + seconds. + + `ReceiveMessageWaitTimeSeconds` - returns the time for which + a ReceiveMessage call will wait for a message to arrive. + + `RedrivePolicy` - returns the parameters for dead letter + queue functionality of the source queue. For more information + about RedrivePolicy and dead letter queues, see `Using Amazon + SQS Dead Letter Queues`_ in the Amazon SQS Developer Guide . + + + Going forward, new attributes might be added. If you are + writing code that calls this action, we recommend that you + structure your code so that it can handle new attributes + gracefully. Some API actions take lists of parameters. These + lists are specified using the `param.n` notation. Values of + `n` are integers starting from 1. For example, a parameter + list with two elements looks like this: + `&Attribute.1=this` + + `&Attribute.2=that` + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type attribute_names: list + :param attribute_names: A list of attributes to retrieve information + for. 
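Looking a queue up by name and reading its attributes with the new client; queue names and the URL are placeholders:

    from boto.sqs.connection2 import SQSConnection

    conn = SQSConnection()
    url_resp = conn.get_queue_url('example-queue')
    attrs = conn.get_queue_attributes(
        'https://sqs.us-east-1.amazonaws.com/123456789012/example-queue',
        attribute_names=['ApproximateNumberOfMessages', 'RedrivePolicy'])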
+ + """ + params = {'QueueUrl': queue_url, } + if attribute_names is not None: + self.build_list_params(params, + attribute_names, + 'AttributeNames.member') + return self._make_request( + action='GetQueueAttributes', + verb='POST', + path='/', params=params) + + def get_queue_url(self, queue_name, queue_owner_aws_account_id=None): + """ + Returns the URL of an existing queue. This action provides a + simple way to retrieve the URL of an Amazon SQS queue. + + To access a queue that belongs to another AWS account, use the + `QueueOwnerAWSAccountId` parameter to specify the account ID + of the queue's owner. The queue's owner must grant you + permission to access the queue. For more information about + shared queue access, see AddPermission or go to `Shared + Queues`_ in the Amazon SQS Developer Guide . + + :type queue_name: string + :param queue_name: The name of the queue whose URL must be fetched. + Maximum 80 characters; alphanumeric characters, hyphens (-), and + underscores (_) are allowed. + + :type queue_owner_aws_account_id: string + :param queue_owner_aws_account_id: The AWS account ID of the account + that created the queue. + + """ + params = {'QueueName': queue_name, } + if queue_owner_aws_account_id is not None: + params['QueueOwnerAWSAccountId'] = queue_owner_aws_account_id + return self._make_request( + action='GetQueueUrl', + verb='POST', + path='/', params=params) + + def list_dead_letter_source_queues(self, queue_url): + """ + Returns a list of your queues that have the RedrivePolicy + queue attribute configured with a dead letter queue. + + :type queue_url: string + :param queue_url: The queue URL of a dead letter queue. + + """ + params = {'QueueUrl': queue_url, } + return self._make_request( + action='ListDeadLetterSourceQueues', + verb='POST', + path='/', params=params) + + def list_queues(self, queue_name_prefix=None): + """ + Returns a list of your queues. The maximum number of queues + that can be returned is 1000. If you specify a value for the + optional `QueueNamePrefix` parameter, only queues with a name + beginning with the specified value are returned. + + :type queue_name_prefix: string + :param queue_name_prefix: A string to use for filtering the list + results. Only those queues whose name begins with the specified + string are returned. + + """ + params = {} + if queue_name_prefix is not None: + params['QueueNamePrefix'] = queue_name_prefix + return self._make_request( + action='ListQueues', + verb='POST', + path='/', params=params) + + def receive_message(self, queue_url, attribute_names=None, + max_number_of_messages=None, visibility_timeout=None, + wait_time_seconds=None): + """ + Retrieves one or more messages from the specified queue. Long + poll support is enabled by using the `WaitTimeSeconds` + parameter. For more information, see `Amazon SQS Long Poll`_ + in the Amazon SQS Developer Guide . + + Short poll is the default behavior where a weighted random set + of machines is sampled on a `ReceiveMessage` call. This means + only the messages on the sampled machines are returned. If the + number of messages in the queue is small (less than 1000), it + is likely you will get fewer messages than you requested per + `ReceiveMessage` call. If the number of messages in the queue + is extremely small, you might not receive any messages in a + particular `ReceiveMessage` response; in which case you should + repeat the request. + + For each message returned, the response includes the + following: + + + + Message body + + MD5 digest of the message body. 
For information about MD5, + go to `http://www.faqs.org/rfcs/rfc1321.html`_. + + Message ID you received when you sent the message to the + queue. + + Receipt handle. + + + The receipt handle is the identifier you must provide when + deleting the message. For more information, see `Queue and + Message Identifiers`_ in the Amazon SQS Developer Guide . + + You can provide the `VisibilityTimeout` parameter in your + request, which will be applied to the messages that Amazon SQS + returns in the response. If you do not include the parameter, + the overall visibility timeout for the queue is used for the + returned messages. For more information, see `Visibility + Timeout`_ in the Amazon SQS Developer Guide . + + Going forward, new attributes might be added. If you are + writing code that calls this action, we recommend that you + structure your code so that it can handle new attributes + gracefully. + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type attribute_names: list + :param attribute_names: + A list of attributes that need to be returned along with each message. + + The following lists the names and descriptions of the attributes that + can be returned: + + + + `All` - returns all values. + + `ApproximateFirstReceiveTimestamp` - returns the time when the + message was first received (epoch time in milliseconds). + + `ApproximateReceiveCount` - returns the number of times a message has + been received but not deleted. + + `SenderId` - returns the AWS account number (or the IP address, if + anonymous access is allowed) of the sender. + + `SentTimestamp` - returns the time when the message was sent (epoch + time in milliseconds). + + :type max_number_of_messages: integer + :param max_number_of_messages: The maximum number of messages to + return. Amazon SQS never returns more messages than this value but + may return fewer. + Not all of the messages are necessarily returned. + + :type visibility_timeout: integer + :param visibility_timeout: The duration (in seconds) that the received + messages are hidden from subsequent retrieve requests after being + retrieved by a `ReceiveMessage` request. + + :type wait_time_seconds: integer + :param wait_time_seconds: The duration (in seconds) for which the call + will wait for a message to arrive in the queue before returning. If + a message is available, the call will return sooner than + WaitTimeSeconds. + + """ + params = {'QueueUrl': queue_url, } + if attribute_names is not None: + self.build_list_params(params, + attribute_names, + 'AttributeNames.member') + if max_number_of_messages is not None: + params['MaxNumberOfMessages'] = max_number_of_messages + if visibility_timeout is not None: + params['VisibilityTimeout'] = visibility_timeout + if wait_time_seconds is not None: + params['WaitTimeSeconds'] = wait_time_seconds + return self._make_request( + action='ReceiveMessage', + verb='POST', + path='/', params=params) + + def remove_permission(self, queue_url, label): + """ + Revokes any permissions in the queue policy that match the + specified `Label` parameter. Only the owner of the queue can + remove permissions. + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type label: string + :param label: The identification of the permission to remove. This is + the label added with the AddPermission action.
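# Editor's sketch, not part of the diff: a long-poll receive loop using the
# WaitTimeSeconds support described in receive_message() above. `conn`, the
# queue URL, and the top-level 'Messages' key in the decoded JSON response
# are assumptions.
while True:
    resp = conn.receive_message(
        queue_url,
        attribute_names=['ApproximateReceiveCount', 'SentTimestamp'],
        max_number_of_messages=10,
        wait_time_seconds=20)  # block up to 20s instead of short polling
    for msg in resp.get('Messages', []):
        handle(msg)  # hypothetical application callback
        conn.delete_message(queue_url, msg['ReceiptHandle'])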
+ + """ + params = {'QueueUrl': queue_url, 'Label': label, } + return self._make_request( + action='RemovePermission', + verb='POST', + path='/', params=params) + + def send_message(self, queue_url, message_body, delay_seconds=None): + """ + Delivers a message to the specified queue. With Amazon SQS, + you now have the ability to send large payload messages that + are up to 256KB (262,144 bytes) in size. To send large + payloads, you must use an AWS SDK that supports SigV4 signing. + To verify whether SigV4 is supported for an AWS SDK, check the + SDK release notes. + + The following list shows the characters (in Unicode) allowed + in your message, according to the W3C XML specification. For + more information, go to `http://www.w3.org/TR/REC- + xml/#charsets`_ If you send any characters not included in the + list, your request will be rejected. + + #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | + [#x10000 to #x10FFFF] + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type message_body: string + :param message_body: The message to send. String maximum 256 KB in + size. For a list of allowed characters, see the preceding important + note. + + :type delay_seconds: integer + :param delay_seconds: The number of seconds (0 to 900 - 15 minutes) to + delay a specific message. Messages with a positive `DelaySeconds` + value become available for processing after the delay time is + finished. If you don't specify a value, the default value for the + queue applies. + + """ + params = { + 'QueueUrl': queue_url, + 'MessageBody': message_body, + } + if delay_seconds is not None: + params['DelaySeconds'] = delay_seconds + return self._make_request( + action='SendMessage', + verb='POST', + path='/', params=params) + + def send_message_batch(self, queue_url, entries): + """ + Delivers up to ten messages to the specified queue. This is a + batch version of SendMessage. The result of the send action on + each message is reported individually in the response. The + maximum allowed individual message size is 256 KB (262,144 + bytes). + + The maximum total payload size (i.e., the sum of all a batch's + individual message lengths) is also 256 KB (262,144 bytes). + + If the `DelaySeconds` parameter is not specified for an entry, + the default for the queue is used. + The following list shows the characters (in Unicode) that are + allowed in your message, according to the W3C XML + specification. For more information, go to + `http://www.faqs.org/rfcs/rfc1321.html`_. If you send any + characters that are not included in the list, your request + will be rejected. + #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | + [#x10000 to #x10FFFF] + Because the batch request can result in a combination of + successful and unsuccessful actions, you should check for + batch errors even when the call returns an HTTP status code of + 200. Some API actions take lists of parameters. These lists + are specified using the `param.n` notation. Values of `n` are + integers starting from 1. For example, a parameter list with + two elements looks like this: + `&Attribute.1=this` + + `&Attribute.2=that` + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type entries: list + :param entries: A list of SendMessageBatchRequestEntry items. 
+ + """ + params = {'QueueUrl': queue_url, } + self.build_complex_list_params( + params, entries, + 'Entries.member', + ('Id', 'MessageBody', 'DelaySeconds')) + return self._make_request( + action='SendMessageBatch', + verb='POST', + path='/', params=params) + + def set_queue_attributes(self, queue_url, attributes): + """ + Sets the value of one or more queue attributes. + Going forward, new attributes might be added. If you are + writing code that calls this action, we recommend that you + structure your code so that it can handle new attributes + gracefully. + + :type queue_url: string + :param queue_url: The URL of the Amazon SQS queue to take action on. + + :type attributes: map + :param attributes: A map of attributes to set. + The following lists the names, descriptions, and values of the special + request parameters the `SetQueueAttributes` action uses: + + + + + `DelaySeconds` - The time in seconds that the delivery of all + messages in the queue will be delayed. An integer from 0 to 900 (15 + minutes). The default for this attribute is 0 (zero). + + `MaximumMessageSize` - The limit of how many bytes a message can + contain before Amazon SQS rejects it. An integer from 1024 bytes (1 + KiB) up to 262144 bytes (256 KiB). The default for this attribute + is 262144 (256 KiB). + + `MessageRetentionPeriod` - The number of seconds Amazon SQS retains a + message. Integer representing seconds, from 60 (1 minute) to + 1209600 (14 days). The default for this attribute is 345600 (4 + days). + + `Policy` - The queue's policy. A valid form-url-encoded policy. For + more information about policy structure, see `Basic Policy + Structure`_ in the Amazon SQS Developer Guide . For more + information about form-url-encoding, see `http://www.w3.org/MarkUp + /html-spec/html-spec_8.html#SEC8.2.1`_. + + `ReceiveMessageWaitTimeSeconds` - The time for which a ReceiveMessage + call will wait for a message to arrive. An integer from 0 to 20 + (seconds). The default for this attribute is 0. + + `VisibilityTimeout` - The visibility timeout for the queue. An + integer from 0 to 43200 (12 hours). The default for this attribute + is 30. For more information about visibility timeout, see + Visibility Timeout in the Amazon SQS Developer Guide . + + `RedrivePolicy` - The parameters for dead letter queue functionality + of the source queue. For more information about RedrivePolicy and + dead letter queues, see Using Amazon SQS Dead Letter Queues in the + Amazon SQS Developer Guide . + + """ + params = {'QueueUrl': queue_url, } + # TODO: NEED TO PROCESS COMPLEX ARG attributes of type map. 
+ return self._make_request( + action='SetQueueAttributes', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/awx/lib/site-packages/boto/sqs/message.py b/awx/lib/site-packages/boto/sqs/message.py index a036471298..ce7976c1a8 100644 --- a/awx/lib/site-packages/boto/sqs/message.py +++ b/awx/lib/site-packages/boto/sqs/message.py @@ -177,7 +177,7 @@ class MHMessage(Message): """ def __init__(self, queue=None, body=None, xml_attrs=None): - if body == None or body == '': + if body is None or body == '': body = {} super(MHMessage, self).__init__(queue, body) diff --git a/awx/lib/site-packages/boto/sqs/regioninfo.py b/awx/lib/site-packages/boto/sqs/regioninfo.py index 9f7d7f12fa..d21dff9cda 100644 --- a/awx/lib/site-packages/boto/sqs/regioninfo.py +++ b/awx/lib/site-packages/boto/sqs/regioninfo.py @@ -26,7 +26,8 @@ from boto.regioninfo import RegionInfo class SQSRegionInfo(RegionInfo): - def __init__(self, connection=None, name=None, endpoint=None): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): from boto.sqs.connection import SQSConnection super(SQSRegionInfo, self).__init__(connection, name, endpoint, SQSConnection) diff --git a/awx/lib/site-packages/boto/sts/__init__.py b/awx/lib/site-packages/boto/sts/__init__.py index d7c26f56ec..a130b7fed9 100644 --- a/awx/lib/site-packages/boto/sts/__init__.py +++ b/awx/lib/site-packages/boto/sts/__init__.py @@ -21,7 +21,7 @@ # IN THE SOFTWARE. 
from connection import STSConnection -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,16 +31,7 @@ def regions(): :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ - return [RegionInfo(name='us-east-1', - endpoint='sts.amazonaws.com', - connection_cls=STSConnection), - RegionInfo(name='us-gov-west-1', - endpoint='sts.us-gov-west-1.amazonaws.com', - connection_cls=STSConnection), - RegionInfo(name='cn-north-1', - endpoint='sts.cn-north-1.amazonaws.com.cn', - connection_cls=STSConnection), - ] + return get_regions('sts', connection_cls=STSConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/sts/connection.py b/awx/lib/site-packages/boto/sts/connection.py index 7c480a3fbf..4672c7c88f 100644 --- a/awx/lib/site-packages/boto/sts/connection.py +++ b/awx/lib/site-packages/boto/sts/connection.py @@ -70,7 +70,7 @@ class STSConnection(AWSQueryConnection): proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', converter=None, validate_certs=True, anon=False, - security_token=None): + security_token=None, profile_name=None): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, @@ -85,7 +85,8 @@ class STSConnection(AWSQueryConnection): self.region.endpoint, debug, https_connection_factory, path, validate_certs=validate_certs, - security_token=security_token) + security_token=security_token, + profile_name=profile_name) def _required_auth_capability(self): if self.anon: @@ -237,7 +238,9 @@ class STSConnection(AWSQueryConnection): FederationToken, verb='POST') def assume_role(self, role_arn, role_session_name, policy=None, - duration_seconds=None, external_id=None): + duration_seconds=None, external_id=None, + mfa_serial_number=None, + mfa_token=None): """ Returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) @@ -327,6 +330,24 @@ class STSConnection(AWSQueryConnection): information about the external ID, see `About the External ID`_ in Using Temporary Security Credentials . + :type mfa_serial_number: string + :param mfa_serial_number: The identification number of the MFA device that + is associated with the user who is making the AssumeRole call. + Specify this value if the trust policy of the role being assumed + includes a condition that requires MFA authentication. The value is + either the serial number for a hardware device (such as + GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device + (such as arn:aws:iam::123456789012:mfa/user). Minimum length of 9. + Maximum length of 256. + + :type mfa_token: string + :param mfa_token: The value provided by the MFA device, if the trust + policy of the role being assumed requires MFA (that is, if the + policy includes a condition that tests for MFA). If the role being + assumed requires MFA and if the TokenCode value is missing or + expired, the AssumeRole call returns an "access denied" error. + Minimum length of 6. Maximum length of 6.
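# Editor's sketch, not part of the diff: calling assume_role() with the new
# MFA parameters, which map to SerialNumber and TokenCode in the request.
# Region, ARNs, and the token value are illustrative.
from boto.sts import connect_to_region

sts = connect_to_region('us-east-1')
assumed = sts.assume_role(
    role_arn='arn:aws:iam::123456789012:role/demo',          # hypothetical
    role_session_name='demo-session',
    mfa_serial_number='arn:aws:iam::123456789012:mfa/user',  # hypothetical
    mfa_token='123456')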
+ """ params = { 'RoleArn': role_arn, @@ -338,6 +359,10 @@ class STSConnection(AWSQueryConnection): params['DurationSeconds'] = duration_seconds if external_id is not None: params['ExternalId'] = external_id + if mfa_serial_number is not None: + params['SerialNumber'] = mfa_serial_number + if mfa_token is not None: + params['TokenCode'] = mfa_token return self.get_object('AssumeRole', params, AssumedRole, verb='POST') def assume_role_with_saml(self, role_arn, principal_arn, saml_assertion, diff --git a/awx/lib/site-packages/boto/support/__init__.py b/awx/lib/site-packages/boto/support/__init__.py index 6d59b375e0..c114a9a373 100644 --- a/awx/lib/site-packages/boto/support/__init__.py +++ b/awx/lib/site-packages/boto/support/__init__.py @@ -20,7 +20,7 @@ # IN THE SOFTWARE. # -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(): @@ -31,13 +31,7 @@ def regions(): :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.support.layer1 import SupportConnection - return [ - RegionInfo( - name='us-east-1', - endpoint='support.us-east-1.amazonaws.com', - connection_cls=SupportConnection - ), - ] + return get_regions('support', connection_cls=SupportConnection) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/support/layer1.py b/awx/lib/site-packages/boto/support/layer1.py index 3b52efd90b..c4e18da030 100644 --- a/awx/lib/site-packages/boto/support/layer1.py +++ b/awx/lib/site-packages/boto/support/layer1.py @@ -42,24 +42,24 @@ class SupportConnection(AWSQueryConnection): format. The AWS Support service also exposes a set of `Trusted Advisor`_ - features. You can retrieve a list of checks you can run on your - resources, specify checks to run and refresh, and check the status - of checks you have submitted. + features. You can retrieve a list of checks and their + descriptions, get check results, specify checks to refresh, and + get the refresh status of checks. The following list describes the AWS Support case management actions: + **Service names, issue categories, and available severity - levels. **The actions `DescribeServices`_ and - `DescribeSeverityLevels`_ enable you to obtain AWS service names, - service codes, service categories, and problem severity levels. - You use these values when you call the `CreateCase`_ action. - + **Case Creation, case details, and case resolution**. The - actions `CreateCase`_, `DescribeCases`_, and `ResolveCase`_ enable - you to create AWS Support cases, retrieve them, and resolve them. - + **Case communication**. The actions `DescribeCommunications`_ - and `AddCommunicationToCase`_ enable you to retrieve and add + levels. **The actions DescribeServices and DescribeSeverityLevels + enable you to obtain AWS service names, service codes, service + categories, and problem severity levels. You use these values when + you call the CreateCase action. + + **Case creation, case details, and case resolution.** The + actions CreateCase, DescribeCases, and ResolveCase enable you to + create AWS Support cases, retrieve them, and resolve them. + + **Case communication.** The actions DescribeCommunications and + AddCommunicationToCase enable you to retrieve and add communication to AWS Support cases. @@ -67,26 +67,26 @@ class SupportConnection(AWSQueryConnection): Support service for Trusted Advisor: - + `DescribeTrustedAdvisorChecks`_ returns the list of checks that - you can run against your AWS resources. 
+ + DescribeTrustedAdvisorChecks returns the list of checks that run + against your AWS resources. + Using the CheckId for a specific check returned by DescribeTrustedAdvisorChecks, you can call - `DescribeTrustedAdvisorCheckResult`_ and obtain a new result for the check you specified. - + Using `DescribeTrustedAdvisorCheckSummaries`_, you can get - summaries for a set of Trusted Advisor checks. - + `RefreshTrustedAdvisorCheck`_ enables you to request that - Trusted Advisor run the check again. - + `DescribeTrustedAdvisorCheckRefreshStatuses`_ gets statuses on - the checks you are running. + DescribeTrustedAdvisorCheckResult to obtain the results for the + check you specified. + + DescribeTrustedAdvisorCheckSummaries returns summarized results + for one or more Trusted Advisor checks. + + RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a + specified check. + + DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh + status of one or more checks. - For authentication of requests, the AWS Support uses `Signature + For authentication of requests, AWS Support uses `Signature Version 4 Signing Process`_. - See the AWS Support `Developer Guide`_ for information about how - to use this service to manage create and manage your support - cases, and how to call Trusted Advisor for results of checks on - your resources. + See the AWS Support `User Guide`_ for information about how to use + this service to create and manage your support cases, and how to + call Trusted Advisor for results of checks on your resources. """ APIVersion = "2013-04-15" DefaultRegionName = "us-east-1" @@ -117,32 +117,30 @@ class SupportConnection(AWSQueryConnection): def add_communication_to_case(self, communication_body, case_id=None, cc_email_addresses=None): """ - This action adds additional customer communication to an AWS - Support case. You use the CaseId value to identify the case to - which you want to add communication. You can list a set of - email addresses to copy on the communication using the - CcEmailAddresses value. The CommunicationBody value contains - the text of the communication. + Adds additional customer communication to an AWS Support case. + You use the `CaseId` value to identify the case to add + communication to. You can list a set of email addresses to + copy on the communication using the `CcEmailAddresses` value. + The `CommunicationBody` value contains the text of the + communication. - This action's response indicates the success or failure of the - request. + The response indicates the success or failure of the request. - This action implements a subset of the behavior on the AWS + This operation implements a subset of the behavior on the AWS Support `Your Support Cases`_ web form. :type case_id: string - :param case_id: String that indicates the AWS Support caseID requested - or returned in the call. The caseID is an alphanumeric string - formatted as shown in this example CaseId: - case-12345678910-2013-c4c1d2bf33c5cf47 + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 :type communication_body: string - :param communication_body: Represents the body of an email - communication added to the support case. + :param communication_body: The body of an email communication to add to + the support case. 
:type cc_email_addresses: list - :param cc_email_addresses: Represents any email addresses contained in - the CC line of an email added to the support case. + :param cc_email_addresses: The email addresses in the CC line of an + email to be added to the support case. """ params = {'communicationBody': communication_body, } @@ -157,89 +155,89 @@ class SupportConnection(AWSQueryConnection): severity_code=None, category_code=None, cc_email_addresses=None, language=None, issue_type=None): """ - Creates a new case in the AWS Support Center. This action is - modeled on the behavior of the AWS Support Center `Open a new - case`_ page. Its parameters require you to specify the + Creates a new case in the AWS Support Center. This operation + is modeled on the behavior of the AWS Support Center `Open a + new case`_ page. Its parameters require you to specify the following information: - #. **ServiceCode.** Represents a code for an AWS service. You - obtain the ServiceCode by calling `DescribeServices`_. - #. **CategoryCode**. Represents a category for the service - defined for the ServiceCode value. You also obtain the - cateogory code for a service by calling `DescribeServices`_. - Each AWS service defines its own set of category codes. - #. **SeverityCode**. Represents a value that specifies the - urgency of the case, and the time interval in which your - service level agreement specifies a response from AWS Support. - You obtain the SeverityCode by calling - `DescribeSeverityLevels`_. - #. **Subject**. Represents the **Subject** field on the AWS + #. **ServiceCode.** The code for an AWS service. You obtain + the `ServiceCode` by calling DescribeServices. + #. **CategoryCode.** The category for the service defined for + the `ServiceCode` value. You also obtain the category code for + a service by calling DescribeServices. Each AWS service + defines its own set of category codes. + #. **SeverityCode.** A value that indicates the urgency of the + case, which in turn determines the response time according to + your service level agreement with AWS Support. You obtain the + SeverityCode by calling DescribeSeverityLevels. + #. **Subject.** The **Subject** field on the AWS Support + Center `Open a new case`_ page. + #. **CommunicationBody.** The **Description** field on the AWS Support Center `Open a new case`_ page. - #. **CommunicationBody**. Represents the **Description** field - on the AWS Support Center `Open a new case`_ page. - #. **Language**. Specifies the human language in which AWS - Support handles the case. The API currently supports English - and Japanese. - #. **CcEmailAddresses**. Represents the AWS Support Center - **CC** field on the `Open a new case`_ page. You can list - email addresses to be copied on any correspondence about the - case. The account that opens the case is already identified by - passing the AWS Credentials in the HTTP POST method or in a - method or function call from one of the programming languages - supported by an `AWS SDK`_. - #. **IssueType**. Indicates the type of issue for the case. - You can specify either "customer-service" or "technical." If - you do not indicate a value, this parameter defaults to - "technical." + #. **Language.** The human language in which AWS Support + handles the case. English and Japanese are currently + supported. + #. **CcEmailAddresses.** The AWS Support Center **CC** field + on the `Open a new case`_ page. You can list email addresses + to be copied on any correspondence about the case. 
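# Editor's sketch, not part of the diff: opening a case with the fields the
# docstring above walks through. The codes shown are illustrative; real
# values come from describe_services() and describe_severity_levels().
case = conn.create_case(
    subject='Instance unreachable',
    communication_body='Description of the problem goes here.',
    service_code='amazon-elastic-compute-cloud-linux',  # hypothetical
    category_code='instance-issue',                     # hypothetical
    severity_code='low',
    language='en',
    issue_type='technical')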
The account + that opens the case is already identified by passing the AWS + Credentials in the HTTP POST method or in a method or function + call from one of the programming languages supported by an + `AWS SDK`_. + #. **IssueType.** The type of issue for the case. You can + specify either "customer-service" or "technical." If you do + not indicate a value, the default is "technical." + The AWS Support API does not currently support the ability to add attachments to cases. You can, however, call - `AddCommunicationToCase`_ to add information to an open case. + AddCommunicationToCase to add information to an open case. - A successful `CreateCase`_ request returns an AWS Support case - number. Case numbers are used by `DescribeCases`_ request to - retrieve existing AWS Support support cases. + + A successful CreateCase request returns an AWS Support case + number. Case numbers are used by the DescribeCases action to + retrieve existing AWS Support cases. :type subject: string - :param subject: Title of the AWS Support case. + :param subject: The title of the AWS Support case. :type service_code: string - :param service_code: Code for the AWS service returned by the call to - `DescribeServices`_. + :param service_code: The code for the AWS service returned by the call + to DescribeServices. :type severity_code: string :param severity_code: - Code for the severity level returned by the call to - `DescribeSeverityLevels`_. + The code for the severity level returned by the call to + DescribeSeverityLevels. + The availability of severity levels depends on each customer's support subscription. In other words, your subscription may not necessarily require the urgent level of response time. :type category_code: string - :param category_code: Specifies the category of problem for the AWS - Support case. + :param category_code: The category of problem for the AWS Support case. :type communication_body: string - :param communication_body: Parameter that represents the communication - body text when you create an AWS Support case by calling - `CreateCase`_. + :param communication_body: The communication body text when you create + an AWS Support case by calling CreateCase. :type cc_email_addresses: list - :param cc_email_addresses: List of email addresses that AWS Support + :param cc_email_addresses: A list of email addresses that AWS Support copies on case correspondence. :type language: string - :param language: Specifies the ISO 639-1 code for the language in which - AWS provides support. AWS Support currently supports English and - Japanese, for which the codes are en and ja , respectively. - Language parameters must be passed explicitly for operations that - take them. + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. :type issue_type: string - :param issue_type: Field passed as a parameter in a `CreateCase`_ call. + :param issue_type: The type of issue for the case. You can specify + either "customer-service" or "technical." If you do not indicate a + value, the default is "technical." """ params = { @@ -266,51 +264,51 @@ class SupportConnection(AWSQueryConnection): include_resolved_cases=None, next_token=None, max_results=None, language=None): """ - This action returns a list of cases that you specify by - passing one or more CaseIds. 
In addition, you can filter the - cases by date by setting values for the AfterTime and - BeforeTime request parameters. + Returns a list of cases that you specify by passing one or + more case IDs. In addition, you can filter the cases by date + by setting values for the `AfterTime` and `BeforeTime` request + parameters. + The response returns the following in JSON format: - #. One or more `CaseDetails`_ data types. - #. One or more NextToken objects, strings that specifies where - to paginate the returned records represented by CaseDetails . + + #. One or more CaseDetails data types. + #. One or more `NextToken` values, which specify where to + paginate the returned records represented by the `CaseDetails` + objects. :type case_id_list: list - :param case_id_list: A list of Strings comprising ID numbers for - support cases you want returned. The maximum number of cases is - 100. + :param case_id_list: A list of ID numbers of the support cases you want + returned. The maximum number of cases is 100. :type display_id: string - :param display_id: String that corresponds to the ID value displayed - for a case in the AWS Support Center user interface. + :param display_id: The ID displayed for a case in the AWS Support + Center user interface. :type after_time: string - :param after_time: Start date for a filtered date search on support + :param after_time: The start date for a filtered date search on support case communications. :type before_time: string - :param before_time: End date for a filtered date search on support case - communications. + :param before_time: The end date for a filtered date search on support + case communications. :type include_resolved_cases: boolean - :param include_resolved_cases: Boolean that indicates whether or not - resolved support cases should be listed in the `DescribeCases`_ - search. + :param include_resolved_cases: Specifies whether resolved support cases + should be included in the DescribeCases results. :type next_token: string - :param next_token: Defines a resumption point for pagination. + :param next_token: A resumption point for pagination. :type max_results: integer - :param max_results: Integer that sets the maximum number of results to - return before paginating. + :param max_results: The maximum number of results to return before + paginating. :type language: string - :param language: Specifies the ISO 639-1 code for the language in which - AWS provides support. AWS Support currently supports English and - Japanese, for which the codes are en and ja , respectively. - Language parameters must be passed explicitly for operations that - take them. + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {} @@ -337,36 +335,35 @@ class SupportConnection(AWSQueryConnection): after_time=None, next_token=None, max_results=None): """ - This action returns communications regarding the support case. - You can use the AfterTime and BeforeTime parameters to filter - by date. The CaseId parameter enables you to identify a - specific case by its CaseId number. + Returns communications regarding the support case. You can use + the `AfterTime` and `BeforeTime` parameters to filter by date. + The `CaseId` parameter enables you to identify a specific case + by its `CaseId` value. 
- The MaxResults and NextToken parameters enable you to control - the pagination of the result set. Set MaxResults to the number - of cases you want displayed on each page, and use NextToken to - specify the resumption of pagination. + The `MaxResults` and `NextToken` parameters enable you to + control the pagination of the result set. Set `MaxResults` to + the number of cases you want displayed on each page, and use + `NextToken` to specify the resumption of pagination. :type case_id: string - :param case_id: String that indicates the AWS Support caseID requested - or returned in the call. The caseID is an alphanumeric string - formatted as shown in this example CaseId: - case-12345678910-2013-c4c1d2bf33c5cf47 + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 :type before_time: string - :param before_time: End date for a filtered date search on support case - communications. + :param before_time: The end date for a filtered date search on support + case communications. :type after_time: string - :param after_time: Start date for a filtered date search on support + :param after_time: The start date for a filtered date search on support case communications. :type next_token: string - :param next_token: Defines a resumption point for pagination. + :param next_token: A resumption point for pagination. :type max_results: integer - :param max_results: Integer that sets the maximum number of results to - return before paginating. + :param max_results: The maximum number of results to return before + paginating. """ params = {'caseId': case_id, } @@ -385,7 +382,7 @@ class SupportConnection(AWSQueryConnection): """ Returns the current list of AWS services and a list of service categories that applies to each one. You then use service - names and categories in your `CreateCase`_ requests. Each AWS + names and categories in your CreateCase requests. Each AWS service has its own set of categories. The service codes and category codes correspond to the values @@ -399,15 +396,14 @@ class SupportConnection(AWSQueryConnection): category codes. :type service_code_list: list - :param service_code_list: List in JSON format of service codes + :param service_code_list: A JSON-formatted list of service codes available for AWS services. :type language: string - :param language: Specifies the ISO 639-1 code for the language in which - AWS provides support. AWS Support currently supports English and - Japanese, for which the codes are en and ja , respectively. - Language parameters must be passed explicitly for operations that - take them. + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {} @@ -420,17 +416,16 @@ class SupportConnection(AWSQueryConnection): def describe_severity_levels(self, language=None): """ - This action returns the list of severity levels that you can - assign to an AWS Support case. The severity level for a case - is also a field in the `CaseDetails`_ data type included in - any `CreateCase`_ request. + Returns the list of severity levels that you can assign to an + AWS Support case. The severity level for a case is also a + field in the CaseDetails data type included in any CreateCase + request. 
:type language: string - :param language: Specifies the ISO 639-1 code for the language in which - AWS provides support. AWS Support currently supports English and - Japanese, for which the codes are en and ja , respectively. - Language parameters must be passed explicitly for operations that - take them. + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {} @@ -441,13 +436,12 @@ class SupportConnection(AWSQueryConnection): def describe_trusted_advisor_check_refresh_statuses(self, check_ids): """ - Returns the status of all refresh requests Trusted Advisor - checks called using `RefreshTrustedAdvisorCheck`_. + Returns the refresh status of the Trusted Advisor checks that + have the specified check IDs. Check IDs can be obtained by + calling DescribeTrustedAdvisorChecks. :type check_ids: list - :param check_ids: List of the CheckId values for the Trusted Advisor - checks for which you want to refresh the status. You obtain the - CheckId values by calling `DescribeTrustedAdvisorChecks`_. + :param check_ids: The IDs of the Trusted Advisor checks. """ params = {'checkIds': check_ids, } @@ -456,41 +450,35 @@ class SupportConnection(AWSQueryConnection): def describe_trusted_advisor_check_result(self, check_id, language=None): """ - This action responds with the results of a Trusted Advisor - check. Once you have obtained the list of available Trusted - Advisor checks by calling `DescribeTrustedAdvisorChecks`_, you - specify the CheckId for the check you want to retrieve from - AWS Support. + Returns the results of the Trusted Advisor check that has the + specified check ID. Check IDs can be obtained by calling + DescribeTrustedAdvisorChecks. - The response for this action contains a JSON-formatted - `TrustedAdvisorCheckResult`_ object - , which is a container for the following three objects: + The response contains a TrustedAdvisorCheckResult object, + which contains these three objects: - - #. `TrustedAdvisorCategorySpecificSummary`_ - #. `TrustedAdvisorResourceDetail`_ - #. `TrustedAdvisorResourcesSummary`_ + + TrustedAdvisorCategorySpecificSummary + + TrustedAdvisorResourceDetail + + TrustedAdvisorResourcesSummary - In addition, the response contains the following fields: + In addition, the response contains these fields: - #. **Status**. Overall status of the check. - #. **Timestamp**. Time at which Trusted Advisor last ran the - check. - #. **CheckId**. Unique identifier for the specific check - returned by the request. + + **Status.** The alert status of the check: "ok" (green), + "warning" (yellow), "error" (red), or "not_available". + + **Timestamp.** The time of the last refresh of the check. + + **CheckId.** The unique identifier for the check. :type check_id: string - :param check_id: + :param check_id: The unique identifier for the Trusted Advisor check. :type language: string - :param language: Specifies the ISO 639-1 code for the language in which - AWS provides support. AWS Support currently supports English and - Japanese, for which the codes are en and ja , respectively. - Language parameters must be passed explicitly for operations that - take them. + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. 
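# Editor's sketch, not part of the diff: the check-then-fetch flow the
# Trusted Advisor docstrings above describe. `conn` and the 'checks'/'id'
# keys in the decoded response are assumptions.
checks = conn.describe_trusted_advisor_checks('en')
check_id = checks['checks'][0]['id']
result = conn.describe_trusted_advisor_check_result(check_id, language='en')
# Per the description above, the result carries Status ("ok", "warning",
# "error", or "not_available"), Timestamp, and CheckId.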
""" params = {'checkId': check_id, } @@ -501,17 +489,15 @@ class SupportConnection(AWSQueryConnection): def describe_trusted_advisor_check_summaries(self, check_ids): """ - This action enables you to get the latest summaries for - Trusted Advisor checks that you specify in your request. You - submit the list of Trusted Advisor checks for which you want - summaries. You obtain these CheckIds by submitting a - `DescribeTrustedAdvisorChecks`_ request. + Returns the summaries of the results of the Trusted Advisor + checks that have the specified check IDs. Check IDs can be + obtained by calling DescribeTrustedAdvisorChecks. - The response body contains an array of - `TrustedAdvisorCheckSummary`_ objects. + The response contains an array of TrustedAdvisorCheckSummary + objects. :type check_ids: list - :param check_ids: Unique identifier for a Trusted Advisor check. + :param check_ids: The IDs of the Trusted Advisor checks. """ params = {'checkIds': check_ids, } @@ -520,18 +506,17 @@ class SupportConnection(AWSQueryConnection): def describe_trusted_advisor_checks(self, language): """ - This action enables you to get a list of the available Trusted - Advisor checks. You must specify a language code. English - ("en") and Japanese ("jp") are currently supported. The - response contains a list of `TrustedAdvisorCheckDescription`_ - objects. + Returns information about all available Trusted Advisor + checks, including name, ID, category, description, and + metadata. You must specify a language code; English ("en") and + Japanese ("ja") are currently supported. The response contains + a TrustedAdvisorCheckDescription for each check. :type language: string - :param language: Specifies the ISO 639-1 code for the language in which - AWS provides support. AWS Support currently supports English and - Japanese, for which the codes are en and ja , respectively. - Language parameters must be passed explicitly for operations that - take them. + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. """ params = {'language': language, } @@ -540,14 +525,22 @@ class SupportConnection(AWSQueryConnection): def refresh_trusted_advisor_check(self, check_id): """ - This action enables you to query the service to request a - refresh for a specific Trusted Advisor check. Your request - body contains a CheckId for which you are querying. The - response body contains a `RefreshTrustedAdvisorCheckResult`_ - object containing Status and TimeUntilNextRefresh fields. + Requests a refresh of the Trusted Advisor check that has the + specified check ID. Check IDs can be obtained by calling + DescribeTrustedAdvisorChecks. + + The response contains a RefreshTrustedAdvisorCheckResult + object, which contains these fields: + + + + **Status.** The refresh status of the check: "none", + "enqueued", "processing", "success", or "abandoned". + + **MillisUntilNextRefreshable.** The amount of time, in + milliseconds, until the check is eligible for refresh. + + **CheckId.** The unique identifier for the check. :type check_id: string - :param check_id: + :param check_id: The unique identifier for the Trusted Advisor check. 
""" params = {'checkId': check_id, } @@ -556,15 +549,14 @@ class SupportConnection(AWSQueryConnection): def resolve_case(self, case_id=None): """ - Takes a CaseId and returns the initial state of the case along - with the state of the case after the call to `ResolveCase`_ + Takes a `CaseId` and returns the initial state of the case + along with the state of the case after the call to ResolveCase completed. :type case_id: string - :param case_id: String that indicates the AWS Support caseID requested - or returned in the call. The caseID is an alphanumeric string - formatted as shown in this example CaseId: - case-12345678910-2013-c4c1d2bf33c5cf47 + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 """ params = {} diff --git a/awx/lib/site-packages/boto/swf/__init__.py b/awx/lib/site-packages/boto/swf/__init__.py index fc6ebfe29c..bebbd69632 100644 --- a/awx/lib/site-packages/boto/swf/__init__.py +++ b/awx/lib/site-packages/boto/swf/__init__.py @@ -23,20 +23,10 @@ # from boto.ec2.regioninfo import RegionInfo +from boto.regioninfo import get_regions, load_regions import boto.swf.layer1 -REGION_ENDPOINTS = { - 'us-east-1': 'swf.us-east-1.amazonaws.com', - 'us-gov-west-1': 'swf.us-gov-west-1.amazonaws.com', - 'us-west-1': 'swf.us-west-1.amazonaws.com', - 'us-west-2': 'swf.us-west-2.amazonaws.com', - 'sa-east-1': 'swf.sa-east-1.amazonaws.com', - 'eu-west-1': 'swf.eu-west-1.amazonaws.com', - 'ap-northeast-1': 'swf.ap-northeast-1.amazonaws.com', - 'ap-southeast-1': 'swf.ap-southeast-1.amazonaws.com', - 'ap-southeast-2': 'swf.ap-southeast-2.amazonaws.com', - 'cn-north-1': 'swf.cn-north-1.amazonaws.com.cn', -} +REGION_ENDPOINTS = load_regions().get('swf', {}) def regions(**kw_params): @@ -46,9 +36,7 @@ def regions(**kw_params): :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` """ - return [RegionInfo(name=region_name, endpoint=REGION_ENDPOINTS[region_name], - connection_cls=boto.swf.layer1.Layer1) - for region_name in REGION_ENDPOINTS] + return get_regions('swf', connection_cls=boto.swf.layer1.Layer1) def connect_to_region(region_name, **kw_params): diff --git a/awx/lib/site-packages/boto/swf/layer1.py b/awx/lib/site-packages/boto/swf/layer1.py index d027abd4ef..73a809b2b2 100644 --- a/awx/lib/site-packages/boto/swf/layer1.py +++ b/awx/lib/site-packages/boto/swf/layer1.py @@ -69,7 +69,7 @@ class Layer1(AWSAuthConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, - debug=0, session_token=None, region=None): + debug=0, session_token=None, region=None, profile_name=None): if not region: region_name = boto.config.get('SWF', 'region', self.DefaultRegionName) @@ -82,7 +82,7 @@ class Layer1(AWSAuthConnection): super(Layer1, self).__init__(self.region.endpoint, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, - debug, session_token) + debug, session_token, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] diff --git a/awx/lib/site-packages/boto/utils.py b/awx/lib/site-packages/boto/utils.py index ec81f4da2f..18d34f659d 100644 --- a/awx/lib/site-packages/boto/utils.py +++ b/awx/lib/site-packages/boto/utils.py @@ -843,7 +843,7 @@ def notify(subject, body=None, html_body=None, to_string=None, def get_utf8_value(value): - if not isinstance(value, str) and not isinstance(value, unicode): + if not 
isinstance(value, basestring): value = str(value) if isinstance(value, unicode): return value.encode('utf-8') @@ -1025,3 +1025,12 @@ def merge_headers_by_name(name, headers): matching_headers = find_matching_headers(name, headers) return ','.join(str(headers[h]) for h in matching_headers if headers[h] is not None) + +class RequestHook(object): + """ + This can be extended and supplied to the connection object + to gain access to request and response objects after the request completes. + One use for this would be to implement some specific request logging. + """ + def handle_request_data(self, request, response, error=False): + pass diff --git a/awx/lib/site-packages/boto/vpc/__init__.py b/awx/lib/site-packages/boto/vpc/__init__.py index 7e7c0ef977..4025d6679f 100644 --- a/awx/lib/site-packages/boto/vpc/__init__.py +++ b/awx/lib/site-packages/boto/vpc/__init__.py @@ -35,7 +35,7 @@ from boto.vpc.dhcpoptions import DhcpOptions from boto.vpc.subnet import Subnet from boto.vpc.vpnconnection import VpnConnection from boto.ec2 import RegionData -from boto.regioninfo import RegionInfo +from boto.regioninfo import RegionInfo, get_regions def regions(**kw_params): @@ -48,16 +48,7 @@ def regions(**kw_params): :rtype: list :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` """ - regions = [] - for region_name in RegionData: - region = RegionInfo(name=region_name, - endpoint=RegionData[region_name], - connection_cls=VPCConnection) - regions.append(region) - regions.append(RegionInfo(name='us-gov-west-1', - endpoint=RegionData[region_name], - connection_cls=VPCConnection)) - return regions + return get_regions('ec2', connection_cls=VPCConnection) def connect_to_region(region_name, **kw_params): @@ -1125,7 +1116,7 @@ class VPCConnection(EC2Connection): - *state*, a list of states of the Subnet (pending,available) - - *vpcId*, a list of IDs of teh VPC the subnet is in. + - *vpcId*, a list of IDs of the VPC that the subnet is in. - *cidrBlock*, a list of CIDR blocks of the subnet - *availabilityZone*, list of the Availability Zones the subnet is in.
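# Editor's sketch, not part of the diff: subclassing the RequestHook added
# to boto.utils above, as its docstring suggests, to log completed requests.
# How a connection consumes the hook is not shown in this hunk and is left
# as an assumption.
from boto.utils import RequestHook

class LoggingHook(RequestHook):
    def handle_request_data(self, request, response, error=False):
        # Invoked after each request completes; `error` flags failures.
        print(request, getattr(response, 'status', None), error)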
diff --git a/awx/lib/site-packages/celery/__init__.py b/awx/lib/site-packages/celery/__init__.py index c7f9df1cbc..489b6b4e11 100644 --- a/awx/lib/site-packages/celery/__init__.py +++ b/awx/lib/site-packages/celery/__init__.py @@ -14,7 +14,7 @@ version_info_t = namedtuple( ) SERIES = 'Cipater' -VERSION = version_info_t(3, 1, 7, '', '') +VERSION = version_info_t(3, 1, 10, '', '') __version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION) __author__ = 'Ask Solem' __contact__ = 'ask@celeryproject.org' diff --git a/awx/lib/site-packages/celery/app/amqp.py b/awx/lib/site-packages/celery/app/amqp.py index a058ff93f7..b0dae95e02 100644 --- a/awx/lib/site-packages/celery/app/amqp.py +++ b/awx/lib/site-packages/celery/app/amqp.py @@ -8,6 +8,8 @@ """ from __future__ import absolute_import +import numbers + from datetime import timedelta from weakref import WeakValueDictionary @@ -221,6 +223,7 @@ class TaskProducer(Producer): **kwargs): """Send task message.""" retry = self.retry if retry is None else retry + headers = {} if headers is None else headers qname = queue if queue is None and exchange is None: @@ -251,7 +254,7 @@ class TaskProducer(Producer): eta = now + timedelta(seconds=countdown) if self.utc: eta = to_utc(eta).astimezone(self.app.timezone) - if isinstance(expires, (int, float)): + if isinstance(expires, numbers.Real): now = now or self.app.now() expires = now + timedelta(seconds=expires) if self.utc: @@ -379,6 +382,7 @@ class AMQP(object): producer_cls = TaskProducer consumer_cls = TaskConsumer + queues_cls = Queues #: Cached and prepared routing table. _rtable = None @@ -414,7 +418,7 @@ class AMQP(object): routing_key=conf.CELERY_DEFAULT_ROUTING_KEY), ) autoexchange = (self.autoexchange if autoexchange is None else autoexchange) - return Queues( + return self.queues_cls( queues, self.default_exchange, create_missing, ha_policy, autoexchange, ) diff --git a/awx/lib/site-packages/celery/app/base.py b/awx/lib/site-packages/celery/app/base.py index f37155b081..153a5575a2 100644 --- a/awx/lib/site-packages/celery/app/base.py +++ b/awx/lib/site-packages/celery/app/base.py @@ -235,7 +235,8 @@ class Celery(object): 'run': fun if bind else staticmethod(fun), '_decorated': True, '__doc__': fun.__doc__, - '__module__': fun.__module__}, **options))() + '__module__': fun.__module__, + '__wrapped__': fun}, **options))() task = self._tasks[T.name] # return global instance. return task @@ -272,7 +273,8 @@ class Celery(object): if not module_name: if silent: return False - raise ImproperlyConfigured(ERR_ENVVAR_NOT_SET.format(module_name)) + raise ImproperlyConfigured( + ERR_ENVVAR_NOT_SET.format(variable_name)) return self.config_from_object(module_name, silent=silent, force=force) def config_from_cmdline(self, argv, namespace='celery'): diff --git a/awx/lib/site-packages/celery/app/builtins.py b/awx/lib/site-packages/celery/app/builtins.py index 8bb5133093..a609208402 100644 --- a/awx/lib/site-packages/celery/app/builtins.py +++ b/awx/lib/site-packages/celery/app/builtins.py @@ -13,9 +13,12 @@ from collections import deque from celery._state import get_current_worker_task from celery.utils import uuid +from celery.utils.log import get_logger __all__ = ['shared_task', 'load_shared_tasks'] +logger = get_logger(__name__) + #: global list of functions defining tasks that should be #: added to all apps. 
_shared_tasks = set() @@ -105,16 +108,17 @@ def add_unlock_chord_task(app): ) except StopIteration: reason = repr(exc) - - app._tasks[callback.task].backend.fail_from_current_stack( - callback.id, exc=ChordError(reason), - ) + logger.error('Chord %r raised: %r', group_id, exc, exc_info=1) + app.backend.chord_error_from_stack(callback, + ChordError(reason)) else: try: callback.delay(ret) except Exception as exc: - app._tasks[callback.task].backend.fail_from_current_stack( - callback.id, + logger.error('Chord %r raised: %r', group_id, exc, + exc_info=1) + app.backend.chord_error_from_stack( + callback, exc=ChordError('Callback error: {0!r}'.format(exc)), ) else: @@ -179,7 +183,7 @@ def add_group_task(app): [stask.apply(group_id=group_id) for stask in taskit], ) with app.producer_or_acquire() as pub: - [stask.apply_async(group_id=group_id, publisher=pub, + [stask.apply_async(group_id=group_id, producer=pub, add_to_parent=False) for stask in taskit] parent = get_current_worker_task() if parent: @@ -301,7 +305,7 @@ def add_chain_task(app): if link_error: for task in tasks: task.set(link_error=link_error) - tasks[0].apply_async() + tasks[0].apply_async(**options) return result def apply(self, args=(), kwargs={}, signature=maybe_signature, @@ -340,20 +344,18 @@ def add_chord_task(app): app = self.app propagate = default_propagate if propagate is None else propagate group_id = uuid() - AsyncResult = app.AsyncResult - prepare_member = self._prepare_member # - convert back to group if serialized tasks = header.tasks if isinstance(header, group) else header header = group([ maybe_signature(s, app=app).clone() for s in tasks - ]) + ], app=self.app) # - eager applies the group inline if eager: return header.apply(args=partial_args, task_id=group_id) - results = [AsyncResult(prepare_member(task, body, group_id)) - for task in header.tasks] + body.setdefault('chord_size', len(header.tasks)) + results = header.freeze(group_id=group_id, chord=body).results return self.backend.apply_chord( header, partial_args, group_id, @@ -361,16 +363,6 @@ def add_chord_task(app): max_retries=max_retries, propagate=propagate, result=results, ) - def _prepare_member(self, task, body, group_id): - opts = task.options - # d.setdefault would work but generating uuid's are expensive - try: - task_id = opts['task_id'] - except KeyError: - task_id = opts['task_id'] = uuid() - opts.update(chord=body, group_id=group_id) - return task_id - def apply_async(self, args=(), kwargs={}, task_id=None, group_id=None, chord=None, **options): app = self.app diff --git a/awx/lib/site-packages/celery/app/control.py b/awx/lib/site-packages/celery/app/control.py index fdd49a9d66..34076df0e7 100644 --- a/awx/lib/site-packages/celery/app/control.py +++ b/awx/lib/site-packages/celery/app/control.py @@ -125,8 +125,7 @@ class Control(object): def __init__(self, app=None): self.app = app - self.mailbox = self.Mailbox('celery', type='fanout', - accept=self.app.conf.CELERY_ACCEPT_CONTENT) + self.mailbox = self.Mailbox('celery', type='fanout', accept=['json']) @cached_property def inspect(self): diff --git a/awx/lib/site-packages/celery/app/defaults.py b/awx/lib/site-packages/celery/app/defaults.py index 6cf03cd467..15f7fcfb60 100644 --- a/awx/lib/site-packages/celery/app/defaults.py +++ b/awx/lib/site-packages/celery/app/defaults.py @@ -124,7 +124,7 @@ NAMESPACES = { 'IMPORTS': Option((), type='tuple'), 'INCLUDE': Option((), type='tuple'), 'IGNORE_RESULT': Option(False, type='bool'), - 'MAX_CACHED_RESULTS': Option(5000, type='int'), + 
'MAX_CACHED_RESULTS': Option(100, type='int'), 'MESSAGE_COMPRESSION': Option(type='string'), 'MONGODB_BACKEND_SETTINGS': Option(type='dict'), 'REDIS_HOST': Option(type='string', **_REDIS_OLD), @@ -196,6 +196,7 @@ NAMESPACES = { 'SCHEDULE': Option({}, type='dict'), 'SCHEDULER': Option('celery.beat:PersistentScheduler'), 'SCHEDULE_FILENAME': Option('celerybeat-schedule'), + 'SYNC_EVERY': Option(0, type='int'), 'MAX_LOOP_INTERVAL': Option(0, type='float'), 'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0', alt='--loglevel argument'), diff --git a/awx/lib/site-packages/celery/app/log.py b/awx/lib/site-packages/celery/app/log.py index 53c467669d..9dde09b16b 100644 --- a/awx/lib/site-packages/celery/app/log.py +++ b/awx/lib/site-packages/celery/app/log.py @@ -24,7 +24,7 @@ from kombu.utils.encoding import set_default_encoding_file from celery import signals from celery._state import get_current_task from celery.five import class_property, string_t -from celery.utils import isatty +from celery.utils import isatty, node_format from celery.utils.log import ( get_logger, mlevel, ColorFormatter, ensure_process_aware_logger, @@ -65,9 +65,9 @@ class Logging(object): self.colorize = self.app.conf.CELERYD_LOG_COLOR def setup(self, loglevel=None, logfile=None, redirect_stdouts=False, - redirect_level='WARNING', colorize=None): + redirect_level='WARNING', colorize=None, hostname=None): handled = self.setup_logging_subsystem( - loglevel, logfile, colorize=colorize, + loglevel, logfile, colorize=colorize, hostname=hostname, ) if not handled: if redirect_stdouts: @@ -87,10 +87,12 @@ class Logging(object): CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''), ) - def setup_logging_subsystem(self, loglevel=None, logfile=None, - format=None, colorize=None, **kwargs): + def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, + colorize=None, hostname=None, **kwargs): if self.already_setup: return + if logfile and hostname: + logfile = node_format(logfile, hostname) self.already_setup = True loglevel = mlevel(loglevel or self.loglevel) format = format or self.format @@ -107,6 +109,9 @@ class Logging(object): if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER: root.handlers = [] + get_logger('celery').handlers = [] + get_logger('celery.task').handlers = [] + get_logger('celery.redirected').handlers = [] # Configure root logger self._configure_logger( @@ -228,8 +233,8 @@ class Logging(object): return WatchedFileHandler(logfile) def _has_handler(self, logger): - return (logger.handlers and - not isinstance(logger.handlers[0], NullHandler)) + if logger.handlers: + return any(not isinstance(h, NullHandler) for h in logger.handlers) def _is_configured(self, logger): return self._has_handler(logger) and not getattr( diff --git a/awx/lib/site-packages/celery/app/task.py b/awx/lib/site-packages/celery/app/task.py index 6d4c08659d..79f6d3fca4 100644 --- a/awx/lib/site-packages/celery/app/task.py +++ b/awx/lib/site-packages/celery/app/task.py @@ -343,6 +343,8 @@ class Task(object): 'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'), ) + _backend = None # set by backend property. + __bound__ = False # - Tasks are lazily bound, so that configuration is not set @@ -360,7 +362,6 @@ class Task(object): setattr(self, attr_name, conf[config_name]) if self.accept_magic_kwargs is None: self.accept_magic_kwargs = app.accept_magic_kwargs - self.backend = app.backend # decorate with annotations from config. 
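With bind() no longer assigning self.backend eagerly, the task now resolves its backend through a property (added in a later hunk of this file) that prefers an instance override and otherwise falls back to app.backend. The pattern, condensed with stand-in classes::

    class App(object):
        backend = 'default-backend'        # stand-in for app.backend

    class Task(object):
        app = App()
        _backend = None                    # set by the backend property

        @property
        def backend(self):
            if self._backend is not None:
                return self._backend
            return self.app.backend

        @backend.setter
        def backend(self, value):
            self._backend = value

    t = Task()
    assert t.backend == 'default-backend'  # falls back to the app
    t.backend = 'custom-backend'
    assert t.backend == 'custom-backend'   # explicit override wins
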
if not was_bound: @@ -555,12 +556,12 @@ class Task(object): ) def subtask_from_request(self, request=None, args=None, kwargs=None, - **extra_options): + queue=None, **extra_options): request = self.request if request is None else request args = request.args if args is None else args kwargs = request.kwargs if kwargs is None else kwargs limit_hard, limit_soft = request.timelimit or (None, None) - options = dict({ + options = { 'task_id': request.id, 'link': request.callbacks, 'link_error': request.errbacks, @@ -568,7 +569,10 @@ class Task(object): 'chord': request.chord, 'soft_time_limit': limit_soft, 'time_limit': limit_hard, - }, **request.delivery_info or {}) + } + options.update( + {'queue': queue} if queue else (request.delivery_info or {}) + ) return self.subtask(args, kwargs, options, type=self, **extra_options) def retry(self, args=None, kwargs=None, exc=None, throw=True, @@ -651,7 +655,10 @@ class Task(object): if max_retries is not None and retries > max_retries: if exc: + # first try to reraise the original exception maybe_reraise() + # or if not in an except block then raise the custom exc. + raise exc() raise self.MaxRetriesExceededError( "Can't retry {0}[{1}] args:{2} kwargs:{3}".format( self.name, request.id, S.args, S.kwargs)) @@ -706,6 +713,7 @@ class Task(object): 'loglevel': options.get('loglevel', 0), 'callbacks': maybe_list(link), 'errbacks': maybe_list(link_error), + 'headers': options.get('headers'), 'delivery_info': {'is_eager': True}} if self.accept_magic_kwargs: default_kwargs = {'task_name': task.name, @@ -896,6 +904,17 @@ class Task(object): self._exec_options = extract_exec_options(self) return self._exec_options + @property + def backend(self): + backend = self._backend + if backend is None: + return self.app.backend + return backend + + @backend.setter + def backend(self, value): # noqa + self._backend = value + @property def __name__(self): return self.__class__.__name__ diff --git a/awx/lib/site-packages/celery/app/trace.py b/awx/lib/site-packages/celery/app/trace.py index 0066ef7e9e..b4c271631c 100644 --- a/awx/lib/site-packages/celery/app/trace.py +++ b/awx/lib/site-packages/celery/app/trace.py @@ -25,7 +25,7 @@ from billiard.einfo import ExceptionInfo from kombu.exceptions import EncodeError from kombu.utils import kwdict -from celery import current_app +from celery import current_app, group from celery import states, signals from celery._state import _task_stack from celery.app import set_default_app @@ -200,8 +200,10 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True, I = Info(state, exc) R = I.handle_error_state(task, eager=eager) if call_errbacks: - [signature(errback, app=app).apply_async((uuid, )) - for errback in request.errbacks or []] + group( + [signature(errback, app=app) + for errback in request.errbacks or []], app=app, + ).apply_async((uuid, )) return I, R, I.state, I.retval def trace_task(uuid, args, kwargs, request=None): @@ -255,8 +257,11 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True, try: # callback tasks must be applied before the result is # stored, so that result.children is populated. 
- [signature(callback, app=app).apply_async((retval, )) - for callback in task_request.callbacks or []] + group( + [signature(callback, app=app) + for callback in task.request.callbacks or []], + app=app, + ).apply_async((retval, )) if publish_result: store_result( uuid, retval, SUCCESS, request=task_request, @@ -272,7 +277,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True, # -* POST *- if state not in IGNORE_STATES: if task_request.chord: - on_chord_part_return(task) + on_chord_part_return(task, state, R) if task_after_return: task_after_return( state, retval, uuid, args, kwargs, None, @@ -336,7 +341,7 @@ def eager_trace_task(task, uuid, args, kwargs, request=None, **opts): def report_internal_error(task, exc): _type, _value, _tb = sys.exc_info() try: - _value = task.backend.prepare_exception(exc) + _value = task.backend.prepare_exception(exc, 'pickle') exc_info = ExceptionInfo((_type, _value, _tb), internal=True) warn(RuntimeWarning( 'Exception raised outside body: {0!r}:\n{1}'.format( diff --git a/awx/lib/site-packages/celery/apps/beat.py b/awx/lib/site-packages/celery/apps/beat.py index d4ab6e303c..46cef9b8bf 100644 --- a/awx/lib/site-packages/celery/apps/beat.py +++ b/awx/lib/site-packages/celery/apps/beat.py @@ -12,6 +12,7 @@ """ from __future__ import absolute_import, unicode_literals +import numbers import socket import sys @@ -66,7 +67,7 @@ class Beat(object): ) self.pidfile = pidfile - if not isinstance(self.loglevel, int): + if not isinstance(self.loglevel, numbers.Integral): self.loglevel = LOG_LEVELS[self.loglevel.upper()] def _getopt(self, key, value): diff --git a/awx/lib/site-packages/celery/apps/worker.py b/awx/lib/site-packages/celery/apps/worker.py index 61162769bb..d190711082 100644 --- a/awx/lib/site-packages/celery/apps/worker.py +++ b/awx/lib/site-packages/celery/apps/worker.py @@ -25,7 +25,9 @@ from kombu.utils.encoding import safe_str from celery import VERSION_BANNER, platforms, signals from celery.app import trace -from celery.exceptions import CDeprecationWarning, SystemTerminate +from celery.exceptions import ( + CDeprecationWarning, WorkerShutdown, WorkerTerminate, +) from celery.five import string, string_t from celery.loaders.app import AppLoader from celery.platforms import check_privileges @@ -163,10 +165,10 @@ class Worker(WorkController): # Dump configuration to screen so we have some basic information # for when users sends bug reports. 
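The beat change above applies the same numbers idiom to log levels: any integral value passes through, anything else is looked up as a level name. Roughly::

    import numbers

    LOG_LEVELS = {'DEBUG': 10, 'INFO': 20, 'WARNING': 30}   # abbreviated

    def coerce_loglevel(loglevel):
        # Accept any integral type (bool, long on Python 2, IntEnum, ...)
        # instead of only the concrete int type.
        if not isinstance(loglevel, numbers.Integral):
            loglevel = LOG_LEVELS[loglevel.upper()]
        return loglevel

    assert coerce_loglevel('info') == 20
    assert coerce_loglevel(10) == 10
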
- print(''.join([ + print(safe_str(''.join([ string(self.colored.cyan(' \n', self.startup_info())), string(self.colored.reset(self.extra_info() or '')), - ]), file=sys.__stdout__) + ])), file=sys.__stdout__) self.set_process_status('-active-') self.install_platform_tweaks(self) @@ -179,7 +181,7 @@ class Worker(WorkController): colorize = not self.no_color return self.app.log.setup( self.loglevel, self.logfile, - redirect_stdouts=False, colorize=colorize, + redirect_stdouts=False, colorize=colorize, hostname=self.hostname, ) def purge_messages(self): @@ -275,7 +277,7 @@ class Worker(WorkController): def _shutdown_handler(worker, sig='TERM', how='Warm', - exc=SystemExit, callback=None): + exc=WorkerShutdown, callback=None): def _handle_request(*args): with in_sighandler(): @@ -292,11 +294,11 @@ def _shutdown_handler(worker, sig='TERM', how='Warm', _handle_request.__name__ = str('worker_{0}'.format(how)) platforms.signals[sig] = _handle_request install_worker_term_handler = partial( - _shutdown_handler, sig='SIGTERM', how='Warm', exc=SystemExit, + _shutdown_handler, sig='SIGTERM', how='Warm', exc=WorkerShutdown, ) if not is_jython: # pragma: no cover install_worker_term_hard_handler = partial( - _shutdown_handler, sig='SIGQUIT', how='Cold', exc=SystemTerminate, + _shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate, ) else: # pragma: no cover install_worker_term_handler = \ @@ -315,6 +317,9 @@ else: # pragma: no cover def _reload_current_worker(): + platforms.close_open_fds([ + sys.__stdin__, sys.__stdout__, sys.__stderr__, + ]) os.execv(sys.executable, [sys.executable] + sys.argv) diff --git a/awx/lib/site-packages/celery/backends/amqp.py b/awx/lib/site-packages/celery/backends/amqp.py index 88f3490e9f..62cf2034ec 100644 --- a/awx/lib/site-packages/celery/backends/amqp.py +++ b/awx/lib/site-packages/celery/backends/amqp.py @@ -141,6 +141,7 @@ class AMQPBackend(BaseBackend): return [self._create_binding(task_id)] def wait_for(self, task_id, timeout=None, cache=True, propagate=True, + no_ack=True, on_interval=None, READY_STATES=states.READY_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): @@ -150,7 +151,8 @@ class AMQPBackend(BaseBackend): meta = cached_meta else: try: - meta = self.consume(task_id, timeout=timeout) + meta = self.consume(task_id, timeout=timeout, no_ack=no_ack, + on_interval=on_interval) except socket.timeout: raise TimeoutError('The operation timed out.') @@ -167,15 +169,18 @@ class AMQPBackend(BaseBackend): prev = latest = acc = None for i in range(backlog_limit): # spool ffwd - prev, latest, acc = latest, acc, binding.get( + acc = binding.get( accept=self.accept, no_ack=False, ) if not acc: # no more messages break + if acc.payload['task_id'] == task_id: + prev, latest = latest, acc if prev: # backends are not expected to keep history, # so we delete everything except the most recent state. prev.ack() + prev = None else: raise self.BacklogLimitExceeded(task_id) @@ -193,7 +198,7 @@ class AMQPBackend(BaseBackend): poll = get_task_meta # XXX compat def drain_events(self, connection, consumer, - timeout=None, now=monotonic, wait=None): + timeout=None, on_interval=None, now=monotonic, wait=None): wait = wait or connection.drain_events results = {} @@ -209,27 +214,30 @@ class AMQPBackend(BaseBackend): if timeout and now() - time_start >= timeout: raise socket.timeout() wait(timeout=timeout) + if on_interval: + on_interval() if results: # got event on the wanted channel. 
break self._cache.update(results) return results - def consume(self, task_id, timeout=None): + def consume(self, task_id, timeout=None, no_ack=True, on_interval=None): wait = self.drain_events with self.app.pool.acquire_channel(block=True) as (conn, channel): binding = self._create_binding(task_id) with self.Consumer(channel, binding, - no_ack=True, accept=self.accept) as consumer: + no_ack=no_ack, accept=self.accept) as consumer: while 1: try: - return wait(conn, consumer, timeout)[task_id] + return wait( + conn, consumer, timeout, on_interval)[task_id] except KeyError: continue def _many_bindings(self, ids): return [self._create_binding(task_id) for task_id in ids] - def get_many(self, task_ids, timeout=None, + def get_many(self, task_ids, timeout=None, no_ack=True, now=monotonic, getfields=itemgetter('status', 'task_id'), READY_STATES=states.READY_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): @@ -263,7 +271,7 @@ class AMQPBackend(BaseBackend): bindings = self._many_bindings(task_ids) with self.Consumer(channel, bindings, on_message=on_message, - accept=self.accept, no_ack=True): + accept=self.accept, no_ack=no_ack): wait = conn.drain_events popleft = results.popleft while ids: diff --git a/awx/lib/site-packages/celery/backends/base.py b/awx/lib/site-packages/celery/backends/base.py index ecb35108ae..2ca4cc0019 100644 --- a/awx/lib/site-packages/celery/backends/base.py +++ b/awx/lib/site-packages/celery/backends/base.py @@ -26,12 +26,16 @@ from kombu.serialization import ( from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8 from celery import states +from celery import current_app, maybe_signature from celery.app import current_task from celery.exceptions import ChordError, TimeoutError, TaskRevokedError from celery.five import items -from celery.result import result_from_tuple, GroupResult +from celery.result import ( + GroupResult, ResultBase, allow_join_result, result_from_tuple, +) from celery.utils import timeutils from celery.utils.functional import LRUCache +from celery.utils.log import get_logger from celery.utils.serialization import ( get_pickled_exception, get_pickleable_exception, @@ -43,13 +47,20 @@ __all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend'] EXCEPTION_ABLE_CODECS = frozenset(['pickle', 'yaml']) PY3 = sys.version_info >= (3, 0) +logger = get_logger(__name__) + def unpickle_backend(cls, args, kwargs): """Return an unpickled backend.""" - from celery import current_app return cls(*args, app=current_app._get_current_object(), **kwargs) +class _nulldict(dict): + + def __setitem__(self, k, v): + pass + + class BaseBackend(object): READY_STATES = states.READY_STATES UNREADY_STATES = states.UNREADY_STATES @@ -73,6 +84,13 @@ class BaseBackend(object): #: Set to true if the backend is peristent by default. 
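_nulldict above is a cache stand-in whose item assignments vanish; the constructor change below selects it when CELERY_MAX_CACHED_RESULTS is -1, disabling result caching without touching any call site. For example::

    cache = _nulldict()
    cache['task-id'] = {'status': 'SUCCESS'}
    assert 'task-id' not in cache    # the write was silently dropped

    # Note that dict.update() does not go through __setitem__, so only
    # direct item assignment is suppressed by this version of the class.
    cache.update(other=1)
    assert 'other' in cache
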
persistent = True + retry_policy = { + 'max_retries': 20, + 'interval_start': 0, + 'interval_step': 1, + 'interval_max': 1, + } + def __init__(self, app, serializer=None, max_cached_results=None, accept=None, **kwargs): self.app = app @@ -81,9 +99,8 @@ class BaseBackend(object): (self.content_type, self.content_encoding, self.encoder) = serializer_registry._encoders[self.serializer] - self._cache = LRUCache( - limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS, - ) + cmax = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS + self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax) self.accept = prepare_accept_content( conf.CELERY_ACCEPT_CONTENT if accept is None else accept, ) @@ -102,6 +119,21 @@ class BaseBackend(object): return self.store_result(task_id, exc, status=states.FAILURE, traceback=traceback, request=request) + def chord_error_from_stack(self, callback, exc=None): + from celery import group + app = self.app + backend = app._tasks[callback.task].backend + try: + group( + [app.signature(errback) + for errback in callback.options.get('link_error') or []], + app=app, + ).apply_async((callback.id, )) + except Exception as eb_exc: + return backend.fail_from_current_stack(callback.id, exc=eb_exc) + else: + return backend.fail_from_current_stack(callback.id, exc=exc) + def fail_from_current_stack(self, task_id, exc=None): type_, real_exc, tb = sys.exc_info() try: @@ -123,9 +155,10 @@ class BaseBackend(object): status=states.REVOKED, traceback=None, request=request) - def prepare_exception(self, exc): + def prepare_exception(self, exc, serializer=None): """Prepare exception for serialization.""" - if self.serializer in EXCEPTION_ABLE_CODECS: + serializer = self.serializer if serializer is None else serializer + if serializer in EXCEPTION_ABLE_CODECS: return get_pickleable_exception(exc) return {'exc_type': type(exc).__name__, 'exc_message': str(exc)} @@ -138,7 +171,7 @@ class BaseBackend(object): def prepare_value(self, result): """Prepare value for storage.""" - if isinstance(result, GroupResult): + if self.serializer != 'pickle' and isinstance(result, ResultBase): return result.as_tuple() return result @@ -153,7 +186,9 @@ class BaseBackend(object): content_encoding=self.content_encoding, accept=self.accept) - def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5): + def wait_for(self, task_id, + timeout=None, propagate=True, interval=0.5, no_ack=True, + on_interval=None): """Wait for task and return its result. If the task raises an exception, this exception @@ -176,6 +211,8 @@ class BaseBackend(object): if propagate: raise result return result + if on_interval: + on_interval() # avoid hammering the CPU checking status. 
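wait_for() above gains no_ack and an on_interval callback that fires once per poll cycle. Its control flow, reduced to a standalone sketch (using the Python 3 builtin TimeoutError instead of celery's)::

    import time

    def wait_for(poll, timeout=None, interval=0.5, on_interval=None):
        elapsed = 0.0
        while True:
            meta = poll()
            if meta is not None:
                return meta
            if on_interval is not None:
                on_interval()        # lets the caller react between polls
            time.sleep(interval)     # avoid hammering the CPU
            elapsed += interval
            if timeout is not None and elapsed >= timeout:
                raise TimeoutError('The operation timed out.')

    replies = iter([None, None, 'done'])
    assert wait_for(lambda: next(replies), interval=0.01) == 'done'
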
time.sleep(interval) time_elapsed += interval @@ -302,7 +339,7 @@ class BaseBackend(object): def on_task_call(self, producer, task_id): return {} - def on_chord_part_return(self, task, propagate=False): + def on_chord_part_return(self, task, state, result, propagate=False): pass def fallback_chord_unlock(self, group_id, body, result=None, @@ -365,17 +402,26 @@ class KeyValueStoreBackend(BaseBackend): def expire(self, key, value): pass - def get_key_for_task(self, task_id): + def get_key_for_task(self, task_id, key=''): """Get the cache key for a task by id.""" - return self.task_keyprefix + self.key_t(task_id) + key_t = self.key_t + return key_t('').join([ + self.task_keyprefix, key_t(task_id), key_t(key), + ]) - def get_key_for_group(self, group_id): + def get_key_for_group(self, group_id, key=''): """Get the cache key for a group by id.""" - return self.group_keyprefix + self.key_t(group_id) + key_t = self.key_t + return key_t('').join([ + self.group_keyprefix, key_t(group_id), key_t(key), + ]) - def get_key_for_chord(self, group_id): + def get_key_for_chord(self, group_id, key=''): """Get the cache key for the chord waiting on group with given id.""" - return self.chord_keyprefix + self.key_t(group_id) + key_t = self.key_t + return key_t('').join([ + self.chord_keyprefix, key_t(group_id), key_t(key), + ]) def _strip_prefix(self, key): """Takes bytes, emits string.""" @@ -397,7 +443,7 @@ class KeyValueStoreBackend(BaseBackend): for i, value in enumerate(values) if value is not None) - def get_many(self, task_ids, timeout=None, interval=0.5, + def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True, READY_STATES=states.READY_STATES): interval = 0.5 if interval is None else interval ids = task_ids if isinstance(task_ids, set) else set(task_ids) @@ -470,14 +516,12 @@ class KeyValueStoreBackend(BaseBackend): self.save_group(group_id, self.app.GroupResult(group_id, result)) return header(*partial_args, task_id=group_id) - def on_chord_part_return(self, task, propagate=None): + def on_chord_part_return(self, task, state, result, propagate=None): if not self.implements_incr: return - from celery import maybe_signature - from celery.result import GroupResult, allow_join_result app = self.app if propagate is None: - propagate = self.app.conf.CELERY_CHORD_PROPAGATES + propagate = app.conf.CELERY_CHORD_PROPAGATES gid = task.request.group if not gid: return @@ -485,26 +529,26 @@ class KeyValueStoreBackend(BaseBackend): try: deps = GroupResult.restore(gid, backend=task.backend) except Exception as exc: - callback = maybe_signature(task.request.chord, app=self.app) - return app._tasks[callback.task].backend.fail_from_current_stack( - callback.id, - exc=ChordError('Cannot restore group: {0!r}'.format(exc)), + callback = maybe_signature(task.request.chord, app=app) + logger.error('Chord %r raised: %r', gid, exc, exc_info=1) + return self.chord_error_from_stack( + callback, + ChordError('Cannot restore group: {0!r}'.format(exc)), ) if deps is None: try: raise ValueError(gid) except ValueError as exc: - callback = maybe_signature(task.request.chord, app=self.app) - task = app._tasks[callback.task] - return task.backend.fail_from_current_stack( - callback.id, - exc=ChordError('GroupResult {0} no longer exists'.format( - gid, - )) + callback = maybe_signature(task.request.chord, app=app) + logger.error('Chord callback %r raised: %r', gid, exc, + exc_info=1) + return self.chord_error_from_stack( + callback, + ChordError('GroupResult {0} no longer exists'.format(gid)), ) val = self.incr(key) if 
val >= len(deps): - callback = maybe_signature(task.request.chord, app=self.app) + callback = maybe_signature(task.request.chord, app=app) j = deps.join_native if deps.supports_native_join else deps.join try: with allow_join_result(): @@ -518,16 +562,16 @@ class KeyValueStoreBackend(BaseBackend): except StopIteration: reason = repr(exc) - app._tasks[callback.task].backend.fail_from_current_stack( - callback.id, exc=ChordError(reason), - ) + logger.error('Chord %r raised: %r', gid, reason, exc_info=1) + self.chord_error_from_stack(callback, ChordError(reason)) else: try: callback.delay(ret) except Exception as exc: - app._tasks[callback.task].backend.fail_from_current_stack( - callback.id, - exc=ChordError('Callback error: {0!r}'.format(exc)), + logger.error('Chord %r raised: %r', gid, exc, exc_info=1) + self.chord_error_from_stack( + callback, + ChordError('Callback error: {0!r}'.format(exc)), ) finally: deps.delete() diff --git a/awx/lib/site-packages/celery/backends/database/__init__.py b/awx/lib/site-packages/celery/backends/database/__init__.py index 58109e7824..c52e75879b 100644 --- a/awx/lib/site-packages/celery/backends/database/__init__.py +++ b/awx/lib/site-packages/celery/backends/database/__init__.py @@ -8,17 +8,21 @@ """ from __future__ import absolute_import +import logging +from contextlib import contextmanager from functools import wraps from celery import states +from celery.backends.base import BaseBackend from celery.exceptions import ImproperlyConfigured from celery.five import range from celery.utils.timeutils import maybe_timedelta -from celery.backends.base import BaseBackend +from .models import Task +from .models import TaskSet +from .session import SessionManager -from .models import Task, TaskSet -from .session import ResultSession +logger = logging.getLogger(__name__) __all__ = ['DatabaseBackend'] @@ -33,7 +37,19 @@ def _sqlalchemy_installed(): return sqlalchemy _sqlalchemy_installed() -from sqlalchemy.exc import DatabaseError, OperationalError +from sqlalchemy.exc import DatabaseError, InvalidRequestError +from sqlalchemy.orm.exc import StaleDataError + + +@contextmanager +def session_cleanup(session): + try: + yield + except Exception: + session.rollback() + raise + finally: + session.close() def retry(fun): @@ -45,7 +61,12 @@ def retry(fun): for retries in range(max_retries): try: return fun(*args, **kwargs) - except (DatabaseError, OperationalError): + except (DatabaseError, InvalidRequestError, StaleDataError): + logger.warning( + "Failed operation %s. Retrying %s more times.", + fun.__name__, max_retries - retries - 1, + exc_info=True, + ) if retries + 1 >= max_retries: raise @@ -83,8 +104,8 @@ class DatabaseBackend(BaseBackend): 'Missing connection string! 
Do you have ' 'CELERY_RESULT_DBURI set to a real value?') - def ResultSession(self): - return ResultSession( + def ResultSession(self, session_manager=SessionManager()): + return session_manager.session_factory( dburi=self.dburi, short_lived_sessions=self.short_lived_sessions, **self.engine_options @@ -95,8 +116,9 @@ class DatabaseBackend(BaseBackend): traceback=None, max_retries=3, **kwargs): """Store return value and status of an executed task.""" session = self.ResultSession() - try: - task = session.query(Task).filter(Task.task_id == task_id).first() + with session_cleanup(session): + task = list(session.query(Task).filter(Task.task_id == task_id)) + task = task and task[0] if not task: task = Task(task_id) session.add(task) @@ -106,83 +128,70 @@ class DatabaseBackend(BaseBackend): task.traceback = traceback session.commit() return result - finally: - session.close() @retry def _get_task_meta_for(self, task_id): """Get task metadata for a task by id.""" session = self.ResultSession() - try: - task = session.query(Task).filter(Task.task_id == task_id).first() - if task is None: + with session_cleanup(session): + task = list(session.query(Task).filter(Task.task_id == task_id)) + task = task and task[0] + if not task: task = Task(task_id) task.status = states.PENDING task.result = None return task.to_dict() - finally: - session.close() @retry def _save_group(self, group_id, result): """Store the result of an executed group.""" session = self.ResultSession() - try: + with session_cleanup(session): group = TaskSet(group_id, result) session.add(group) session.flush() session.commit() return result - finally: - session.close() @retry def _restore_group(self, group_id): """Get metadata for group by id.""" session = self.ResultSession() - try: + with session_cleanup(session): group = session.query(TaskSet).filter( TaskSet.taskset_id == group_id).first() if group: return group.to_dict() - finally: - session.close() @retry def _delete_group(self, group_id): """Delete metadata for group by id.""" session = self.ResultSession() - try: + with session_cleanup(session): session.query(TaskSet).filter( TaskSet.taskset_id == group_id).delete() session.flush() session.commit() - finally: - session.close() @retry def _forget(self, task_id): """Forget about result.""" session = self.ResultSession() - try: + with session_cleanup(session): session.query(Task).filter(Task.task_id == task_id).delete() session.commit() - finally: - session.close() def cleanup(self): """Delete expired metadata.""" session = self.ResultSession() expires = self.expires now = self.app.now() - try: + with session_cleanup(session): session.query(Task).filter( Task.date_done < (now - expires)).delete() session.query(TaskSet).filter( TaskSet.date_done < (now - expires)).delete() session.commit() - finally: - session.close() def __reduce__(self, args=(), kwargs={}): kwargs.update( diff --git a/awx/lib/site-packages/celery/backends/database/session.py b/awx/lib/site-packages/celery/backends/database/session.py index fef3843e4f..1575d7f323 100644 --- a/awx/lib/site-packages/celery/backends/database/session.py +++ b/awx/lib/site-packages/celery/backends/database/session.py @@ -8,58 +8,55 @@ """ from __future__ import absolute_import -from collections import defaultdict -from multiprocessing.util import register_after_fork +from billiard.util import register_after_fork from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker 
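session_cleanup() above (in database/__init__.py) replaces the repeated try/finally blocks: roll back only on error, close always. Repeated here with a stand-in session so it runs on its own::

    from contextlib import contextmanager

    @contextmanager
    def session_cleanup(session):
        try:
            yield
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    class FakeSession(object):       # stand-in for a SQLAlchemy session
        def rollback(self):
            print('rolled back')
        def close(self):
            print('closed')

    with session_cleanup(FakeSession()):
        pass                         # work with the session here
    # prints only 'closed'; 'rolled back' appears only if the block raises
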
+from sqlalchemy.pool import NullPool ResultModelBase = declarative_base() -_SETUP = defaultdict(lambda: False) -_ENGINES = {} -_SESSIONS = {} - -__all__ = ['ResultSession', 'get_engine', 'create_session'] +__all__ = ['SessionManager'] -class _after_fork(object): - registered = False +class SessionManager(object): + def __init__(self): + self._engines = {} + self._sessions = {} + self.forked = False + self.prepared = False + register_after_fork(self, self._after_fork) - def __call__(self): - self.registered = False # child must reregister - for engine in list(_ENGINES.values()): - engine.dispose() - _ENGINES.clear() - _SESSIONS.clear() -after_fork = _after_fork() + def _after_fork(self,): + self.forked = True + def get_engine(self, dburi, **kwargs): + if self.forked: + try: + return self._engines[dburi] + except KeyError: + engine = self._engines[dburi] = create_engine(dburi, **kwargs) + return engine + else: + kwargs['poolclass'] = NullPool + return create_engine(dburi, **kwargs) -def get_engine(dburi, **kwargs): - try: - return _ENGINES[dburi] - except KeyError: - engine = _ENGINES[dburi] = create_engine(dburi, **kwargs) - after_fork.registered = True - register_after_fork(after_fork, after_fork) - return engine + def create_session(self, dburi, short_lived_sessions=False, **kwargs): + engine = self.get_engine(dburi, **kwargs) + if self.forked: + if short_lived_sessions or dburi not in self._sessions: + self._sessions[dburi] = sessionmaker(bind=engine) + return engine, self._sessions[dburi] + else: + return engine, sessionmaker(bind=engine) + def prepare_models(self, engine): + if not self.prepared: + ResultModelBase.metadata.create_all(engine) + self.prepared = True -def create_session(dburi, short_lived_sessions=False, **kwargs): - engine = get_engine(dburi, **kwargs) - if short_lived_sessions or dburi not in _SESSIONS: - _SESSIONS[dburi] = sessionmaker(bind=engine) - return engine, _SESSIONS[dburi] - - -def setup_results(engine): - if not _SETUP['results']: - ResultModelBase.metadata.create_all(engine) - _SETUP['results'] = True - - -def ResultSession(dburi, **kwargs): - engine, session = create_session(dburi, **kwargs) - setup_results(engine) - return session() + def session_factory(self, dburi, **kwargs): + engine, session = self.create_session(dburi, **kwargs) + self.prepare_models(engine) + return session() diff --git a/awx/lib/site-packages/celery/backends/redis.py b/awx/lib/site-packages/celery/backends/redis.py index f7b36ee6e8..314b1d2b8f 100644 --- a/awx/lib/site-packages/celery/backends/redis.py +++ b/awx/lib/site-packages/celery/backends/redis.py @@ -8,22 +8,30 @@ """ from __future__ import absolute_import -from kombu.utils import cached_property +from functools import partial + +from kombu.utils import cached_property, retry_over_time from kombu.utils.url import _parse_url -from celery.exceptions import ImproperlyConfigured +from celery import states +from celery.canvas import maybe_signature +from celery.exceptions import ChordError, ImproperlyConfigured from celery.five import string_t -from celery.utils import deprecated_property +from celery.utils import deprecated_property, strtobool from celery.utils.functional import dictfilter +from celery.utils.log import get_logger +from celery.utils.timeutils import humanize_seconds from .base import KeyValueStoreBackend try: import redis from redis.exceptions import ConnectionError -except ImportError: # pragma: no cover - redis = None # noqa - ConnectionError = None # noqa + from kombu.transport.redis import 
get_redis_error_classes +except ImportError: # pragma: no cover + redis = None # noqa + ConnectionError = None # noqa + get_redis_error_classes = None # noqa __all__ = ['RedisBackend'] @@ -31,6 +39,9 @@ REDIS_MISSING = """\ You need to install the redis library in order to use \ the Redis result store backend.""" +logger = get_logger(__name__) +error = logger.error + class RedisBackend(KeyValueStoreBackend): """Redis task result store.""" @@ -46,7 +57,8 @@ class RedisBackend(KeyValueStoreBackend): implements_incr = True def __init__(self, host=None, port=None, db=None, password=None, - expires=None, max_connections=None, url=None, **kwargs): + expires=None, max_connections=None, url=None, + connection_pool=None, new_join=False, **kwargs): super(RedisBackend, self).__init__(**kwargs) conf = self.app.conf if self.redis is None: @@ -66,6 +78,7 @@ class RedisBackend(KeyValueStoreBackend): self.max_connections = ( max_connections or _get('MAX_CONNECTIONS') or self.max_connections ) + self._ConnectionPool = connection_pool self.connparams = { 'host': _get('HOST') or 'localhost', @@ -79,6 +92,18 @@ class RedisBackend(KeyValueStoreBackend): self.url = url self.expires = self.prepare_expires(expires, type=int) + try: + new_join = strtobool(self.connparams.pop('new_join')) + except KeyError: + pass + if new_join: + self.apply_chord = self._new_chord_apply + self.on_chord_part_return = self._new_chord_return + + self.connection_errors, self.channel_errors = ( + get_redis_error_classes() if get_redis_error_classes + else ((), ())) + def _params_from_url(self, url, defaults): scheme, host, port, user, password, path, query = _parse_url(url) connparams = dict( @@ -115,13 +140,33 @@ class RedisBackend(KeyValueStoreBackend): def mget(self, keys): return self.client.mget(keys) - def set(self, key, value): - client = self.client + def ensure(self, fun, args, **policy): + retry_policy = dict(self.retry_policy, **policy) + max_retries = retry_policy.get('max_retries') + return retry_over_time( + fun, self.connection_errors, args, {}, + partial(self.on_connection_error, max_retries), + **retry_policy + ) + + def on_connection_error(self, max_retries, exc, intervals, retries): + tts = next(intervals) + error('Connection to Redis lost: Retry (%s/%s) %s.', + retries, max_retries or 'Inf', + humanize_seconds(tts, 'in ')) + return tts + + def set(self, key, value, **retry_policy): + return self.ensure(self._set, (key, value), **retry_policy) + + def _set(self, key, value): + pipe = self.client.pipeline() if self.expires: - client.setex(key, value, self.expires) + pipe.setex(key, value, self.expires) else: - client.set(key, value) - client.publish(key, value) + pipe.set(key, value) + pipe.publish(key, value) + pipe.execute() def delete(self, key): self.client.delete(key) @@ -132,10 +177,77 @@ class RedisBackend(KeyValueStoreBackend): def expire(self, key, value): return self.client.expire(key, value) + def _unpack_chord_result(self, tup, decode, + PROPAGATE_STATES=states.PROPAGATE_STATES): + _, tid, state, retval = decode(tup) + if state in PROPAGATE_STATES: + raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval)) + return retval + + def _new_chord_apply(self, header, partial_args, group_id, body, + result=None, **options): + # avoids saving the group in the redis db. 
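set() is now routed through ensure(), which retries over self.connection_errors using the class retry_policy defined earlier (up to 20 retries, the first immediate, then one second apart). kombu's retry_over_time has a richer signature; a simplified stand-in::

    import time

    class ConnectionLost(Exception):
        pass

    def retry_over_time(fun, catch, max_retries=20,
                        interval_start=0, interval_step=1, interval_max=1):
        interval = interval_start
        for retries in range(max_retries + 1):
            try:
                return fun()
            except catch:
                if retries == max_retries:
                    raise
                time.sleep(interval)
                interval = min(interval + interval_step, interval_max)

    attempts = []

    def flaky():
        attempts.append(1)
        if len(attempts) < 3:
            raise ConnectionLost('connection reset')
        return 'ok'

    assert retry_over_time(flaky, ConnectionLost, interval_step=0) == 'ok'
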
+ return header(*partial_args, task_id=group_id) + + def _new_chord_return(self, task, state, result, propagate=None, + PROPAGATE_STATES=states.PROPAGATE_STATES): + app = self.app + if propagate is None: + propagate = self.app.conf.CELERY_CHORD_PROPAGATES + request = task.request + tid, gid = request.id, request.group + if not gid or not tid: + return + + client = self.client + jkey = self.get_key_for_group(gid, '.j') + result = self.encode_result(result, state) + _, readycount, _ = client.pipeline() \ + .rpush(jkey, self.encode([1, tid, state, result])) \ + .llen(jkey) \ + .expire(jkey, 86400) \ + .execute() + + try: + callback = maybe_signature(request.chord, app=app) + total = callback['chord_size'] + if readycount >= total: + decode, unpack = self.decode, self._unpack_chord_result + resl, _ = client.pipeline() \ + .lrange(jkey, 0, total) \ + .delete(jkey) \ + .execute() + try: + callback.delay([unpack(tup, decode) for tup in resl]) + except Exception as exc: + error('Chord callback for %r raised: %r', + request.group, exc, exc_info=1) + app._tasks[callback.task].backend.fail_from_current_stack( + callback.id, + exc=ChordError('Callback error: {0!r}'.format(exc)), + ) + except ChordError as exc: + error('Chord %r raised: %r', request.group, exc, exc_info=1) + app._tasks[callback.task].backend.fail_from_current_stack( + callback.id, exc=exc, + ) + except Exception as exc: + error('Chord %r raised: %r', request.group, exc, exc_info=1) + app._tasks[callback.task].backend.fail_from_current_stack( + callback.id, exc=ChordError('Join error: {0!r}'.format(exc)), + ) + + @property + def ConnectionPool(self): + if self._ConnectionPool is None: + self._ConnectionPool = self.redis.ConnectionPool + return self._ConnectionPool + @cached_property def client(self): return self.redis.Redis( - connection_pool=self.redis.ConnectionPool(**self.connparams)) + connection_pool=self.ConnectionPool(**self.connparams), + ) def __reduce__(self, args=(), kwargs={}): return super(RedisBackend, self).__reduce__( diff --git a/awx/lib/site-packages/celery/beat.py b/awx/lib/site-packages/celery/beat.py index f93e3bbf9b..8205c27812 100644 --- a/awx/lib/site-packages/celery/beat.py +++ b/awx/lib/site-packages/celery/beat.py @@ -161,17 +161,24 @@ class Scheduler(object): #: How often to sync the schedule (3 minutes by default) sync_every = 3 * 60 + #: How many tasks can be called before a sync is forced. 
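This attribute pairs with the CELERYBEAT_SYNC_EVERY setting added to defaults.py above; should_sync() below ORs a task-count trigger into the existing time-based one, and reserve() counts applied tasks. Condensed::

    from time import time as monotonic  # stand-in for celery.five.monotonic

    class SchedulerSketch(object):
        sync_every = 3 * 60          # seconds between syncs
        sync_every_tasks = 10        # e.g. CELERYBEAT_SYNC_EVERY = 10
        _last_sync = None
        _tasks_since_sync = 0

        def should_sync(self):
            return (
                (not self._last_sync or
                 monotonic() - self._last_sync > self.sync_every) or
                (self.sync_every_tasks and
                 self._tasks_since_sync >= self.sync_every_tasks)
            )

    s = SchedulerSketch()
    s._last_sync = monotonic()
    s._tasks_since_sync = 10
    assert s.should_sync()           # the count trigger fires early
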
+ sync_every_tasks = None + _last_sync = None + _tasks_since_sync = 0 logger = logger # compat def __init__(self, app, schedule=None, max_interval=None, - Publisher=None, lazy=False, **kwargs): + Publisher=None, lazy=False, sync_every_tasks=None, **kwargs): self.app = app self.data = maybe_evaluate({} if schedule is None else schedule) self.max_interval = (max_interval or app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or self.max_interval) + self.sync_every_tasks = ( + app.conf.CELERYBEAT_SYNC_EVERY if sync_every_tasks is None + else sync_every_tasks) self.Publisher = Publisher or app.amqp.TaskProducer if not lazy: self.setup_schedule() @@ -219,8 +226,12 @@ class Scheduler(object): return min(remaining_times + [self.max_interval]) def should_sync(self): - return (not self._last_sync or - (monotonic() - self._last_sync) > self.sync_every) + return ( + (not self._last_sync or + (monotonic() - self._last_sync) > self.sync_every) or + (self.sync_every_tasks and + self._tasks_since_sync >= self.sync_every_tasks) + ) def reserve(self, entry): new_entry = self.schedule[entry.name] = next(entry) @@ -247,6 +258,7 @@ class Scheduler(object): "Couldn't apply scheduled task {0.name}: {exc}".format( entry, exc=exc)), sys.exc_info()[2]) finally: + self._tasks_since_sync += 1 if self.should_sync(): self._do_sync() return result @@ -263,6 +275,7 @@ class Scheduler(object): self.sync() finally: self._last_sync = monotonic() + self._tasks_since_sync = 0 def sync(self): pass @@ -352,7 +365,6 @@ class PersistentScheduler(Scheduler): try: self._store = self.persistence.open(self.schedule_filename, writeback=True) - entries = self._store.setdefault('entries', {}) except Exception as exc: error('Removing corrupted schedule file %r: %r', self.schedule_filename, exc, exc_info=True) @@ -360,15 +372,21 @@ class PersistentScheduler(Scheduler): self._store = self.persistence.open(self.schedule_filename, writeback=True) else: - if '__version__' not in self._store: - warning('Reset: Account for new __version__ field') - self._store.clear() # remove schedule at 2.2.2 upgrade. - if 'tz' not in self._store: - warning('Reset: Account for new tz field') - self._store.clear() # remove schedule at 3.0.8 upgrade - if 'utc_enabled' not in self._store: - warning('Reset: Account for new utc_enabled field') - self._store.clear() # remove schedule at 3.0.9 upgrade + try: + self._store['entries'] + except KeyError: + # new schedule db + self._store['entries'] = {} + else: + if '__version__' not in self._store: + warning('DB Reset: Account for new __version__ field') + self._store.clear() # remove schedule at 2.2.2 upgrade. 
+ elif 'tz' not in self._store: + warning('DB Reset: Account for new tz field') + self._store.clear() # remove schedule at 3.0.8 upgrade + elif 'utc_enabled' not in self._store: + warning('DB Reset: Account for new utc_enabled field') + self._store.clear() # remove schedule at 3.0.9 upgrade tz = self.app.conf.CELERY_TIMEZONE stored_tz = self._store.get('tz') diff --git a/awx/lib/site-packages/celery/bin/amqp.py b/awx/lib/site-packages/celery/bin/amqp.py index c14f91d1a8..4dab1527a4 100644 --- a/awx/lib/site-packages/celery/bin/amqp.py +++ b/awx/lib/site-packages/celery/bin/amqp.py @@ -15,7 +15,6 @@ import pprint from functools import partial from itertools import count -from amqp import Message from kombu.utils.encoding import safe_str from celery.utils.functional import padlist @@ -175,7 +174,7 @@ class AMQShell(cmd.Cmd): 'basic.get': Spec(('queue', str), ('no_ack', bool, 'off'), returns=dump_message), - 'basic.publish': Spec(('msg', Message), + 'basic.publish': Spec(('msg', str), ('exchange', str), ('routing_key', str), ('mandatory', bool, 'no'), diff --git a/awx/lib/site-packages/celery/bin/base.py b/awx/lib/site-packages/celery/bin/base.py index 2a27c68384..9ad7946659 100644 --- a/awx/lib/site-packages/celery/bin/base.py +++ b/awx/lib/site-packages/celery/bin/base.py @@ -68,7 +68,6 @@ from __future__ import absolute_import, print_function, unicode_literals import os import random import re -import socket import sys import warnings import json @@ -86,9 +85,14 @@ from celery.five import items, string, string_t from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE from celery.utils import term from celery.utils import text -from celery.utils import NODENAME_DEFAULT, nodesplit +from celery.utils import node_format, host_format from celery.utils.imports import symbol_by_name, import_from_cwd +try: + input = raw_input +except NameError: + pass + # always enable DeprecationWarnings, so our users can see them. for warning in (CDeprecationWarning, CPendingDeprecationWarning): warnings.simplefilter('once', warning, 0) @@ -101,7 +105,6 @@ Try --help? find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)') find_rst_ref = re.compile(r':\w+:`(.+?)`') -find_sformat = re.compile(r'%(\w)') __all__ = ['Error', 'UsageError', 'Extensions', 'HelpFormatter', 'Command', 'Option', 'daemon_options'] @@ -236,8 +239,8 @@ class Command(object): self.get_app = get_app or self._get_default_app self.stdout = stdout or sys.stdout self.stderr = stderr or sys.stderr - self.no_color = no_color - self.colored = term.colored(enabled=not self.no_color) + self._colored = None + self._no_color = no_color self.quiet = quiet if not self.description: self.description = self.__doc__ @@ -325,6 +328,34 @@ class Command(object): return os.path.expanduser(value) return value + def ask(self, q, choices, default=None): + """Prompt user to choose from a tuple of string values. + + :param q: the question to ask (do not include questionark) + :param choice: tuple of possible choices, must be lowercase. + :param default: Default value if any. + + If a default is not specified the question will be repeated + until the user gives a valid choice. + + Matching is done case insensitively. + + """ + schoices = choices + if default is not None: + schoices = [c.upper() if c == default else c.lower() + for c in choices] + schoices = '/'.join(schoices) + + p = '{0} ({1})? 
'.format(q.capitalize(), schoices) + while 1: + val = input(p).lower() + if val in choices: + return val + elif default is not None: + break + return default + def handle_argv(self, prog_name, argv, command=None): """Parse command-line arguments from ``argv`` and dispatch to :meth:`run`. @@ -405,8 +436,10 @@ class Command(object): quiet = preload_options.get('quiet') if quiet is not None: self.quiet = quiet - self.colored.enabled = \ - not preload_options.get('no_color', self.no_color) + try: + self.no_color = preload_options['no_color'] + except KeyError: + pass workdir = preload_options.get('working_directory') if workdir: os.chdir(workdir) @@ -427,7 +460,7 @@ class Command(object): if config: os.environ['CELERY_CONFIG_MODULE'] = config if self.respects_app_option: - if app and self.respects_app_option: + if app: self.app = self.find_app(app) elif self.app is None: self.app = self.get_app(loader=loader) @@ -495,7 +528,12 @@ class Command(object): opt = opts.get(arg) if opt: if opt.takes_value(): - acc[opt.dest] = args[index + 1] + try: + acc[opt.dest] = args[index + 1] + except IndexError: + raise ValueError( + 'Missing required argument for {0}'.format( + arg)) index += 1 elif opt.action == 'store_true': acc[opt.dest] = True @@ -526,20 +564,10 @@ class Command(object): pass def node_format(self, s, nodename, **extra): - name, host = nodesplit(nodename) - return self._simple_format( - s, host, n=name or NODENAME_DEFAULT, **extra) + return node_format(s, nodename, **extra) - def simple_format(self, s, **extra): - return self._simple_format(s, socket.gethostname(), **extra) - - def _simple_format(self, s, host, - match=find_sformat, expand=r'\1', **keys): - if s: - name, _, domain = host.partition('.') - keys = dict({'%': '%', 'h': host, 'n': name, 'd': domain}, **keys) - return match.sub(lambda m: keys[m.expand(expand)], s) - return s + def host_format(self, s, **extra): + return host_format(s, **extra) def _get_default_app(self, *args, **kwargs): from celery._state import get_current_app @@ -593,6 +621,26 @@ class Command(object): if body and self.show_body: self.out(body) + @property + def colored(self): + if self._colored is None: + self._colored = term.colored(enabled=not self.no_color) + return self._colored + + @colored.setter + def colored(self, obj): + self._colored = obj + + @property + def no_color(self): + return self._no_color + + @no_color.setter + def no_color(self, value): + self._no_color = value + if self._colored is not None: + self._colored.enabled = not self._no_color + def daemon_options(default_pidfile=None, default_logfile=None): return ( diff --git a/awx/lib/site-packages/celery/bin/beat.py b/awx/lib/site-packages/celery/bin/beat.py index 3049668b9d..6b5b734682 100644 --- a/awx/lib/site-packages/celery/bin/beat.py +++ b/awx/lib/site-packages/celery/bin/beat.py @@ -24,7 +24,7 @@ The :program:`celery beat` command. Scheduler class to use. Default is :class:`celery.beat.PersistentScheduler`. -.. cmdoption:: max-interval +.. cmdoption:: --max-interval Max seconds to sleep between schedule iterations. diff --git a/awx/lib/site-packages/celery/bin/celery.py b/awx/lib/site-packages/celery/bin/celery.py index 3b35bb10c2..10d7c03243 100644 --- a/awx/lib/site-packages/celery/bin/celery.py +++ b/awx/lib/site-packages/celery/bin/celery.py @@ -9,6 +9,7 @@ The :program:`celery` umbrella command. 
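The colored/no_color property pair added to Command above makes color handling lazy: the term.colored object is only built on first access, and toggling no_color later (for example from the --no-color preload option) updates an already-built instance. The shape of it, with a stand-in for term.colored::

    class _Colored(object):              # stand-in for term.colored
        def __init__(self, enabled):
            self.enabled = enabled

    class CommandSketch(object):
        def __init__(self, no_color=False):
            self._colored = None
            self._no_color = no_color

        @property
        def colored(self):
            if self._colored is None:
                self._colored = _Colored(enabled=not self.no_color)
            return self._colored

        @property
        def no_color(self):
            return self._no_color

        @no_color.setter
        def no_color(self, value):
            self._no_color = value
            if self._colored is not None:
                self._colored.enabled = not value

    cmd = CommandSketch()
    assert cmd.colored.enabled
    cmd.no_color = True
    assert not cmd.colored.enabled       # existing object kept in sync
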
from __future__ import absolute_import, unicode_literals import anyjson +import numbers import os import sys @@ -61,7 +62,7 @@ if DEBUG: # pragma: no cover def determine_exit_status(ret): - if isinstance(ret, int): + if isinstance(ret, numbers.Integral): return ret return EX_OK if ret else EX_FAILURE @@ -91,7 +92,8 @@ class multi(Command): def run_from_argv(self, prog_name, argv, command=None): from celery.bin.multi import MultiTool - return MultiTool().execute_from_commandline( + multi = MultiTool(quiet=self.quiet, no_color=self.no_color) + return multi.execute_from_commandline( [command] + argv, prog_name, ) @@ -195,17 +197,35 @@ class purge(Command): WARNING: There is no undo operation for this command. """ + warn_prelude = ( + '{warning}: This will remove all tasks from {queues}: {names}.\n' + ' There is no undo for this operation!\n\n' + '(to skip this prompt use the -f option)\n' + ) + warn_prompt = 'Are you sure you want to delete all tasks' fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.' fmt_empty = 'No messages purged from {qnum} {queues}' + option_list = Command.option_list + ( + Option('--force', '-f', action='store_true', + help='Do not prompt for verification'), + ) - def run(self, *args, **kwargs): - queues = len(self.app.amqp.queues) + def run(self, force=False, **kwargs): + names = list(sorted(self.app.amqp.queues.keys())) + qnum = len(names) + if not force: + self.out(self.warn_prelude.format( + warning=self.colored.red('WARNING'), + queues=text.pluralize(qnum, 'queue'), names=', '.join(names), + )) + if self.ask(self.warn_prompt, ('yes', 'no'), 'no') != 'yes': + return messages = self.app.control.purge() fmt = self.fmt_purged if messages else self.fmt_empty self.out(fmt.format( - mnum=messages, qnum=queues, + mnum=messages, qnum=qnum, messages=text.pluralize(messages, 'message'), - queues=text.pluralize(queues, 'queue'))) + queues=text.pluralize(qnum, 'queue'))) class result(Command): @@ -295,7 +315,7 @@ class _RemoteControl(Command): command, self.args) def call(self, *args, **kwargs): - raise NotImplementedError('get_obj') + raise NotImplementedError('call') def run(self, *args, **kwargs): if not args: @@ -365,10 +385,10 @@ class inspect(_RemoteControl): return getattr(i, method)(*args) def objgraph(self, type_='Request', *args, **kwargs): - return self.call('objgraph', type_) + return self.call('objgraph', type_, **kwargs) def conf(self, with_defaults=False, *args, **kwargs): - return self.call('conf', with_defaults=with_defaults) + return self.call('conf', with_defaults, **kwargs) class control(_RemoteControl): @@ -548,7 +568,8 @@ class shell(Command): # pragma: no cover 'chunks': celery.chunks, 'xmap': celery.xmap, 'xstarmap': celery.xstarmap, - 'subtask': celery.subtask} + 'subtask': celery.subtask, + 'signature': celery.signature} if not without_tasks: self.locals.update(dict( @@ -611,8 +632,10 @@ class help(Command): def run(self, *args, **kwargs): self.parser.print_help() - self.out(HELP.format(prog_name=self.prog_name, - commands=CeleryCommand.list_commands())) + self.out(HELP.format( + prog_name=self.prog_name, + commands=CeleryCommand.list_commands(colored=self.colored), + )) return EX_USAGE @@ -665,6 +688,7 @@ class CeleryCommand(Command): try: return cls( app=self.app, on_error=self.on_error, + no_color=self.no_color, quiet=self.quiet, on_usage_error=partial(self.on_usage_error, command=command), ).run_from_argv(self.prog_name, argv[1:], command=argv[0]) except self.UsageError as exc: @@ -679,7 +703,7 @@ class CeleryCommand(Command): 
helps = '{self.prog_name} {command} --help' else: helps = '{self.prog_name} --help' - self.error(self.colored.magenta("Error: {0}".format(exc))) + self.error(self.colored.magenta('Error: {0}'.format(exc))) self.error("""Please try '{0}'""".format(helps.format( self=self, command=command, ))) @@ -692,11 +716,33 @@ class CeleryCommand(Command): if value.startswith('--'): rest.append(value) elif value.startswith('-'): - rest.extend([value] + [argv[index + 1]]) - index += 1 + # we eat the next argument even though we don't know + # if this option takes an argument or not. + # instead we will assume what is the command name in the + # return statements below. + try: + nxt = argv[index + 1] + if nxt.startswith('-'): + # is another option + rest.append(value) + else: + # is (maybe) a value for this option + rest.extend([value, nxt]) + index += 1 + except IndexError: + rest.append(value) + break else: - return argv[index:] + rest + break index += 1 + if argv[index:]: + # if there are more arguments left then divide and swap + # we assume the first argument in argv[i:] is the command + # name. + return argv[index:] + rest + # if there are no more arguments then the last arg in rest' + # must be the command. + [rest.pop()] + rest return [] def prepare_prog_name(self, name): @@ -725,8 +771,9 @@ class CeleryCommand(Command): sys.exit(EX_FAILURE) @classmethod - def get_command_info(self, command, indent=0, color=None): - colored = term.colored().names[color] if color else lambda x: x + def get_command_info(self, command, indent=0, color=None, colored=None): + colored = term.colored() if colored is None else colored + colored = colored.names[color] if color else lambda x: x obj = self.commands[command] cmd = 'celery {0}'.format(colored(command)) if obj.leaf: @@ -738,14 +785,16 @@ class CeleryCommand(Command): ]) @classmethod - def list_commands(self, indent=0): - white = term.colored().white + def list_commands(self, indent=0, colored=None): + colored = term.colored() if colored is None else colored + white = colored.white ret = [] for cls, commands, color in command_classes: ret.extend([ text.indent('+ {0}: '.format(white(cls)), indent), - '\n'.join(self.get_command_info(command, indent + 4, color) - for command in commands), + '\n'.join( + self.get_command_info(command, indent + 4, color, colored) + for command in commands), '' ]) return '\n'.join(ret).strip() diff --git a/awx/lib/site-packages/celery/bin/events.py b/awx/lib/site-packages/celery/bin/events.py index 2bca74b65b..d98750504c 100644 --- a/awx/lib/site-packages/celery/bin/events.py +++ b/awx/lib/site-packages/celery/bin/events.py @@ -57,14 +57,14 @@ class events(Command): celery events -d --app=proj dump events to screen. celery events -b amqp:// - celery events -C [options] + celery events -c [options] run snapshot camera. Examples:: celery events celery events -d - celery events -C mod.attr -F 1.0 --detach --maxrate=100/m -l info + celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info """ doc = __doc__ supports_args = False diff --git a/awx/lib/site-packages/celery/bin/multi.py b/awx/lib/site-packages/celery/bin/multi.py index 9e0789d986..ca14c0bfed 100644 --- a/awx/lib/site-packages/celery/bin/multi.py +++ b/awx/lib/site-packages/celery/bin/multi.py @@ -478,6 +478,18 @@ def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''): p.namespaces[subns].update(ns_opts) p.namespaces.pop(ns_name) + # Numbers in args always refers to the index in the list of names. + # (e.g. 
`start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on). + for ns_name, ns_opts in list(items(p.namespaces)): + if ns_name.isdigit(): + ns_index = int(ns_name) - 1 + if ns_index < 0: + raise KeyError('Indexes start at 1 got: %r' % (ns_name, )) + try: + p.namespaces[names[ns_index]].update(ns_opts) + except IndexError: + raise KeyError('No node at index %r' % (ns_name, )) + for name in names: this_suffix = suffix if '@' in name: diff --git a/awx/lib/site-packages/celery/bin/worker.py b/awx/lib/site-packages/celery/bin/worker.py index f18b85c3ef..bdc564d4f7 100644 --- a/awx/lib/site-packages/celery/bin/worker.py +++ b/awx/lib/site-packages/celery/bin/worker.py @@ -171,7 +171,7 @@ class worker(Command): # parse options before detaching so errors can be handled. options, args = self.prepare_args( *self.parse_options(prog_name, argv, command)) - self.maybe_detach([command] + argv) + self.maybe_detach([command] + sys.argv[1:]) return self(*args, **options) def maybe_detach(self, argv, dopts=['-D', '--detach']): @@ -192,7 +192,7 @@ class worker(Command): if self.app.IS_WINDOWS and kwargs.get('beat'): self.die('-B option does not work on Windows. ' 'Please run celery beat as a separate service.') - hostname = self.simple_format(default_nodename(hostname)) + hostname = self.host_format(default_nodename(hostname)) if loglevel: try: loglevel = mlevel(loglevel) @@ -203,7 +203,7 @@ class worker(Command): return self.app.Worker( hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, - logfile=self.node_format(logfile, hostname), + logfile=logfile, # node format handled by celery.app.log.setup pidfile=self.node_format(pidfile, hostname), state_db=self.node_format(state_db, hostname), **kwargs ).start() diff --git a/awx/lib/site-packages/celery/bootsteps.py b/awx/lib/site-packages/celery/bootsteps.py index 7796afb60e..9c0427fe69 100644 --- a/awx/lib/site-packages/celery/bootsteps.py +++ b/awx/lib/site-packages/celery/bootsteps.py @@ -392,7 +392,7 @@ class StartStopStep(Step): class ConsumerStep(StartStopStep): - requires = ('Connection', ) + requires = ('celery.worker.consumer:Connection', ) consumers = None def get_consumers(self, channel): diff --git a/awx/lib/site-packages/celery/canvas.py b/awx/lib/site-packages/celery/canvas.py index 6b323776cf..cabc5070c8 100644 --- a/awx/lib/site-packages/celery/canvas.py +++ b/awx/lib/site-packages/celery/canvas.py @@ -194,7 +194,7 @@ class Signature(dict): return s partial = clone - def freeze(self, _id=None): + def freeze(self, _id=None, group_id=None, chord=None): opts = self.options try: tid = opts['task_id'] @@ -202,6 +202,10 @@ class Signature(dict): tid = opts['task_id'] = _id or uuid() if 'reply_to' not in opts: opts['reply_to'] = self.app.oid + if group_id: + opts['group_id'] = group_id + if chord: + opts['chord'] = chord return self.AsyncResult(tid) _freeze = freeze @@ -502,16 +506,20 @@ class group(Signature): def __call__(self, *partial_args, **options): return self.apply_async(partial_args, **options) - def freeze(self, _id=None): + def freeze(self, _id=None, group_id=None, chord=None): opts = self.options try: gid = opts['task_id'] except KeyError: gid = opts['task_id'] = uuid() + if group_id: + opts['group_id'] = group_id + if chord: + opts['chord'] = group_id new_tasks, results = [], [] for task in self.tasks: task = maybe_signature(task, app=self._app).clone() - results.append(task._freeze()) + results.append(task.freeze(group_id=group_id, chord=chord)) new_tasks.append(task) self.tasks = self.kwargs['tasks'] = new_tasks return 
self.app.GroupResult(gid, results) @@ -552,6 +560,9 @@ class chord(Signature): ) self.subtask_type = 'chord' + def freeze(self, _id=None, group_id=None, chord=None): + return self.body.freeze(_id, group_id=group_id, chord=chord) + @classmethod def from_dict(self, d, app=None): args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs'])) @@ -578,7 +589,9 @@ class chord(Signature): app = self.body.type.app return app.tasks['celery.chord'] - def apply_async(self, args=(), kwargs={}, task_id=None, **options): + def apply_async(self, args=(), kwargs={}, task_id=None, + producer=None, publisher=None, connection=None, + router=None, result_cls=None, **options): body = kwargs.get('body') or self.kwargs['body'] kwargs = dict(self.kwargs, **kwargs) body = body.clone(**options) diff --git a/awx/lib/site-packages/celery/concurrency/asynpool.py b/awx/lib/site-packages/celery/concurrency/asynpool.py index 081d3118e7..5c4d5855c9 100644 --- a/awx/lib/site-packages/celery/concurrency/asynpool.py +++ b/awx/lib/site-packages/celery/concurrency/asynpool.py @@ -570,6 +570,15 @@ class AsynPool(_pool.Pool): if inq: busy_workers.discard(inq) hub_remove(proc.sentinel) + waiting_to_start.discard(proc) + self._active_writes.discard(proc.inqW_fd) + hub_remove(proc.inqW_fd) + hub_remove(proc.outqR_fd) + if proc.synqR_fd: + hub_remove(proc.synqR_fd) + if proc.synqW_fd: + self._active_writes.discard(proc.synqW_fd) + hub_remove(proc.synqW_fd) self.on_process_down = on_process_down def _create_write_handlers(self, hub, @@ -960,14 +969,13 @@ class AsynPool(_pool.Pool): return inq, outq, synq def on_process_alive(self, pid): - """Handler called when the WORKER_UP message is received + """Handler called when the :const:`WORKER_UP` message is received from a child process, which marks the process as ready to receive work.""" try: proc = next(w for w in self._pool if w.pid == pid) except StopIteration: - # process already exited :( this will be handled elsewhere. 
- return + return logger.warning('process with pid=%s already exited', pid) assert proc.inqW_fd not in self._fileno_to_inq assert proc.inqW_fd not in self._all_inqueues self._waiting_to_start.discard(proc) diff --git a/awx/lib/site-packages/celery/concurrency/base.py b/awx/lib/site-packages/celery/concurrency/base.py index b2ae22608d..6b3594a960 100644 --- a/awx/lib/site-packages/celery/concurrency/base.py +++ b/awx/lib/site-packages/celery/concurrency/base.py @@ -16,8 +16,10 @@ from billiard.einfo import ExceptionInfo from billiard.exceptions import WorkerLostError from kombu.utils.encoding import safe_repr +from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.five import monotonic, reraise from celery.utils import timer2 +from celery.utils.text import truncate from celery.utils.log import get_logger __all__ = ['BasePool', 'apply_target'] @@ -36,6 +38,8 @@ def apply_target(target, args=(), kwargs={}, callback=None, raise except Exception: raise + except (WorkerShutdown, WorkerTerminate): + raise except BaseException as exc: try: reraise(WorkerLostError, WorkerLostError(repr(exc)), @@ -143,7 +147,8 @@ class BasePool(object): """ if self._does_debug: logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)', - target, safe_repr(args), safe_repr(kwargs)) + target, truncate(safe_repr(args), 1024), + truncate(safe_repr(kwargs), 1024)) return self.on_apply(target, args, kwargs, waitforslot=self.putlocks, diff --git a/awx/lib/site-packages/celery/concurrency/prefork.py b/awx/lib/site-packages/celery/concurrency/prefork.py index 75eb28efce..b579d0e100 100644 --- a/awx/lib/site-packages/celery/concurrency/prefork.py +++ b/awx/lib/site-packages/celery/concurrency/prefork.py @@ -57,10 +57,15 @@ def process_initializer(app, hostname): # run once per process. app.loader.init_worker() app.loader.init_worker_process() + logfile = os.environ.get('CELERY_LOG_FILE') or None + if logfile and '%i' in logfile.lower(): + # logfile path will differ so need to set up logging again. + app.log.already_setup = False app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0), - os.environ.get('CELERY_LOG_FILE') or None, + logfile, bool(os.environ.get('CELERY_LOG_REDIRECT', False)), - str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL'))) + str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')), + hostname=hostname) if os.environ.get('FORKED_BY_MULTIPROCESSING'): # pool did execv after fork trace.setup_worker_optimizations(app) diff --git a/awx/lib/site-packages/celery/contrib/batches.py b/awx/lib/site-packages/celery/contrib/batches.py index a3feb1d284..0248ebf8d4 100644 --- a/awx/lib/site-packages/celery/contrib/batches.py +++ b/awx/lib/site-packages/celery/contrib/batches.py @@ -47,7 +47,7 @@ messages, and every 10 seconds. from celery.contrib.batches import Batches - wot_api_target = "https://api.mywot.com/0.4/public_link_json" + wot_api_target = 'https://api.mywot.com/0.4/public_link_json' @app.task(base=Batches, flush_every=100, flush_interval=10) def wot_api(requests): @@ -64,7 +64,7 @@ messages, and every 10 seconds. 
domains = [urlparse(url).netloc for url in urls]
         response = requests.get(
             wot_api_target,
-            params={"hosts": ('/').join(set(domains)) + '/'}
+            params={'hosts': ('/').join(set(domains)) + '/'}
         )
         return [response.json[domain] for domain in domains]
diff --git a/awx/lib/site-packages/celery/contrib/sphinx.py b/awx/lib/site-packages/celery/contrib/sphinx.py
new file mode 100644
index 0000000000..d22d82f5fe
--- /dev/null
+++ b/awx/lib/site-packages/celery/contrib/sphinx.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+"""
+celery.contrib.sphinx
+=====================
+
+Sphinx documentation plugin
+
+**Usage**
+
+Add the extension to your :file:`docs/conf.py` configuration module:
+
+.. code-block:: python
+
+    extensions = (...,
+                  'celery.contrib.sphinx')
+
+If you would like to change the prefix for tasks in reference documentation
+then you can change the ``celery_task_prefix`` configuration value:
+
+.. code-block:: python
+
+    celery_task_prefix = '(task)'  # < default
+
+
+With the extension installed, `autodoc` will automatically find
+task-decorated objects and generate the correct documentation for them
+(as well as add a ``(task)`` prefix), and you can also refer to the tasks
+using `:task:proj.tasks.add` syntax.
+
+Use ``.. autotask::`` to manually document a task.
+
+"""
+from __future__ import absolute_import
+
+from inspect import formatargspec, getargspec
+
+from sphinx.domains.python import PyModulelevel
+from sphinx.ext.autodoc import FunctionDocumenter
+
+from celery.app.task import BaseTask
+
+
+class TaskDocumenter(FunctionDocumenter):
+    objtype = 'task'
+    member_order = 11
+
+    @classmethod
+    def can_document_member(cls, member, membername, isattr, parent):
+        return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
+
+    def format_args(self):
+        wrapped = getattr(self.object, '__wrapped__')
+        if wrapped is not None:
+            argspec = getargspec(wrapped)
+            fmt = formatargspec(*argspec)
+            fmt = fmt.replace('\\', '\\\\')
+            return fmt
+        return ''
+
+    def document_members(self, all_members=False):
+        pass
+
+
+class TaskDirective(PyModulelevel):
+
+    def get_signature_prefix(self, sig):
+        return self.env.config.celery_task_prefix
+
+
+def setup(app):
+    app.add_autodocumenter(TaskDocumenter)
+    app.domains['py'].directives['task'] = TaskDirective
+    app.add_config_value('celery_task_prefix', '(task)', True)
diff --git a/awx/lib/site-packages/celery/datastructures.py b/awx/lib/site-packages/celery/datastructures.py
index 9f7c34f71f..9c36a39575 100644
--- a/awx/lib/site-packages/celery/datastructures.py
+++ b/awx/lib/site-packages/celery/datastructures.py
@@ -12,7 +12,7 @@ import sys
 import time

 from collections import defaultdict, Mapping, MutableMapping, MutableSet
-from heapq import heapify, heappush, heappop
+from heapq import heappush, heappop
 from functools import partial
 from itertools import chain

@@ -555,7 +555,7 @@ class LimitedSet(object):
     """Kind-of Set with limitations.

     Good for when you need to test for membership (`a in set`),
-    but the list might become to big.
+    but the list might become too big.

     :keyword maxlen: Maximum number of members before we start
                      evicting expired members.
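A usage sketch for the ``LimitedSet`` described above: it is a bounded
membership structure (the worker uses one to track revoked task ids, for
example), and the constructor keywords here are assumed from the docstring
in this hunk:

    from celery.datastructures import LimitedSet

    revoked = LimitedSet(maxlen=10000, expires=3600)
    revoked.add('some-task-id')        # newest members are kept
    assert 'some-task-id' in revoked   # membership is an O(1) dict lookup
    # once more than maxlen members have been added, the oldest
    # (expired) members are evicted first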
@@ -634,7 +634,7 @@ class LimitedSet(object): if isinstance(other, LimitedSet): self._data.update(other._data) self._heap.extend(other._heap) - heapify(self._heap) + self._heap.sort() else: for obj in other: self.add(obj) diff --git a/awx/lib/site-packages/celery/events/dumper.py b/awx/lib/site-packages/celery/events/dumper.py index 2a3fd41ca1..323afc4e1a 100644 --- a/awx/lib/site-packages/celery/events/dumper.py +++ b/awx/lib/site-packages/celery/events/dumper.py @@ -45,6 +45,11 @@ class Dumper(object): def say(self, msg): print(msg, file=self.out) + # need to flush so that output can be piped. + try: + self.out.flush() + except AttributeError: + pass def on_event(self, ev): timestamp = datetime.utcfromtimestamp(ev.pop('timestamp')) diff --git a/awx/lib/site-packages/celery/events/state.py b/awx/lib/site-packages/celery/events/state.py index 05bb4f95c7..c78f2d08a1 100644 --- a/awx/lib/site-packages/celery/events/state.py +++ b/awx/lib/site-packages/celery/events/state.py @@ -18,12 +18,12 @@ """ from __future__ import absolute_import +import bisect import sys import threading from datetime import datetime from decimal import Decimal -from heapq import heapify, heappush, heappop from itertools import islice from operator import itemgetter from time import time @@ -35,7 +35,7 @@ from kombu.utils import cached_property, kwdict from celery import states from celery.five import class_property, items, values from celery.utils import deprecated -from celery.utils.functional import LRUCache +from celery.utils.functional import LRUCache, memoize from celery.utils.log import get_logger PYPY = hasattr(sys, 'pypy_version_info') @@ -66,6 +66,14 @@ R_TASK = '' __all__ = ['Worker', 'Task', 'State', 'heartbeat_expires'] +@memoize(maxsize=1000, keyfun=lambda a, _: a[0]) +def _warn_drift(hostname, drift, local_received, timestamp): + # we use memoize here so the warning is only logged once per hostname + warn(DRIFT_WARNING, hostname, drift, + datetime.fromtimestamp(local_received), + datetime.fromtimestamp(timestamp)) + + def heartbeat_expires(timestamp, freq=60, expire_window=HEARTBEAT_EXPIRE_WINDOW, Decimal=Decimal, float=float, isinstance=isinstance): @@ -139,13 +147,15 @@ class Worker(object): def _create_event_handler(self): _set = object.__setattr__ - heartbeats = self.heartbeats hbmax = self.heartbeat_max + heartbeats = self.heartbeats + hb_pop = self.heartbeats.pop + hb_append = self.heartbeats.append def event(type_, timestamp=None, local_received=None, fields=None, - max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs, - heappush=heappush, heappop=heappop, int=int, len=len): + max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs, int=int, + insort=bisect.insort, len=len): fields = fields or {} for k, v in items(fields): _set(self, k, v) @@ -156,14 +166,16 @@ class Worker(object): return drift = abs(int(local_received) - int(timestamp)) if drift > HEARTBEAT_DRIFT_MAX: - warn(DRIFT_WARNING, self.hostname, drift, - datetime.fromtimestamp(local_received), - datetime.fromtimestamp(timestamp)) - if not heartbeats or ( - local_received and local_received > heartbeats[-1]): - heappush(heartbeats, local_received) - if len(heartbeats) > hbmax: - heappop(heartbeats) + _warn_drift(self.hostname, drift, + local_received, timestamp) + if local_received: + hearts = len(heartbeats) + if hearts > hbmax - 1: + hb_pop(0) + if hearts and local_received > heartbeats[-1]: + hb_append(local_received) + else: + insort(heartbeats, local_received) return event def update(self, f, **kw): @@ -485,6 +497,8 @@ class 
State(object): tfields = itemgetter('uuid', 'hostname', 'timestamp', 'local_received', 'clock') taskheap = self._taskheap + th_append = taskheap.append + th_pop = taskheap.pop # Removing events from task heap is an O(n) operation, # so easier to just account for the common number of events # for each task (PENDING->RECEIVED->STARTED->final) @@ -498,7 +512,8 @@ class State(object): get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__ def _event(event, - timetuple=timetuple, KeyError=KeyError, created=True): + timetuple=timetuple, KeyError=KeyError, + insort=bisect.insort, created=True): self.event_count += 1 if event_callback: event_callback(self, event) @@ -516,19 +531,20 @@ class State(object): except KeyError: pass else: + is_offline = subject == 'offline' try: worker, created = get_worker(hostname), False except KeyError: - if subject == 'offline': - worker, created = None, False + if is_offline: + worker, created = Worker(hostname), False else: worker = workers[hostname] = Worker(hostname) - if worker: - worker.event(subject, timestamp, local_received, event) + worker.event(subject, timestamp, local_received, event) if on_node_join and (created or subject == 'online'): on_node_join(worker) - if on_node_leave and subject == 'offline': + if on_node_leave and is_offline: on_node_leave(worker) + workers.pop(hostname, None) return (worker, created), subject elif group == 'task': (uuid, hostname, timestamp, @@ -549,11 +565,21 @@ class State(object): task.worker = worker if worker is not None and local_received: worker.event(None, local_received, timestamp) + origin = hostname if is_client_event else worker.id - heappush(taskheap, - timetuple(clock, timestamp, origin, ref(task))) - if len(taskheap) > max_events_in_heap: - heappop(taskheap) + + # remove oldest event if exceeding the limit. + heaps = len(taskheap) + if heaps + 1 > max_events_in_heap: + th_pop(0) + + # most events will be dated later than the previous. 
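+            # event, so appending to the end is the cheap common case;
+            # bisect.insort below only runs for the rare out-of-order
+            # event, keeping taskheap sorted without a full heapify.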
+ timetup = timetuple(clock, timestamp, origin, ref(task)) + if heaps and timetup > taskheap[-1]: + th_append(timetup) + else: + insort(taskheap, timetup) + if subject == 'received': self.task_count += 1 task.event(subject, timestamp, local_received, event) @@ -563,12 +589,12 @@ class State(object): return (task, created), subject return _event - def rebuild_taskheap(self, timetuple=timetuple, heapify=heapify): + def rebuild_taskheap(self, timetuple=timetuple): heap = self._taskheap[:] = [ timetuple(t.clock, t.timestamp, t.origin, ref(t)) for t in values(self.tasks) ] - heapify(heap) + heap.sort() def itertasks(self, limit=None): for index, row in enumerate(items(self.tasks)): diff --git a/awx/lib/site-packages/celery/exceptions.py b/awx/lib/site-packages/celery/exceptions.py index 25c7d4f4ab..ab65019416 100644 --- a/awx/lib/site-packages/celery/exceptions.py +++ b/awx/lib/site-packages/celery/exceptions.py @@ -8,13 +8,16 @@ """ from __future__ import absolute_import +import numbers + from .five import string_t from billiard.exceptions import ( # noqa SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated, ) -__all__ = ['SecurityError', 'Ignore', 'SystemTerminate', 'QueueNotFound', +__all__ = ['SecurityError', 'Ignore', 'QueueNotFound', + 'WorkerShutdown', 'WorkerTerminate', 'ImproperlyConfigured', 'NotRegistered', 'AlreadyRegistered', 'TimeoutError', 'MaxRetriesExceededError', 'Retry', 'TaskRevokedError', 'NotConfigured', 'AlwaysEagerIgnored', @@ -52,8 +55,13 @@ class Reject(Exception): return 'reject requeue=%s: %s' % (self.requeue, self.reason) -class SystemTerminate(SystemExit): - """Signals that the worker should terminate.""" +class WorkerTerminate(SystemExit): + """Signals that the worker should terminate immediately.""" +SystemTerminate = WorkerTerminate # XXX compat + + +class WorkerShutdown(SystemExit): + """Signals that the worker should perform a warm shutdown.""" class QueueNotFound(KeyError): @@ -92,7 +100,8 @@ class Retry(Exception): #: Exception (if any) that caused the retry to happen. exc = None - #: Time of retry (ETA), either int or :class:`~datetime.datetime`. + #: Time of retry (ETA), either :class:`numbers.Real` or + #: :class:`~datetime.datetime`. 
when = None def __init__(self, message=None, exc=None, when=None, **kwargs): @@ -106,7 +115,7 @@ class Retry(Exception): Exception.__init__(self, exc, when, **kwargs) def humanize(self): - if isinstance(self.when, int): + if isinstance(self.when, numbers.Real): return 'in {0.when}s'.format(self) return 'at {0.when}'.format(self) diff --git a/awx/lib/site-packages/celery/fixups/django.py b/awx/lib/site-packages/celery/fixups/django.py index cbf43941ae..ab20325f59 100644 --- a/awx/lib/site-packages/celery/fixups/django.py +++ b/awx/lib/site-packages/celery/fixups/django.py @@ -134,13 +134,22 @@ class DjangoWorkerFixup(object): ) def validate_models(self): - from django.core.management.validation import get_validation_errors s = io.StringIO() - num_errors = get_validation_errors(s, None) - if num_errors: - raise RuntimeError( - 'One or more Django models did not validate:\n{0}'.format( - s.getvalue())) + try: + from django.core.management.validation import get_validation_errors + except ImportError: + import django + from django.core.management.base import BaseCommand + django.setup() + cmd = BaseCommand() + cmd.stdout, cmd.stderr = sys.stdout, sys.stderr + cmd.check() + else: + num_errors = get_validation_errors(s, None) + if num_errors: + raise RuntimeError( + 'One or more Django models did not validate:\n{0}'.format( + s.getvalue())) def install(self): signals.beat_embedded_init.connect(self.close_database) diff --git a/awx/lib/site-packages/celery/loaders/base.py b/awx/lib/site-packages/celery/loaders/base.py index 1062b1bacb..d73547aadf 100644 --- a/awx/lib/site-packages/celery/loaders/base.py +++ b/awx/lib/site-packages/celery/loaders/base.py @@ -247,7 +247,7 @@ class BaseLoader(object): def autodiscover_tasks(self, packages, related_name='tasks'): self.task_modules.update( - mod.__name__ for mod in autodiscover_tasks(packages, + mod.__name__ for mod in autodiscover_tasks(packages or (), related_name) if mod) @property @@ -266,7 +266,7 @@ def autodiscover_tasks(packages, related_name='tasks'): global _RACE_PROTECTION if _RACE_PROTECTION: - return + return () _RACE_PROTECTION = True try: return [find_related_module(pkg, related_name) for pkg in packages] diff --git a/awx/lib/site-packages/celery/local.py b/awx/lib/site-packages/celery/local.py index 55fd6d164d..ada6e9381a 100644 --- a/awx/lib/site-packages/celery/local.py +++ b/awx/lib/site-packages/celery/local.py @@ -212,12 +212,27 @@ class PromiseProxy(Proxy): """ + __slots__ = ('__pending__', ) + def _get_current_object(self): try: return object.__getattribute__(self, '__thing') except AttributeError: return self.__evaluate__() + def __then__(self, fun, *args, **kwargs): + if self.__evaluated__(): + return fun(*args, **kwargs) + from collections import deque + try: + pending = object.__getattribute__(self, '__pending__') + except AttributeError: + pending = None + if pending is None: + pending = deque() + object.__setattr__(self, '__pending__', pending) + pending.append((fun, args, kwargs)) + def __evaluated__(self): try: object.__getattribute__(self, '__thing') @@ -243,6 +258,20 @@ class PromiseProxy(Proxy): except AttributeError: # pragma: no cover # May mask errors so ignore pass + try: + pending = object.__getattribute__(self, '__pending__') + except AttributeError: + pass + else: + try: + while pending: + fun, args, kwargs = pending.popleft() + fun(*args, **kwargs) + finally: + try: + object.__delattr__(self, '__pending__') + except AttributeError: + pass def maybe_evaluate(obj): diff --git 
a/awx/lib/site-packages/celery/platforms.py b/awx/lib/site-packages/celery/platforms.py index 2689126ead..c4013b5785 100644 --- a/awx/lib/site-packages/celery/platforms.py +++ b/awx/lib/site-packages/celery/platforms.py @@ -12,6 +12,7 @@ from __future__ import absolute_import, print_function import atexit import errno import math +import numbers import os import platform as _platform import signal as _signal @@ -49,6 +50,7 @@ EX_OK = getattr(os, 'EX_OK', 0) EX_FAILURE = 1 EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69) EX_USAGE = getattr(os, 'EX_USAGE', 64) +EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73) SYSTEM = _platform.system() IS_OSX = SYSTEM == 'Darwin' @@ -258,7 +260,8 @@ def create_pidlock(pidfile): def _create_pidlock(pidfile): pidlock = Pidfile(pidfile) if pidlock.is_locked() and not pidlock.remove_if_stale(): - raise SystemExit(PIDLOCKED.format(pidfile, pidlock.read_pid())) + print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr) + raise SystemExit(EX_CANTCREAT) pidlock.acquire() return pidlock @@ -266,9 +269,10 @@ def _create_pidlock(pidfile): if hasattr(os, 'closerange'): def close_open_fds(keep=None): - keep = list(uniq(sorted(filter(None, ( - maybe_fileno(f) for f in keep or [] - ))))) + # must make sure this is 0-inclusive (Issue #1882) + keep = list(uniq(sorted( + f for f in map(maybe_fileno, keep or []) if f is not None + ))) maxfd = get_fdmax(default=2048) kL, kH = iter([-1] + keep), iter(keep + [maxfd]) for low, high in zip_longest(kL, kH): @@ -606,7 +610,7 @@ class Signals(object): def signum(self, signal_name): """Get signal number from signal name.""" - if isinstance(signal_name, int): + if isinstance(signal_name, numbers.Integral): return signal_name if not isinstance(signal_name, string_t) \ or not signal_name.isupper(): diff --git a/awx/lib/site-packages/celery/result.py b/awx/lib/site-packages/celery/result.py index eef9558285..069d8fde8c 100644 --- a/awx/lib/site-packages/celery/result.py +++ b/awx/lib/site-packages/celery/result.py @@ -25,6 +25,7 @@ from .app import app_or_default from .datastructures import DependencyGraph, GraphFormatter from .exceptions import IncompleteStream, TimeoutError from .five import items, range, string_t, monotonic +from .utils import deprecated __all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult', 'EagerResult', 'result_from_tuple'] @@ -86,6 +87,7 @@ class AsyncResult(ResultBase): self.backend = backend or self.app.backend self.task_name = task_name self.parent = parent + self._cache = None def as_tuple(self): parent = self.parent @@ -117,7 +119,8 @@ class AsyncResult(ResultBase): terminate=terminate, signal=signal, reply=wait, timeout=timeout) - def get(self, timeout=None, propagate=True, interval=0.5): + def get(self, timeout=None, propagate=True, interval=0.5, no_ack=True, + follow_parents=True): """Wait until task is ready, and return its result. .. warning:: @@ -132,6 +135,10 @@ class AsyncResult(ResultBase): retrieve the result. Note that this does not have any effect when using the amqp result store backend, as it does not use polling. + :keyword no_ack: Enable amqp no ack (automatically acknowledge + message). If this is :const:`False` then the message will + **not be acked**. + :keyword follow_parents: Reraise any exception raised by parent task. 
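+
+        For example, to leave the result message unacknowledged until it
+        has actually been retrieved (a sketch: ``add`` is a stand-in task,
+        and ``no_ack`` only matters for backends that consume result
+        messages, such as amqp)::
+
+            >>> result = add.delay(2, 2)
+            >>> result.get(timeout=10, no_ack=False)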
:raises celery.exceptions.TimeoutError: if `timeout` is not :const:`None` and the result does not arrive within `timeout` @@ -142,15 +149,32 @@ class AsyncResult(ResultBase): """ assert_will_not_block() - if propagate and self.parent: - for node in reversed(list(self._parents())): - node.get(propagate=True, timeout=timeout, interval=interval) + on_interval = None + if follow_parents and propagate and self.parent: + on_interval = self._maybe_reraise_parent_error + on_interval() - return self.backend.wait_for(self.id, timeout=timeout, - propagate=propagate, - interval=interval) + if self._cache: + if propagate: + self.maybe_reraise() + return self.result + + try: + return self.backend.wait_for( + self.id, timeout=timeout, + propagate=propagate, + interval=interval, + on_interval=on_interval, + no_ack=no_ack, + ) + finally: + self._get_task_meta() # update self._cache wait = get # deprecated alias to :meth:`get`. + def _maybe_reraise_parent_error(self): + for node in reversed(list(self._parents())): + node.maybe_reraise() + def _parents(self): node = self.parent while node: @@ -160,32 +184,42 @@ class AsyncResult(ResultBase): def collect(self, intermediate=False, **kwargs): """Iterator, like :meth:`get` will wait for the task to complete, but will also follow :class:`AsyncResult` and :class:`ResultSet` - returned by the task, yielding for each result in the tree. + returned by the task, yielding ``(result, value)`` tuples for each + result in the tree. An example would be having the following tasks: .. code-block:: python - @task() - def A(how_many): - return group(B.s(i) for i in range(how_many)) + from celery import group + from proj.celery import app - @task() + @app.task(trail=True) + def A(how_many): + return group(B.s(i) for i in range(how_many))() + + @app.task(trail=True) def B(i): return pow2.delay(i) - @task() + @app.task(trail=True) def pow2(i): return i ** 2 + Note that the ``trail`` option must be enabled + so that the list of children is stored in ``result.children``. + This is the default but enabled explicitly for illustration. + Calling :meth:`collect` would return: .. code-block:: python + >>> from celery.result import ResultBase >>> from proj.tasks import A >>> result = A.delay(10) - >>> list(result.collect()) + >>> [v for v in result.collect() + ... 
if not isinstance(v, (ResultBase, tuple))] [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] """ @@ -227,6 +261,10 @@ class AsyncResult(ResultBase): """Returns :const:`True` if the task failed.""" return self.state == states.FAILURE + def maybe_reraise(self): + if self.state in states.PROPAGATE_STATES: + raise self.result + def build_graph(self, intermediate=False, formatter=None): graph = DependencyGraph( formatter=formatter or GraphFormatter(root=self.id, shape='oval'), @@ -269,6 +307,9 @@ class AsyncResult(ResultBase): def __reduce_args__(self): return self.id, self.backend, self.task_name, None, self.parent + def __del__(self): + self._cache = None + @cached_property def graph(self): return self.build_graph() @@ -279,22 +320,42 @@ class AsyncResult(ResultBase): @property def children(self): - children = self.backend.get_children(self.id) + return self._get_task_meta().get('children') + + def _get_task_meta(self): + if self._cache is None: + meta = self.backend.get_task_meta(self.id) + if meta: + state = meta['status'] + if state == states.SUCCESS or state in states.PROPAGATE_STATES: + self._set_cache(meta) + return self._set_cache(meta) + return meta + return self._cache + + def _set_cache(self, d): + state, children = d['status'], d.get('children') + if state in states.EXCEPTION_STATES: + d['result'] = self.backend.exception_to_python(d['result']) if children: - return [result_from_tuple(child, self.app) for child in children] + d['children'] = [ + result_from_tuple(child, self.app) for child in children + ] + self._cache = d + return d @property def result(self): """When the task has been executed, this contains the return value. If the task raised an exception, this will be the exception instance.""" - return self.backend.get_result(self.id) + return self._get_task_meta()['result'] info = result @property def traceback(self): """Get the traceback of a failed task.""" - return self.backend.get_traceback(self.id) + return self._get_task_meta().get('traceback') @property def state(self): @@ -326,7 +387,7 @@ class AsyncResult(ResultBase): then contains the tasks return value. """ - return self.backend.get_status(self.id) + return self._get_task_meta()['status'] status = state @property @@ -415,6 +476,10 @@ class ResultSet(ResultBase): """ return any(result.failed() for result in self.results) + def maybe_reraise(self): + for result in self.results: + result.maybe_reraise() + def waiting(self): """Are any of the tasks incomplete? @@ -471,13 +536,9 @@ class ResultSet(ResultBase): """`res[i] -> res.results[i]`""" return self.results[index] + @deprecated('3.2', '3.3') def iterate(self, timeout=None, propagate=True, interval=0.5): - """Iterate over the return values of the tasks as they finish - one by one. - - :raises: The exception if any of the tasks raised an exception. 
- - """ + """Deprecated method, use :meth:`get` with a callback argument.""" elapsed = 0.0 results = OrderedDict((result.id, copy(result)) for result in self.results) @@ -499,7 +560,8 @@ class ResultSet(ResultBase): if timeout and elapsed >= timeout: raise TimeoutError('The operation timed out') - def get(self, timeout=None, propagate=True, interval=0.5, callback=None): + def get(self, timeout=None, propagate=True, interval=0.5, + callback=None, no_ack=True): """See :meth:`join` This is here for API compatibility with :class:`AsyncResult`, @@ -509,9 +571,10 @@ class ResultSet(ResultBase): """ return (self.join_native if self.supports_native_join else self.join)( timeout=timeout, propagate=propagate, - interval=interval, callback=callback) + interval=interval, callback=callback, no_ack=no_ack) - def join(self, timeout=None, propagate=True, interval=0.5, callback=None): + def join(self, timeout=None, propagate=True, interval=0.5, + callback=None, no_ack=True): """Gathers the results of all tasks as a list in order. .. note:: @@ -543,9 +606,19 @@ class ResultSet(ResultBase): No results will be returned by this function if a callback is specified. The order of results is also arbitrary when a callback is used. + To get access to the result object for a particular + id you will have to generate an index first: + ``index = {r.id: r for r in gres.results.values()}`` + Or you can create new result objects on the fly: + ``result = app.AsyncResult(task_id)`` (both will + take advantage of the backend cache anyway). - :raises celery.exceptions.TimeoutError: if `timeout` is not - :const:`None` and the operation takes longer than `timeout` + :keyword no_ack: Automatic message acknowledgement (Note that if this + is set to :const:`False` then the messages *will not be + acknowledged*). + + :raises celery.exceptions.TimeoutError: if ``timeout`` is not + :const:`None` and the operation takes longer than ``timeout`` seconds. """ @@ -560,16 +633,17 @@ class ResultSet(ResultBase): remaining = timeout - (monotonic() - time_start) if remaining <= 0.0: raise TimeoutError('join operation timed out') - value = result.get(timeout=remaining, - propagate=propagate, - interval=interval) + value = result.get( + timeout=remaining, propagate=propagate, + interval=interval, no_ack=no_ack, + ) if callback: callback(result.id, value) else: results.append(value) return results - def iter_native(self, timeout=None, interval=0.5): + def iter_native(self, timeout=None, interval=0.5, no_ack=True): """Backend optimized version of :meth:`iterate`. .. versionadded:: 2.2 @@ -585,11 +659,12 @@ class ResultSet(ResultBase): if not results: return iter([]) return results[0].backend.get_many( - set(r.id for r in results), timeout=timeout, interval=interval, + set(r.id for r in results), + timeout=timeout, interval=interval, no_ack=no_ack, ) def join_native(self, timeout=None, propagate=True, - interval=0.5, callback=None): + interval=0.5, callback=None, no_ack=True): """Backend optimized version of :meth:`join`. .. 
versionadded:: 2.2 @@ -606,7 +681,7 @@ class ResultSet(ResultBase): (result.id, i) for i, result in enumerate(self.results) ) acc = None if callback else [None for _ in range(len(self))] - for task_id, meta in self.iter_native(timeout, interval): + for task_id, meta in self.iter_native(timeout, interval, no_ack): value = meta['result'] if propagate and meta['status'] in states.PROPAGATE_STATES: raise value @@ -759,6 +834,10 @@ class EagerResult(AsyncResult): self._state = state self._traceback = traceback + def _get_task_meta(self): + return {'task_id': self.id, 'result': self._result, 'status': + self._state, 'traceback': self._traceback} + def __reduce__(self): return self.__class__, self.__reduce_args__() diff --git a/awx/lib/site-packages/celery/schedules.py b/awx/lib/site-packages/celery/schedules.py index f437bde45a..6424dfa042 100644 --- a/awx/lib/site-packages/celery/schedules.py +++ b/awx/lib/site-packages/celery/schedules.py @@ -9,6 +9,7 @@ """ from __future__ import absolute_import +import numbers import re from collections import namedtuple @@ -401,7 +402,7 @@ class crontab(schedule): week. """ - if isinstance(cronspec, int): + if isinstance(cronspec, numbers.Integral): result = set([cronspec]) elif isinstance(cronspec, string_t): result = crontab_parser(max_, min_).parse(cronspec) @@ -583,7 +584,7 @@ class crontab(schedule): def maybe_schedule(s, relative=False, app=None): if s is not None: - if isinstance(s, int): + if isinstance(s, numbers.Integral): s = timedelta(seconds=s) if isinstance(s, timedelta): return schedule(s, relative, app=app) diff --git a/awx/lib/site-packages/celery/security/certificate.py b/awx/lib/site-packages/celery/security/certificate.py index df2387e6f1..c1c520c27d 100644 --- a/awx/lib/site-packages/celery/security/certificate.py +++ b/awx/lib/site-packages/celery/security/certificate.py @@ -35,7 +35,7 @@ class Certificate(object): def get_serial_number(self): """Return the serial number in the certificate.""" - return self._cert.get_serial_number() + return bytes_to_str(self._cert.get_serial_number()) def get_issuer(self): """Return issuer (CA) as a string""" @@ -66,14 +66,15 @@ class CertStore(object): def __getitem__(self, id): """get certificate by id""" try: - return self._certs[id] + return self._certs[bytes_to_str(id)] except KeyError: raise SecurityError('Unknown certificate: {0!r}'.format(id)) def add_cert(self, cert): - if cert.get_id() in self._certs: + cert_id = bytes_to_str(cert.get_id()) + if cert_id in self._certs: raise SecurityError('Duplicate certificate: {0!r}'.format(id)) - self._certs[cert.get_id()] = cert + self._certs[cert_id] = cert class FSCertStore(CertStore): diff --git a/awx/lib/site-packages/celery/security/serialization.py b/awx/lib/site-packages/celery/security/serialization.py index 0a45b5e978..f1cab29148 100644 --- a/awx/lib/site-packages/celery/security/serialization.py +++ b/awx/lib/site-packages/celery/security/serialization.py @@ -44,7 +44,7 @@ class SecureSerializer(object): assert self._cert is not None with reraise_errors('Unable to serialize: {0!r}', (Exception, )): content_type, content_encoding, body = dumps( - data, serializer=self._serializer) + bytes_to_str(data), serializer=self._serializer) # What we sign is the serialized body, not the body itself. 
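+        # (i.e. the exact serialized bytes that travel over the wire);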
# this way the receiver doesn't have to decode the contents # to verify the signature (and thus avoiding potential flaws @@ -89,15 +89,12 @@ class SecureSerializer(object): v = raw_payload[end_of_sig:].split(sep) - values = [bytes_to_str(signer), bytes_to_str(signature), - bytes_to_str(v[0]), bytes_to_str(v[1]), bytes_to_str(v[2])] - return { - 'signer': values[0], - 'signature': values[1], - 'content_type': values[2], - 'content_encoding': values[3], - 'body': values[4], + 'signer': signer, + 'signature': signature, + 'content_type': bytes_to_str(v[0]), + 'content_encoding': bytes_to_str(v[1]), + 'body': bytes_to_str(v[2]), } diff --git a/awx/lib/site-packages/celery/task/base.py b/awx/lib/site-packages/celery/task/base.py index f223fbbdd0..9d466b57c4 100644 --- a/awx/lib/site-packages/celery/task/base.py +++ b/awx/lib/site-packages/celery/task/base.py @@ -69,6 +69,16 @@ class Task(BaseTask): def request(cls): return cls._get_request() + @class_property + def backend(cls): + if cls._backend is None: + return cls.app.backend + return cls._backend + + @backend.setter + def backend(cls, value): # noqa + cls._backend = value + @classmethod def get_logger(self, **kwargs): return get_task_logger(self.name) @@ -159,7 +169,7 @@ class PeriodicTask(Task): def task(*args, **kwargs): - """Deprecated decorators, please use :meth:`~@task`.""" + """Deprecated decorator, please use :func:`celery.task`.""" return current_app.task(*args, **dict({'accept_magic_kwargs': False, 'base': Task}, **kwargs)) diff --git a/awx/lib/site-packages/celery/tests/__init__.py b/awx/lib/site-packages/celery/tests/__init__.py index 4394ba977e..966787270b 100644 --- a/awx/lib/site-packages/celery/tests/__init__.py +++ b/awx/lib/site-packages/celery/tests/__init__.py @@ -22,7 +22,7 @@ def setup(): KOMBU_DISABLE_LIMIT_PROTECTION='yes', ) - if os.environ.get('COVER_ALL_MODULES') or '--with-coverage3' in sys.argv: + if os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv: from warnings import catch_warnings with catch_warnings(record=True): import_all_modules() diff --git a/awx/lib/site-packages/celery/tests/app/test_app.py b/awx/lib/site-packages/celery/tests/app/test_app.py index 7d209d57de..113dedae14 100644 --- a/awx/lib/site-packages/celery/tests/app/test_app.py +++ b/awx/lib/site-packages/celery/tests/app/test_app.py @@ -549,14 +549,14 @@ class test_App(AppCase): # Test passing in a string and make sure the string # gets there untouched self.app.conf.BROKER_FAILOVER_STRATEGY = 'foo-bar' - self.assertEquals( + self.assertEqual( self.app.connection('amqp:////value').failover_strategy, 'foo-bar', ) # Try passing in None self.app.conf.BROKER_FAILOVER_STRATEGY = None - self.assertEquals( + self.assertEqual( self.app.connection('amqp:////value').failover_strategy, itertools.cycle, ) @@ -566,7 +566,7 @@ class test_App(AppCase): yield True self.app.conf.BROKER_FAILOVER_STRATEGY = my_failover_strategy - self.assertEquals( + self.assertEqual( self.app.connection('amqp:////value').failover_strategy, my_failover_strategy, ) @@ -644,7 +644,7 @@ class test_App(AppCase): class test_defaults(AppCase): - def test_str_to_bool(self): + def test_strtobool(self): for s in ('false', 'no', '0'): self.assertFalse(defaults.strtobool(s)) for s in ('true', 'yes', '1'): diff --git a/awx/lib/site-packages/celery/tests/app/test_beat.py b/awx/lib/site-packages/celery/tests/app/test_beat.py index 0a1755ef19..62310805a3 100644 --- a/awx/lib/site-packages/celery/tests/app/test_beat.py +++ b/awx/lib/site-packages/celery/tests/app/test_beat.py 
@@ -162,7 +162,7 @@ class test_Scheduler(AppCase): scheduler.apply_async(scheduler.Entry(task=foo.name, app=self.app)) self.assertTrue(foo.apply_async.called) - def test_apply_async_should_not_sync(self): + def test_should_sync(self): @self.app.task(shared=False) def not_sync(): @@ -181,6 +181,41 @@ class test_Scheduler(AppCase): s.apply_async(s.Entry(task=not_sync.name, app=self.app)) self.assertFalse(s._do_sync.called) + def test_should_sync_increments_sync_every_counter(self): + self.app.conf.CELERYBEAT_SYNC_EVERY = 2 + + @self.app.task(shared=False) + def not_sync(): + pass + not_sync.apply_async = Mock() + + s = mScheduler(app=self.app) + self.assertEqual(s.sync_every_tasks, 2) + s._do_sync = Mock() + + s.apply_async(s.Entry(task=not_sync.name, app=self.app)) + self.assertEqual(s._tasks_since_sync, 1) + s.apply_async(s.Entry(task=not_sync.name, app=self.app)) + s._do_sync.assert_called_with() + + self.app.conf.CELERYBEAT_SYNC_EVERY = 0 + + def test_sync_task_counter_resets_on_do_sync(self): + self.app.conf.CELERYBEAT_SYNC_EVERY = 1 + + @self.app.task(shared=False) + def not_sync(): + pass + not_sync.apply_async = Mock() + + s = mScheduler(app=self.app) + self.assertEqual(s.sync_every_tasks, 1) + + s.apply_async(s.Entry(task=not_sync.name, app=self.app)) + self.assertEqual(s._tasks_since_sync, 0) + + self.app.conf.CELERYBEAT_SYNC_EVERY = 0 + @patch('celery.app.base.Celery.send_task') def test_send_task(self, send_task): b = beat.Scheduler(app=self.app) diff --git a/awx/lib/site-packages/celery/tests/app/test_loaders.py b/awx/lib/site-packages/celery/tests/app/test_loaders.py index 037ef4df68..f1b1bb0378 100644 --- a/awx/lib/site-packages/celery/tests/app/test_loaders.py +++ b/awx/lib/site-packages/celery/tests/app/test_loaders.py @@ -7,7 +7,6 @@ import warnings from celery import loaders from celery.exceptions import ( NotConfigured, - CPendingDeprecationWarning, ) from celery.loaders import base from celery.loaders import default @@ -34,16 +33,12 @@ class test_loaders(AppCase): @depends_on_current_app def test_current_loader(self): - with self.assertWarnsRegex( - CPendingDeprecationWarning, - r'deprecation'): + with self.assertPendingDeprecation(): self.assertIs(loaders.current_loader(), self.app.loader) @depends_on_current_app def test_load_settings(self): - with self.assertWarnsRegex( - CPendingDeprecationWarning, - r'deprecation'): + with self.assertPendingDeprecation(): self.assertIs(loaders.load_settings(), self.app.conf) diff --git a/awx/lib/site-packages/celery/tests/app/test_log.py b/awx/lib/site-packages/celery/tests/app/test_log.py index c6f90f783b..f430d8b5b6 100644 --- a/awx/lib/site-packages/celery/tests/app/test_log.py +++ b/awx/lib/site-packages/celery/tests/app/test_log.py @@ -2,6 +2,9 @@ from __future__ import absolute_import import sys import logging + +from collections import defaultdict +from io import StringIO from tempfile import mktemp from celery import signals @@ -248,14 +251,31 @@ class test_default_logger(AppCase): l.info('The quick brown fox...') self.assertIn('The quick brown fox...', stderr.getvalue()) - def test_setup_logger_no_handlers_file(self): - with restore_logging(): - l = self.get_logger() - l.handlers = [] - tempfile = mktemp(suffix='unittest', prefix='celery') - l = self.setup_logger(logfile=tempfile, loglevel=0, root=False) - self.assertIsInstance(get_handlers(l)[0], - logging.FileHandler) + @patch('os.fstat') + def test_setup_logger_no_handlers_file(self, *args): + tempfile = mktemp(suffix='unittest', prefix='celery') + _open = 
('builtins.open' if sys.version_info[0] == 3 + else '__builtin__.open') + with patch(_open) as osopen: + with restore_logging(): + files = defaultdict(StringIO) + + def open_file(filename, *args, **kwargs): + f = files[filename] + f.fileno = Mock() + f.fileno.return_value = 99 + return f + + osopen.side_effect = open_file + l = self.get_logger() + l.handlers = [] + l = self.setup_logger( + logfile=tempfile, loglevel=logging.INFO, root=False, + ) + self.assertIsInstance( + get_handlers(l)[0], logging.FileHandler, + ) + self.assertIn(tempfile, files) def test_redirect_stdouts(self): with restore_logging(): @@ -336,7 +356,7 @@ class test_task_logger(test_default_logger): return self.app.log.setup_task_loggers(*args, **kwargs) def get_logger(self, *args, **kwargs): - return get_task_logger("test_task_logger") + return get_task_logger('test_task_logger') class test_patch_logger_cls(AppCase): diff --git a/awx/lib/site-packages/celery/tests/backends/test_amqp.py b/awx/lib/site-packages/celery/tests/backends/test_amqp.py index 30a468446f..7e5a361966 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_amqp.py +++ b/awx/lib/site-packages/celery/tests/backends/test_amqp.py @@ -33,8 +33,8 @@ class test_AMQPBackend(AppCase): return AMQPBackend(self.app, **opts) def test_mark_as_done(self): - tb1 = self.create_backend() - tb2 = self.create_backend() + tb1 = self.create_backend(max_cached_results=1) + tb2 = self.create_backend(max_cached_results=1) tid = uuid() @@ -175,7 +175,7 @@ class test_AMQPBackend(AppCase): class MockBackend(AMQPBackend): Queue = MockBinding - backend = MockBackend(self.app) + backend = MockBackend(self.app, max_cached_results=100) backend._republish = Mock() yield results, backend, Message @@ -183,29 +183,30 @@ class test_AMQPBackend(AppCase): def test_backlog_limit_exceeded(self): with self._result_context() as (results, backend, Message): for i in range(1001): - results.put(Message(status=states.RECEIVED)) + results.put(Message(task_id='id', status=states.RECEIVED)) with self.assertRaises(backend.BacklogLimitExceeded): backend.get_task_meta('id') def test_poll_result(self): with self._result_context() as (results, backend, Message): + tid = uuid() # FFWD's to the latest state. state_messages = [ - Message(status=states.RECEIVED, seq=1), - Message(status=states.STARTED, seq=2), - Message(status=states.FAILURE, seq=3), + Message(task_id=tid, status=states.RECEIVED, seq=1), + Message(task_id=tid, status=states.STARTED, seq=2), + Message(task_id=tid, status=states.FAILURE, seq=3), ] for state_message in state_messages: results.put(state_message) - r1 = backend.get_task_meta(uuid()) + r1 = backend.get_task_meta(tid) self.assertDictContainsSubset( {'status': states.FAILURE, 'seq': 3}, r1, 'FFWDs to the last state', ) # Caches last known state. 
- results.put(Message()) tid = uuid() + results.put(Message(task_id=tid)) backend.get_task_meta(tid) self.assertIn(tid, backend._cache, 'Caches last known state') @@ -261,7 +262,7 @@ class test_AMQPBackend(AppCase): b.drain_events(Connection(), consumer, timeout=0.1) def test_get_many(self): - b = self.create_backend() + b = self.create_backend(max_cached_results=10) tids = [] for i in range(10): diff --git a/awx/lib/site-packages/celery/tests/backends/test_base.py b/awx/lib/site-packages/celery/tests/backends/test_base.py index 17b569c33e..58e3e8d524 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_base.py +++ b/awx/lib/site-packages/celery/tests/backends/test_base.py @@ -20,6 +20,7 @@ from celery.backends.base import ( KeyValueStoreBackend, DisabledBackend, ) +from celery.result import result_from_tuple from celery.utils import uuid from celery.tests.case import AppCase, Mock, SkipTest, patch @@ -61,7 +62,7 @@ class test_BaseBackend_interface(AppCase): self.b.forget('SOMExx-N0nex1stant-IDxx-') def test_on_chord_part_return(self): - self.b.on_chord_part_return(None) + self.b.on_chord_part_return(None, None, None) def test_apply_chord(self, unlock='celery.chord_unlock'): self.app.tasks[unlock] = Mock() @@ -219,13 +220,24 @@ class test_BaseBackend_dict(AppCase): self.assertTrue(args[2]) def test_prepare_value_serializes_group_result(self): + self.b.serializer = 'json' g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')]) - self.assertIsInstance(self.b.prepare_value(g), (list, tuple)) + v = self.b.prepare_value(g) + self.assertIsInstance(v, (list, tuple)) + self.assertEqual(result_from_tuple(v, app=self.app), g) + + v2 = self.b.prepare_value(g[0]) + self.assertIsInstance(v2, (list, tuple)) + self.assertEqual(result_from_tuple(v2, app=self.app), g[0]) + + self.b.serializer = 'pickle' + self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult) def test_is_cached(self): - self.b._cache['foo'] = 1 - self.assertTrue(self.b.is_cached('foo')) - self.assertFalse(self.b.is_cached('false')) + b = BaseBackend(app=self.app, max_cached_results=1) + b._cache['foo'] = 1 + self.assertTrue(b.is_cached('foo')) + self.assertFalse(b.is_cached('false')) class test_KeyValueStoreBackend(AppCase): @@ -235,7 +247,7 @@ class test_KeyValueStoreBackend(AppCase): def test_on_chord_part_return(self): assert not self.b.implements_incr - self.b.on_chord_part_return(None) + self.b.on_chord_part_return(None, None, None) def test_get_store_delete_result(self): tid = uuid() @@ -271,12 +283,14 @@ class test_KeyValueStoreBackend(AppCase): def test_chord_part_return_no_gid(self): self.b.implements_incr = True task = Mock() + state = 'SUCCESS' + result = 10 task.request.group = None self.b.get_key_for_chord = Mock() self.b.get_key_for_chord.side_effect = AssertionError( 'should not get here', ) - self.assertIsNone(self.b.on_chord_part_return(task)) + self.assertIsNone(self.b.on_chord_part_return(task, state, result)) @contextmanager def _chord_part_context(self, b): @@ -287,8 +301,8 @@ class test_KeyValueStoreBackend(AppCase): b.implements_incr = True b.client = Mock() - with patch('celery.result.GroupResult') as GR: - deps = GR.restore.return_value = Mock() + with patch('celery.backends.base.GroupResult') as GR: + deps = GR.restore.return_value = Mock(name='DEPS') deps.__len__ = Mock() deps.__len__.return_value = 10 b.incr = Mock() @@ -304,14 +318,14 @@ class test_KeyValueStoreBackend(AppCase): def test_chord_part_return_propagate_set(self): with self._chord_part_context(self.b) as (task, 
deps, _): - self.b.on_chord_part_return(task, propagate=True) + self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=True) self.assertFalse(self.b.expire.called) deps.delete.assert_called_with() deps.join_native.assert_called_with(propagate=True, timeout=3.0) def test_chord_part_return_propagate_default(self): with self._chord_part_context(self.b) as (task, deps, _): - self.b.on_chord_part_return(task, propagate=None) + self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=None) self.assertFalse(self.b.expire.called) deps.delete.assert_called_with() deps.join_native.assert_called_with( @@ -323,7 +337,7 @@ class test_KeyValueStoreBackend(AppCase): with self._chord_part_context(self.b) as (task, deps, callback): deps._failed_join_report = lambda: iter([]) deps.join_native.side_effect = KeyError('foo') - self.b.on_chord_part_return(task) + self.b.on_chord_part_return(task, 'SUCCESS', 10) self.assertTrue(self.b.fail_from_current_stack.called) args = self.b.fail_from_current_stack.call_args exc = args[1]['exc'] @@ -331,14 +345,15 @@ class test_KeyValueStoreBackend(AppCase): self.assertIn('foo', str(exc)) def test_chord_part_return_join_raises_task(self): - with self._chord_part_context(self.b) as (task, deps, callback): + b = KVBackend(serializer='pickle', app=self.app) + with self._chord_part_context(b) as (task, deps, callback): deps._failed_join_report = lambda: iter([ self.app.AsyncResult('culprit'), ]) deps.join_native.side_effect = KeyError('foo') - self.b.on_chord_part_return(task) - self.assertTrue(self.b.fail_from_current_stack.called) - args = self.b.fail_from_current_stack.call_args + b.on_chord_part_return(task, 'SUCCESS', 10) + self.assertTrue(b.fail_from_current_stack.called) + args = b.fail_from_current_stack.call_args exc = args[1]['exc'] self.assertIsInstance(exc, ChordError) self.assertIn('Dependency culprit raised', str(exc)) diff --git a/awx/lib/site-packages/celery/tests/backends/test_cache.py b/awx/lib/site-packages/celery/tests/backends/test_cache.py index 08165b5d36..051760a8de 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_cache.py +++ b/awx/lib/site-packages/celery/tests/backends/test_cache.py @@ -86,10 +86,10 @@ class test_CacheBackend(AppCase): tb.apply_chord(group(app=self.app), (), gid, {}, result=res) self.assertFalse(deps.join_native.called) - tb.on_chord_part_return(task) + tb.on_chord_part_return(task, 'SUCCESS', 10) self.assertFalse(deps.join_native.called) - tb.on_chord_part_return(task) + tb.on_chord_part_return(task, 'SUCCESS', 10) deps.join_native.assert_called_with(propagate=True, timeout=3.0) deps.delete.assert_called_with() diff --git a/awx/lib/site-packages/celery/tests/backends/test_couchbase.py b/awx/lib/site-packages/celery/tests/backends/test_couchbase.py index 96395583d9..3dc6aadd0b 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_couchbase.py +++ b/awx/lib/site-packages/celery/tests/backends/test_couchbase.py @@ -129,8 +129,8 @@ class test_CouchBaseBackend(AppCase): url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket' with self.Celery(backend=url) as app: x = app.backend - self.assertEqual(x.bucket, "mycoolbucket") - self.assertEqual(x.host, "myhost") - self.assertEqual(x.username, "johndoe") - self.assertEqual(x.password, "mysecret") + self.assertEqual(x.bucket, 'mycoolbucket') + self.assertEqual(x.host, 'myhost') + self.assertEqual(x.username, 'johndoe') + self.assertEqual(x.password, 'mysecret') self.assertEqual(x.port, 123) diff --git a/awx/lib/site-packages/celery/tests/backends/test_database.py 
b/awx/lib/site-packages/celery/tests/backends/test_database.py index fac02215e1..6b5bf9420a 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_database.py +++ b/awx/lib/site-packages/celery/tests/backends/test_database.py @@ -42,16 +42,16 @@ class test_DatabaseBackend(AppCase): self.uri = 'sqlite:///test.db' def test_retry_helper(self): - from celery.backends.database import OperationalError + from celery.backends.database import DatabaseError calls = [0] @retry def raises(): calls[0] += 1 - raise OperationalError(1, 2, 3) + raise DatabaseError(1, 2, 3) - with self.assertRaises(OperationalError): + with self.assertRaises(DatabaseError): raises(max_retries=5) self.assertEqual(calls[0], 5) diff --git a/awx/lib/site-packages/celery/tests/backends/test_mongodb.py b/awx/lib/site-packages/celery/tests/backends/test_mongodb.py index cbd80dd1bd..e260d87f00 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_mongodb.py +++ b/awx/lib/site-packages/celery/tests/backends/test_mongodb.py @@ -10,7 +10,7 @@ from celery.backends import mongodb as module from celery.backends.mongodb import MongoBackend, Bunch, pymongo from celery.exceptions import ImproperlyConfigured from celery.tests.case import ( - AppCase, MagicMock, Mock, SkipTest, + AppCase, MagicMock, Mock, SkipTest, ANY, depends_on_current_app, patch, sentinel, ) @@ -176,7 +176,7 @@ class test_MongoBackend(AppCase): mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.save.assert_called_once() + mock_collection.save.assert_called_once_with(ANY) self.assertEqual(sentinel.result, ret_val) @patch('celery.backends.mongodb.MongoBackend._get_database') @@ -232,7 +232,7 @@ class test_MongoBackend(AppCase): mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.save.assert_called_once() + mock_collection.save.assert_called_once_with(ANY) self.assertEqual(sentinel.result, ret_val) @patch('celery.backends.mongodb.MongoBackend._get_database') @@ -309,7 +309,7 @@ class test_MongoBackend(AppCase): mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with( MONGODB_COLLECTION) - mock_collection.assert_called_once() + mock_collection.assert_called_once_with() def test_get_database_authfailure(self): x = MongoBackend(app=self.app) diff --git a/awx/lib/site-packages/celery/tests/backends/test_redis.py b/awx/lib/site-packages/celery/tests/backends/test_redis.py index 25b0c5750b..0ecc5258b0 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_redis.py +++ b/awx/lib/site-packages/celery/tests/backends/test_redis.py @@ -4,36 +4,55 @@ from datetime import timedelta from pickle import loads, dumps -from kombu.utils import cached_property, uuid - from celery import signature from celery import states from celery import group +from celery import uuid from celery.datastructures import AttributeDict -from celery.exceptions import CPendingDeprecationWarning, ImproperlyConfigured +from celery.exceptions import ImproperlyConfigured from celery.utils.timeutils import timedelta_seconds from celery.tests.case import ( - AppCase, Mock, SkipTest, depends_on_current_app, patch, + AppCase, Mock, MockCallbacks, SkipTest, depends_on_current_app, patch, ) -class Redis(object): +class Connection(object): + connected = True - class Connection(object): - connected = True + def disconnect(self): + self.connected = False - def disconnect(self): - self.connected = False + 
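+# Minimal stand-in for redis-py's pipeline: attribute access records
+# a (method, args, kwargs) step against the mock client, and execute()
+# replays the recorded steps in order.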
+class Pipeline(object): + + def __init__(self, client): + self.client = client + self.steps = [] + + def __getattr__(self, attr): + + def add_step(*args, **kwargs): + self.steps.append((getattr(self.client, attr), args, kwargs)) + return self + return add_step + + def execute(self): + return [step(*a, **kw) for step, a, kw in self.steps] + + +class Redis(MockCallbacks): + Connection = Connection + Pipeline = Pipeline def __init__(self, host=None, port=None, db=None, password=None, **kw): self.host = host self.port = port self.db = db self.password = password - self.connection = self.Connection() self.keyspace = {} self.expiry = {} + self.connection = self.Connection() def get(self, key): return self.keyspace.get(key) @@ -47,12 +66,29 @@ class Redis(object): def expire(self, key, expires): self.expiry[key] = expires + return expires def delete(self, key): - self.keyspace.pop(key) + return bool(self.keyspace.pop(key, None)) - def publish(self, key, value): - pass + def pipeline(self): + return self.Pipeline(self) + + def _get_list(self, key): + try: + return self.keyspace[key] + except KeyError: + l = self.keyspace[key] = [] + return l + + def rpush(self, key, value): + self._get_list(key).append(value) + + def lrange(self, key, start, stop): + return self._get_list(key)[start:stop] + + def llen(self, key): + return len(self.keyspace.get(key) or []) class redis(object): @@ -72,41 +108,34 @@ class redis(object): class test_RedisBackend(AppCase): def get_backend(self): - from celery.backends import redis + from celery.backends.redis import RedisBackend - class RedisBackend(redis.RedisBackend): + class _RedisBackend(RedisBackend): redis = redis - return RedisBackend + return _RedisBackend def setup(self): self.Backend = self.get_backend() - class MockBackend(self.Backend): - - @cached_property - def client(self): - return Mock() - - self.MockBackend = MockBackend - @depends_on_current_app def test_reduce(self): try: from celery.backends.redis import RedisBackend - x = RedisBackend(app=self.app) + x = RedisBackend(app=self.app, new_join=True) self.assertTrue(loads(dumps(x))) except ImportError: raise SkipTest('redis not installed') def test_no_redis(self): - self.MockBackend.redis = None + self.Backend.redis = None with self.assertRaises(ImproperlyConfigured): - self.MockBackend(app=self.app) + self.Backend(app=self.app, new_join=True) def test_url(self): - x = self.MockBackend( + x = self.Backend( 'redis://:bosco@vandelay.com:123//1', app=self.app, + new_join=True, ) self.assertTrue(x.connparams) self.assertEqual(x.connparams['host'], 'vandelay.com') @@ -115,8 +144,9 @@ class test_RedisBackend(AppCase): self.assertEqual(x.connparams['password'], 'bosco') def test_socket_url(self): - x = self.MockBackend( + x = self.Backend( 'socket:///tmp/redis.sock?virtual_host=/3', app=self.app, + new_join=True, ) self.assertTrue(x.connparams) self.assertEqual(x.connparams['path'], '/tmp/redis.sock') @@ -129,20 +159,17 @@ class test_RedisBackend(AppCase): self.assertEqual(x.connparams['db'], 3) def test_compat_propertie(self): - x = self.MockBackend( + x = self.Backend( 'redis://:bosco@vandelay.com:123//1', app=self.app, + new_join=True, ) - with self.assertWarnsRegex(CPendingDeprecationWarning, - r'scheduled for deprecation'): + with self.assertPendingDeprecation(): self.assertEqual(x.host, 'vandelay.com') - with self.assertWarnsRegex(CPendingDeprecationWarning, - r'scheduled for deprecation'): + with self.assertPendingDeprecation(): self.assertEqual(x.db, 1) - with 
self.assertWarnsRegex(CPendingDeprecationWarning, - r'scheduled for deprecation'): + with self.assertPendingDeprecation(): self.assertEqual(x.port, 123) - with self.assertWarnsRegex(CPendingDeprecationWarning, - r'scheduled for deprecation'): + with self.assertPendingDeprecation(): self.assertEqual(x.password, 'bosco') def test_conf_raises_KeyError(self): @@ -152,71 +179,85 @@ class test_RedisBackend(AppCase): 'CELERY_ACCEPT_CONTENT': ['json'], 'CELERY_TASK_RESULT_EXPIRES': None, }) - self.MockBackend(app=self.app) + self.Backend(app=self.app, new_join=True) def test_expires_defaults_to_config(self): self.app.conf.CELERY_TASK_RESULT_EXPIRES = 10 - b = self.Backend(expires=None, app=self.app) + b = self.Backend(expires=None, app=self.app, new_join=True) self.assertEqual(b.expires, 10) def test_expires_is_int(self): - b = self.Backend(expires=48, app=self.app) + b = self.Backend(expires=48, app=self.app, new_join=True) self.assertEqual(b.expires, 48) + def test_set_new_join_from_url_query(self): + b = self.Backend('redis://?new_join=True;foobar=1', app=self.app) + self.assertEqual(b.on_chord_part_return, b._new_chord_return) + self.assertEqual(b.apply_chord, b._new_chord_apply) + + def test_default_is_old_join(self): + b = self.Backend(app=self.app) + self.assertNotEqual(b.on_chord_part_return, b._new_chord_return) + self.assertNotEqual(b.apply_chord, b._new_chord_apply) + def test_expires_is_None(self): - b = self.Backend(expires=None, app=self.app) + b = self.Backend(expires=None, app=self.app, new_join=True) self.assertEqual(b.expires, timedelta_seconds( self.app.conf.CELERY_TASK_RESULT_EXPIRES)) def test_expires_is_timedelta(self): - b = self.Backend(expires=timedelta(minutes=1), app=self.app) + b = self.Backend( + expires=timedelta(minutes=1), app=self.app, new_join=1, + ) self.assertEqual(b.expires, 60) def test_apply_chord(self): - self.Backend(app=self.app).apply_chord( + self.Backend(app=self.app, new_join=True).apply_chord( group(app=self.app), (), 'group_id', {}, result=[self.app.AsyncResult(x) for x in [1, 2, 3]], ) def test_mget(self): - b = self.MockBackend(app=self.app) + b = self.Backend(app=self.app, new_join=True) self.assertTrue(b.mget(['a', 'b', 'c'])) b.client.mget.assert_called_with(['a', 'b', 'c']) def test_set_no_expire(self): - b = self.MockBackend(app=self.app) + b = self.Backend(app=self.app, new_join=True) b.expires = None b.set('foo', 'bar') @patch('celery.result.GroupResult.restore') def test_on_chord_part_return(self, restore): - b = self.MockBackend(app=self.app) - deps = Mock() - deps.__len__ = Mock() - deps.__len__.return_value = 10 - restore.return_value = deps - b.client.incr.return_value = 1 - task = Mock() - task.name = 'foobarbaz' - self.app.tasks['foobarbaz'] = task - task.request.chord = signature(task) - task.request.group = 'group_id' + b = self.Backend(app=self.app, new_join=True) - b.on_chord_part_return(task) - self.assertTrue(b.client.incr.call_count) + def create_task(): + tid = uuid() + task = Mock(name='task-{0}'.format(tid)) + task.name = 'foobarbaz' + self.app.tasks['foobarbaz'] = task + task.request.chord = signature(task) + task.request.id = tid + task.request.chord['chord_size'] = 10 + task.request.group = 'group_id' + return task - b.client.incr.return_value = len(deps) - b.on_chord_part_return(task) - deps.join_native.assert_called_with(propagate=True, timeout=3.0) - deps.delete.assert_called_with() + tasks = [create_task() for i in range(10)] - self.assertTrue(b.client.expire.call_count) + for i in range(10): + 
b.on_chord_part_return(tasks[i], states.SUCCESS, i) + self.assertTrue(b.client.rpush.call_count) + b.client.rpush.reset_mock() + self.assertTrue(b.client.lrange.call_count) + gkey = b.get_key_for_group('group_id', '.j') + b.client.delete.assert_called_with(gkey) + b.client.expire.assert_called_with(gkey, 86400) def test_process_cleanup(self): - self.Backend(app=self.app).process_cleanup() + self.Backend(app=self.app, new_join=True).process_cleanup() def test_get_set_forget(self): - b = self.Backend(app=self.app) + b = self.Backend(app=self.app, new_join=True) tid = uuid() b.store_result(tid, 42, states.SUCCESS) self.assertEqual(b.get_status(tid), states.SUCCESS) @@ -225,8 +266,10 @@ self.assertEqual(b.get_status(tid), states.PENDING) def test_set_expires(self): - b = self.Backend(expires=512, app=self.app) + b = self.Backend(expires=512, app=self.app, new_join=True) tid = uuid() key = b.get_key_for_task(tid) b.store_result(tid, 42, states.SUCCESS) - self.assertEqual(b.client.expiry[key], 512) + b.client.expire.assert_called_with( + key, 512, + ) diff --git a/awx/lib/site-packages/celery/tests/bin/test_base.py b/awx/lib/site-packages/celery/tests/bin/test_base.py index 907817d194..8d1d0d55dd 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_base.py +++ b/awx/lib/site-packages/celery/tests/bin/test_base.py @@ -241,21 +241,21 @@ class test_Command(AppCase): with self.assertRaises(AttributeError): cmd.find_app(__name__) - def test_simple_format(self): + def test_host_format(self): cmd = MockCommand(app=self.app) with patch('socket.gethostname') as hn: hn.return_value = 'blacktron.example.com' - self.assertEqual(cmd.simple_format(''), '') + self.assertEqual(cmd.host_format(''), '') self.assertEqual( - cmd.simple_format('celery@%h'), + cmd.host_format('celery@%h'), 'celery@blacktron.example.com', ) self.assertEqual( - cmd.simple_format('celery@%d'), + cmd.host_format('celery@%d'), 'celery@example.com', ) self.assertEqual( - cmd.simple_format('celery@%n'), + cmd.host_format('celery@%n'), 'celery@blacktron', ) diff --git a/awx/lib/site-packages/celery/tests/bin/test_celery.py b/awx/lib/site-packages/celery/tests/bin/test_celery.py index 6864831b6a..fbfdb62f69 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_celery.py +++ b/awx/lib/site-packages/celery/tests/bin/test_celery.py @@ -195,11 +195,11 @@ class test_purge(AppCase): out = WhateverIO() a = purge(app=self.app, stdout=out) purge_.return_value = 0 - a.run() + a.run(force=True) self.assertIn('No messages purged', out.getvalue()) purge_.return_value = 100 - a.run() + a.run(force=True) self.assertIn('100 messages', out.getvalue()) diff --git a/awx/lib/site-packages/celery/tests/bin/test_worker.py b/awx/lib/site-packages/celery/tests/bin/test_worker.py index 8a8f6ef6a2..fbb7c52c22 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_worker.py +++ b/awx/lib/site-packages/celery/tests/bin/test_worker.py @@ -14,7 +14,9 @@ from celery import signals from celery.app import trace from celery.apps import worker as cd from celery.bin.worker import worker, main as worker_main -from celery.exceptions import ImproperlyConfigured, SystemTerminate +from celery.exceptions import ( + ImproperlyConfigured, WorkerShutdown, WorkerTerminate, +) from celery.utils.log import ensure_process_aware_logger from celery.worker import state @@ -514,12 +516,12 @@ class test_signal_handlers(WorkerAppCase): c.return_value = 1 p, platforms.signals = platforms.signals, Signals() try: - with self.assertRaises(SystemExit): +
with self.assertRaises(WorkerShutdown): handlers['SIGINT']('SIGINT', object()) finally: platforms.signals = p - with self.assertRaises(SystemTerminate): + with self.assertRaises(WorkerTerminate): next_handlers['SIGINT']('SIGINT', object()) @disable_stdouts @@ -546,7 +548,7 @@ class test_signal_handlers(WorkerAppCase): try: worker = self._Worker() handlers = self.psig(cd.install_worker_int_handler, worker) - with self.assertRaises(SystemExit): + with self.assertRaises(WorkerShutdown): handlers['SIGINT']('SIGINT', object()) finally: process.name = name @@ -582,7 +584,7 @@ class test_signal_handlers(WorkerAppCase): worker = self._Worker() handlers = self.psig( cd.install_worker_term_hard_handler, worker) - with self.assertRaises(SystemTerminate): + with self.assertRaises(WorkerTerminate): handlers['SIGQUIT']('SIGQUIT', object()) finally: process.name = name @@ -606,7 +608,7 @@ class test_signal_handlers(WorkerAppCase): worker = self._Worker() handlers = self.psig(cd.install_worker_term_handler, worker) try: - with self.assertRaises(SystemExit): + with self.assertRaises(WorkerShutdown): handlers['SIGTERM']('SIGTERM', object()) finally: state.should_stop = False @@ -638,16 +640,17 @@ class test_signal_handlers(WorkerAppCase): c.return_value = 1 worker = self._Worker() handlers = self.psig(cd.install_worker_term_handler, worker) - with self.assertRaises(SystemExit): + with self.assertRaises(WorkerShutdown): handlers['SIGTERM']('SIGTERM', object()) finally: process.name = name state.should_stop = False @disable_stdouts + @patch('celery.platforms.close_open_fds') @patch('atexit.register') @patch('os.close') - def test_worker_restart_handler(self, _close, register): + def test_worker_restart_handler(self, _close, register, close_open): if getattr(os, 'execv', None) is None: raise SkipTest('platform does not have excv') argv = [] @@ -687,5 +690,5 @@ class test_signal_handlers(WorkerAppCase): c.return_value = 1 worker = self._Worker() handlers = self.psig(cd.install_worker_term_hard_handler, worker) - with self.assertRaises(SystemTerminate): + with self.assertRaises(WorkerTerminate): handlers['SIGQUIT']('SIGQUIT', object()) diff --git a/awx/lib/site-packages/celery/tests/case.py b/awx/lib/site-packages/celery/tests/case.py index 465d2b080e..c96fd8ec0c 100644 --- a/awx/lib/site-packages/celery/tests/case.py +++ b/awx/lib/site-packages/celery/tests/case.py @@ -11,12 +11,14 @@ except AttributeError: import importlib import inspect import logging +import numbers import os import platform import re import sys import threading import time +import types import warnings from contextlib import contextmanager @@ -37,6 +39,7 @@ from kombu.utils import nested, symbol_by_name from celery import Celery from celery.app import current_app from celery.backends.cache import CacheBackend, DummyClient +from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning from celery.five import ( WhateverIO, builtins, items, reraise, string_t, values, open_fqdn, @@ -45,7 +48,7 @@ from celery.utils.functional import noop from celery.utils.imports import qualname __all__ = [ - 'Case', 'AppCase', 'Mock', 'MagicMock', + 'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', 'patch', 'call', 'sentinel', 'skip_unless_module', 'wrap_logger', 'with_environ', 'sleepdeprived', 'skip_if_environ', 'todo', 'skip', 'skip_if', @@ -59,6 +62,9 @@ patch = mock.patch call = mock.call sentinel = mock.sentinel MagicMock = mock.MagicMock +ANY = mock.ANY + +PY3 = sys.version_info[0] == 3 CASE_REDEFINES_SETUP = """\ {name} (subclass of 
AppCase) redefines private "setUp", should be: "setup"\ @@ -162,6 +168,35 @@ def ContextMock(*args, **kwargs): return obj +def _bind(f, o): + @wraps(f) + def bound_meth(*fargs, **fkwargs): + return f(o, *fargs, **fkwargs) + return bound_meth + + +if PY3: # pragma: no cover + def _get_class_fun(meth): + return meth +else: + def _get_class_fun(meth): + return meth.__func__ + + +class MockCallbacks(object): + + def __new__(cls, *args, **kwargs): + r = Mock(name=cls.__name__) + _get_class_fun(cls.__init__)(r, *args, **kwargs) + for key, value in items(vars(cls)): + if key not in ('__dict__', '__weakref__', '__new__', '__init__'): + if inspect.ismethod(value) or inspect.isfunction(value): + r.__getattr__(key).side_effect = _bind(value, r) + else: + r.__setattr__(key, value) + return r + + def skip_unless_module(module): def _inner(fun): @@ -193,6 +228,18 @@ class _AssertRaisesBaseContext(object): self.expected_regex = expected_regex +def _is_magic_module(m): + # some libraries create custom module types that are lazily + # loaded, e.g. Django installs some modules in sys.modules that + # will load _tkinter and other shit when touched. + # pyflakes refuses to accept 'noqa' for this isinstance. + cls, modtype = m.__class__, types.ModuleType + return (not cls is modtype and ( + '__getattr__' in vars(m.__class__) or + '__getattribute__' in vars(m.__class__))) + + class _AssertWarnsContext(_AssertRaisesBaseContext): """A context manager used to implement TestCase.assertWarns* methods.""" @@ -201,8 +248,17 @@ class _AssertWarnsContext(_AssertRaisesBaseContext): # to work properly. warnings.resetwarnings() for v in list(values(sys.modules)): - if getattr(v, '__warningregistry__', None): - v.__warningregistry__ = {} + # do not evaluate Django moved modules and other lazily + # initialized modules.
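One helper above deserves a note: `MockCallbacks.__new__` returns a plain `Mock` and installs each method of the subclass as that mock's `side_effect`, so the stub's real behaviour executes while every call is still recorded. This is what lets the rewritten Redis stub earlier in this patch assert on `rpush`/`expire` call counts without a live server. A small sketch under that reading (hypothetical `Counter` class):

    from celery.tests.case import MockCallbacks

    class Counter(MockCallbacks):

        def __init__(self, start=0):
            self.value = start

        def incr(self, n=1):
            self.value += n
            return self.value

    c = Counter(10)
    assert c.incr(5) == 15         # the real method body ran
    c.incr.assert_called_with(5)   # and Mock recorded the call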
+ if v and not _is_magic_module(v): + # use raw __getattribute__ to protect even better from + # lazily loaded modules + try: + object.__getattribute__(v, '__warningregistry__') + except AttributeError: + pass + else: + object.__setattr__(v, '__warningregistry__', {}) self.warnings_manager = warnings.catch_warnings(record=True) self.warnings = self.warnings_manager.__enter__() warnings.simplefilter('always', self.expected) @@ -253,6 +309,18 @@ class Case(unittest.TestCase): return _AssertWarnsContext(expected_warning, self, None, expected_regex) + @contextmanager + def assertDeprecated(self): + with self.assertWarnsRegex(CDeprecationWarning, + r'scheduled for removal'): + yield + + @contextmanager + def assertPendingDeprecation(self): + with self.assertWarnsRegex(CPendingDeprecationWarning, + r'scheduled for deprecation'): + yield + def assertDictContainsSubset(self, expected, actual, msg=None): missing, mismatched = [], [] @@ -760,7 +828,7 @@ def body_from_sig(app, sig, utc=True): if eta and isinstance(eta, datetime): eta = eta.isoformat() expires = sig.options.pop('expires', None) - if expires and isinstance(expires, int): + if expires and isinstance(expires, numbers.Real): expires = app.now() + timedelta(seconds=expires) if expires and isinstance(expires, datetime): expires = expires.isoformat() diff --git a/awx/lib/site-packages/celery/tests/events/test_state.py b/awx/lib/site-packages/celery/tests/events/test_state.py index d7387966f1..b7e35d7cf1 100644 --- a/awx/lib/site-packages/celery/tests/events/test_state.py +++ b/awx/lib/site-packages/celery/tests/events/test_state.py @@ -18,7 +18,7 @@ from celery.events.state import ( ) from celery.five import range from celery.utils import uuid -from celery.tests.case import AppCase, patch +from celery.tests.case import AppCase, Mock, patch try: Decimal(2.6) @@ -210,8 +210,10 @@ class test_Worker(AppCase): worker = Worker(hostname='foo') worker.event(None, time(), time()) self.assertEqual(len(worker.heartbeats), 1) + h1 = worker.heartbeats[0] worker.event(None, time(), time() - 10) - self.assertEqual(len(worker.heartbeats), 1) + self.assertEqual(len(worker.heartbeats), 2) + self.assertEqual(worker.heartbeats[-1], h1) class test_Task(AppCase): @@ -487,6 +489,36 @@ class test_State(AppCase): 'foo': 'bar', }) + def test_survives_unknown_worker_leaving(self): + s = State(on_node_leave=Mock(name='on_node_leave')) + (worker, created), subject = s.event({ + 'type': 'worker-offline', + 'hostname': 'unknown@vandelay.com', + 'timestamp': time(), + 'local_received': time(), + 'clock': 301030134894833, + }) + self.assertEqual(worker, Worker('unknown@vandelay.com')) + self.assertFalse(created) + self.assertEqual(subject, 'offline') + self.assertNotIn('unknown@vandelay.com', s.workers) + s.on_node_leave.assert_called_with(worker) + + def test_on_node_join_callback(self): + s = State(on_node_join=Mock(name='on_node_join')) + (worker, created), subject = s.event({ + 'type': 'worker-online', + 'hostname': 'george@vandelay.com', + 'timestamp': time(), + 'local_received': time(), + 'clock': 34314, + }) + self.assertTrue(worker) + self.assertTrue(created) + self.assertEqual(subject, 'online') + self.assertIn('george@vandelay.com', s.workers) + s.on_node_join.assert_called_with(worker) + def test_survives_unknown_task_event(self): s = State() s.event( diff --git a/awx/lib/site-packages/celery/tests/security/case.py b/awx/lib/site-packages/celery/tests/security/case.py index 4440f4963a..ba421a9d57 100644 --- a/awx/lib/site-packages/celery/tests/security/case.py 
+++ b/awx/lib/site-packages/celery/tests/security/case.py @@ -2,10 +2,14 @@ from __future__ import absolute_import from celery.tests.case import AppCase, SkipTest +import sys + class SecurityCase(AppCase): def setup(self): + if sys.version_info[0] == 3: + raise SkipTest('PyOpenSSL does not work on Python 3') try: from OpenSSL import crypto # noqa except ImportError: diff --git a/awx/lib/site-packages/celery/tests/security/test_certificate.py b/awx/lib/site-packages/celery/tests/security/test_certificate.py index 84b7d8a552..4b07b5a98f 100644 --- a/awx/lib/site-packages/celery/tests/security/test_certificate.py +++ b/awx/lib/site-packages/celery/tests/security/test_certificate.py @@ -16,7 +16,7 @@ class test_Certificate(SecurityCase): Certificate(CERT2) def test_invalid_certificate(self): - self.assertRaises(TypeError, Certificate, None) + self.assertRaises((SecurityError, TypeError), Certificate, None) self.assertRaises(SecurityError, Certificate, '') self.assertRaises(SecurityError, Certificate, 'foo') self.assertRaises(SecurityError, Certificate, CERT1[:20] + CERT1[21:]) diff --git a/awx/lib/site-packages/celery/tests/security/test_key.py b/awx/lib/site-packages/celery/tests/security/test_key.py index 9e286e98b0..d8551b26b4 100644 --- a/awx/lib/site-packages/celery/tests/security/test_key.py +++ b/awx/lib/site-packages/celery/tests/security/test_key.py @@ -14,7 +14,7 @@ class test_PrivateKey(SecurityCase): PrivateKey(KEY2) def test_invalid_private_key(self): - self.assertRaises(TypeError, PrivateKey, None) + self.assertRaises((SecurityError, TypeError), PrivateKey, None) self.assertRaises(SecurityError, PrivateKey, '') self.assertRaises(SecurityError, PrivateKey, 'foo') self.assertRaises(SecurityError, PrivateKey, KEY1[:20] + KEY1[21:]) diff --git a/awx/lib/site-packages/celery/tests/tasks/test_result.py b/awx/lib/site-packages/celery/tests/tasks/test_result.py index 1fcea2853e..dbaf3f4d4e 100644 --- a/awx/lib/site-packages/celery/tests/tasks/test_result.py +++ b/awx/lib/site-packages/celery/tests/tasks/test_result.py @@ -66,15 +66,15 @@ class test_AsyncResult(AppCase): def test_children(self): x = self.app.AsyncResult('1') children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] + x._cache = {'children': children, 'status': states.SUCCESS} x.backend = Mock() - x.backend.get_children.return_value = children - x.backend.READY_STATES = states.READY_STATES self.assertTrue(x.children) self.assertEqual(len(x.children), 3) def test_propagates_for_parent(self): x = self.app.AsyncResult(uuid()) x.backend = Mock() + x.backend.get_task_meta.return_value = {} x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE) with self.assertRaises(KeyError): x.get(propagate=True) @@ -89,10 +89,11 @@ class test_AsyncResult(AppCase): x = self.app.AsyncResult(tid) child = [self.app.AsyncResult(uuid()).as_tuple() for i in range(10)] - x.backend._cache[tid] = {'children': child} + x._cache = {'children': child} self.assertTrue(x.children) self.assertEqual(len(x.children), 10) + x._cache = {'status': states.SUCCESS} x.backend._cache[tid] = {'result': None} self.assertIsNone(x.children) @@ -122,13 +123,11 @@ class test_AsyncResult(AppCase): def test_iterdeps(self): x = self.app.AsyncResult('1') - x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None} c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] + x._cache = {'status': states.SUCCESS, 'result': None, 'children': c} for child in c: child.backend = Mock() child.backend.get_children.return_value = [] - 
x.backend.get_children = Mock() - x.backend.get_children.return_value = c it = x.iterdeps() self.assertListEqual(list(it), [ (None, x), @@ -136,7 +135,7 @@ class test_AsyncResult(AppCase): (x, c[1]), (x, c[2]), ]) - x.backend._cache.pop('1') + x._cache = None x.ready = Mock() x.ready.return_value = False with self.assertRaises(IncompleteStream): @@ -311,17 +310,19 @@ class test_ResultSet(AppCase): x = self.app.ResultSet([r1, r2]) with self.dummy_copy(): with patch('celery.result.time') as _time: - with self.assertRaises(KeyError): - list(x.iterate()) + with self.assertPendingDeprecation(): + with self.assertRaises(KeyError): + list(x.iterate()) _time.sleep.assert_called_with(10) backend.subpolling_interval = 0 with patch('celery.result.time') as _time: - with self.assertRaises(KeyError): - ready.return_value = False - ready.side_effect = se - list(x.iterate()) - self.assertFalse(_time.sleep.called) + with self.assertPendingDeprecation(): + with self.assertRaises(KeyError): + ready.return_value = False + ready.side_effect = se + list(x.iterate()) + self.assertFalse(_time.sleep.called) def test_times_out(self): r1 = self.app.AsyncResult(uuid) @@ -330,8 +331,9 @@ class test_ResultSet(AppCase): x = self.app.ResultSet([r1]) with self.dummy_copy(): with patch('celery.result.time'): - with self.assertRaises(TimeoutError): - list(x.iterate(timeout=1)) + with self.assertPendingDeprecation(): + with self.assertRaises(TimeoutError): + list(x.iterate(timeout=1)) def test_add_discard(self): x = self.app.ResultSet([]) @@ -449,7 +451,8 @@ class test_GroupResult(AppCase): def test_iterate_raises(self): ar = MockAsyncResultFailure(uuid(), app=self.app) ts = self.app.GroupResult(uuid(), [ar]) - it = ts.iterate() + with self.assertPendingDeprecation(): + it = ts.iterate() with self.assertRaises(KeyError): next(it) @@ -530,7 +533,8 @@ class test_GroupResult(AppCase): ar = MockAsyncResultSuccess(uuid(), app=self.app) ar2 = MockAsyncResultSuccess(uuid(), app=self.app) ts = self.app.GroupResult(uuid(), [ar, ar2]) - it = ts.iterate() + with self.assertPendingDeprecation(): + it = ts.iterate() self.assertEqual(next(it), 42) self.assertEqual(next(it), 42) @@ -538,7 +542,8 @@ class test_GroupResult(AppCase): ar1 = EagerResult(uuid(), 42, states.SUCCESS) ar2 = EagerResult(uuid(), 42, states.SUCCESS) ts = self.app.GroupResult(uuid(), [ar1, ar2]) - it = ts.iterate() + with self.assertPendingDeprecation(): + it = ts.iterate() self.assertEqual(next(it), 42) self.assertEqual(next(it), 42) @@ -560,7 +565,8 @@ class test_GroupResult(AppCase): self.assertListEqual(list(ts.iter_native()), []) def test_iterate_simple(self): - it = self.ts.iterate() + with self.assertPendingDeprecation(): + it = self.ts.iterate() results = sorted(list(it)) self.assertListEqual(results, list(range(self.size))) @@ -610,7 +616,8 @@ class test_failed_AsyncResult(test_GroupResult): self.assertEqual(self.ts.completed_count(), len(self.ts) - 1) def test_iterate_simple(self): - it = self.ts.iterate() + with self.assertPendingDeprecation(): + it = self.ts.iterate() def consume(): return list(it) diff --git a/awx/lib/site-packages/celery/tests/tasks/test_trace.py b/awx/lib/site-packages/celery/tests/tasks/test_trace.py index ef088d60dd..12c6280ef4 100644 --- a/awx/lib/site-packages/celery/tests/tasks/test_trace.py +++ b/awx/lib/site-packages/celery/tests/tasks/test_trace.py @@ -101,7 +101,7 @@ class test_trace(TraceCase): add.backend = Mock() self.trace(add, (2, 2), {}, request={'chord': uuid()}) - add.backend.on_chord_part_return.assert_called_with(add) 
+ add.backend.on_chord_part_return.assert_called_with(add, 'SUCCESS', 4) def test_when_backend_cleanup_raises(self): diff --git a/awx/lib/site-packages/celery/tests/utils/test_datastructures.py b/awx/lib/site-packages/celery/tests/utils/test_datastructures.py index a149336f10..f26fe86f73 100644 --- a/awx/lib/site-packages/celery/tests/utils/test_datastructures.py +++ b/awx/lib/site-packages/celery/tests/utils/test_datastructures.py @@ -170,7 +170,7 @@ class test_LimitedSet(Case): def setUp(self): if sys.platform == 'win32': - raise SkipTest('Not working in Windows') + raise SkipTest('Not working on Windows') def test_add(self): if sys.platform == 'win32': @@ -231,7 +231,8 @@ class test_LimitedSet(Case): self.assertEqual(pickle.loads(pickle.dumps(s)), s) def test_iter(self): - raise SkipTest('Not working on Windows') + if sys.platform == 'win32': + raise SkipTest('Not working on Windows') s = LimitedSet(maxlen=3) items = ['foo', 'bar', 'baz', 'xaz'] for item in items: diff --git a/awx/lib/site-packages/celery/tests/utils/test_functional.py b/awx/lib/site-packages/celery/tests/utils/test_functional.py index b0b65822b5..79085417c5 100644 --- a/awx/lib/site-packages/celery/tests/utils/test_functional.py +++ b/awx/lib/site-packages/celery/tests/utils/test_functional.py @@ -72,21 +72,21 @@ class test_LRUCache(Case): def __init__(self, cache): self.cache = cache - self._is_shutdown = Event() - self._is_stopped = Event() + self.__is_shutdown = Event() + self.__is_stopped = Event() Thread.__init__(self) def run(self): - while not self._is_shutdown.isSet(): + while not self.__is_shutdown.isSet(): try: self.cache.data.popitem(last=False) except KeyError: break - self._is_stopped.set() + self.__is_stopped.set() def stop(self): - self._is_shutdown.set() - self._is_stopped.wait() + self.__is_shutdown.set() + self.__is_stopped.wait() self.join(THREAD_TIMEOUT_MAX) burglar = Burglar(x) diff --git a/awx/lib/site-packages/celery/tests/utils/test_local.py b/awx/lib/site-packages/celery/tests/utils/test_local.py index 324a207b6b..2b50efcdae 100644 --- a/awx/lib/site-packages/celery/tests/utils/test_local.py +++ b/awx/lib/site-packages/celery/tests/utils/test_local.py @@ -170,10 +170,10 @@ class test_Proxy(Case): class O(object): def __complex__(self): - return 10.333 + return complex(10.333) o = Proxy(O) - self.assertEqual(o.__complex__(), 10.333) + self.assertEqual(o.__complex__(), complex(10.333)) def test_index(self): @@ -329,6 +329,30 @@ class test_PromiseProxy(Case): self.assertEqual(p.attr, 123) self.assertEqual(X.evals, 1) + def test_callbacks(self): + source = Mock(name='source') + p = PromiseProxy(source) + cbA = Mock(name='cbA') + cbB = Mock(name='cbB') + cbC = Mock(name='cbC') + p.__then__(cbA, p) + p.__then__(cbB, p) + self.assertFalse(p.__evaluated__()) + self.assertTrue(object.__getattribute__(p, '__pending__')) + + self.assertTrue(repr(p)) + with self.assertRaises(AttributeError): + object.__getattribute__(p, '__pending__') + cbA.assert_called_with(p) + cbB.assert_called_with(p) + + self.assertTrue(p.__evaluated__()) + p.__then__(cbC, p) + cbC.assert_called_with(p) + + with self.assertRaises(AttributeError): + object.__getattribute__(p, '__pending__') + def test_maybe_evaluate(self): x = PromiseProxy(lambda: 30) self.assertFalse(x.__evaluated__()) diff --git a/awx/lib/site-packages/celery/tests/utils/test_platforms.py b/awx/lib/site-packages/celery/tests/utils/test_platforms.py index 587e2c0351..4e27efd7b0 100644 --- a/awx/lib/site-packages/celery/tests/utils/test_platforms.py +++ 
b/awx/lib/site-packages/celery/tests/utils/test_platforms.py @@ -416,8 +416,10 @@ if not platforms.IS_WINDOWS: p = Pidfile.return_value = Mock() p.is_locked.return_value = True p.remove_if_stale.return_value = False - with self.assertRaises(SystemExit): - create_pidlock('/var/pid') + with override_stdouts() as (_, err): + with self.assertRaises(SystemExit): + create_pidlock('/var/pid') + self.assertIn('already exists', err.getvalue()) p.remove_if_stale.return_value = True ret = create_pidlock('/var/pid') diff --git a/awx/lib/site-packages/celery/tests/utils/test_timeutils.py b/awx/lib/site-packages/celery/tests/utils/test_timeutils.py index 5849597cb4..2258d064d1 100644 --- a/awx/lib/site-packages/celery/tests/utils/test_timeutils.py +++ b/awx/lib/site-packages/celery/tests/utils/test_timeutils.py @@ -66,6 +66,9 @@ class test_iso8601(Case): iso2 = iso.replace('+00:00', '+01:00') d2 = parse_iso8601(iso2) self.assertEqual(d2.tzinfo._minutes, +60) + iso3 = iso.replace('+00:00', 'Z') + d3 = parse_iso8601(iso3) + self.assertEqual(d3.tzinfo, pytz.UTC) class test_timeutils(Case): diff --git a/awx/lib/site-packages/celery/tests/worker/test_consumer.py b/awx/lib/site-packages/celery/tests/worker/test_consumer.py index d5dea26d75..81199b85df 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_consumer.py +++ b/awx/lib/site-packages/celery/tests/worker/test_consumer.py @@ -439,6 +439,7 @@ class test_Gossip(AppCase): c.app.events.State.assert_called_with( on_node_join=g.on_node_join, on_node_leave=g.on_node_leave, + max_tasks_in_memory=1, ) g.update_state = Mock() worker = Mock() diff --git a/awx/lib/site-packages/celery/tests/worker/test_hub.py b/awx/lib/site-packages/celery/tests/worker/test_hub.py index 8a4328c6e3..4e9e4906e4 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_hub.py +++ b/awx/lib/site-packages/celery/tests/worker/test_hub.py @@ -1,7 +1,7 @@ from __future__ import absolute_import from kombu.async import Hub, READ, WRITE, ERR -from kombu.async.hub import repr_flag, _rcb +from kombu.async.debug import callback_for, repr_flag, _rcb from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore from celery.five import range @@ -53,8 +53,12 @@ class test_LaxBoundedSemaphore(Case): self.assertEqual(x.value, 0) self.assertFalse(c3.called) + x.release() + self.assertEqual(x.value, 0) x.release() self.assertEqual(x.value, 1) + x.release() + self.assertEqual(x.value, 2) c3.assert_called_with(3) def test_bounded(self): @@ -83,7 +87,7 @@ class test_LaxBoundedSemaphore(Case): x.grow(2) cb2.assert_called_with(2) cb3.assert_called_with(3) - self.assertEqual(x.value, 3) + self.assertEqual(x.value, 2) self.assertEqual(x.initial_value, 3) self.assertFalse(x._waiting) @@ -234,11 +238,11 @@ class test_Hub(Case): hub.readers = {6: reader} hub.writers = {7: writer} - self.assertEqual(hub._callback_for(6, READ), reader) - self.assertEqual(hub._callback_for(7, WRITE), writer) + self.assertEqual(callback_for(hub, 6, READ), reader) + self.assertEqual(callback_for(hub, 7, WRITE), writer) with self.assertRaises(KeyError): - hub._callback_for(6, WRITE) - self.assertEqual(hub._callback_for(6, WRITE, 'foo'), 'foo') + callback_for(hub, 6, WRITE) + self.assertEqual(callback_for(hub, 6, WRITE, 'foo'), 'foo') def test_add_remove_readers(self): hub = Hub() @@ -251,7 +255,7 @@ class test_Hub(Case): P.register.assert_has_calls([ call(10, hub.READ | hub.ERR), - call(File(11), hub.READ | hub.ERR), + call(11, hub.READ | hub.ERR), ], any_order=True) self.assertEqual(hub.readers[10], (read_A, (10, ))) @@ 
-289,7 +293,7 @@ class test_Hub(Case): P.register.assert_has_calls([ call(20, hub.WRITE), - call(File(21), hub.WRITE), + call(21, hub.WRITE), ], any_order=True) self.assertEqual(hub.writers[20], (write_A, ())) @@ -321,6 +325,7 @@ class test_Hub(Case): self.assertTrue(hub.readers) self.assertTrue(hub.writers) finally: + assert hub.poller hub.close() self.assertFalse(hub.readers) self.assertFalse(hub.writers) diff --git a/awx/lib/site-packages/celery/tests/worker/test_loops.py b/awx/lib/site-packages/celery/tests/worker/test_loops.py index 48653d50d0..00c5d960f1 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_loops.py +++ b/awx/lib/site-packages/celery/tests/worker/test_loops.py @@ -5,7 +5,7 @@ import socket from kombu.async import Hub, READ, WRITE, ERR from celery.bootsteps import CLOSE, RUN -from celery.exceptions import InvalidTaskError, SystemTerminate +from celery.exceptions import InvalidTaskError, WorkerShutdown, WorkerTerminate from celery.five import Empty from celery.worker import state from celery.worker.consumer import Consumer @@ -36,6 +36,9 @@ class X(object): heartbeat, Mock(name='clock')] self.connection.supports_heartbeats = True + self.connection.get_heartbeat_interval.side_effect = ( + lambda: self.heartbeat + ) self.consumer.callbacks = [] self.obj.strategies = {} self.connection.connection_errors = (socket.error, ) @@ -174,7 +177,7 @@ class test_asynloop(AppCase): # XXX why aren't the errors propagated?!? state.should_terminate = True try: - with self.assertRaises(SystemTerminate): + with self.assertRaises(WorkerTerminate): asynloop(*x.args) finally: state.should_terminate = False @@ -185,7 +188,7 @@ class test_asynloop(AppCase): state.should_terminate = True x.hub.close.side_effect = MemoryError() try: - with self.assertRaises(SystemTerminate): + with self.assertRaises(WorkerTerminate): asynloop(*x.args) finally: state.should_terminate = False @@ -194,7 +197,7 @@ class test_asynloop(AppCase): x = X(self.app) state.should_stop = True try: - with self.assertRaises(SystemExit): + with self.assertRaises(WorkerShutdown): asynloop(*x.args) finally: state.should_stop = False @@ -222,64 +225,70 @@ class test_asynloop(AppCase): x.hub.timer._queue = [1] x.close_then_error(x.hub.poller.poll) x.hub.fire_timers.return_value = 33.37 - x.hub.poller.poll.return_value = [] + poller = x.hub.poller + poller.poll.return_value = [] with self.assertRaises(socket.error): asynloop(*x.args) - x.hub.poller.poll.assert_called_with(33.37) + poller.poll.assert_called_with(33.37) def test_poll_readable(self): x = X(self.app) reader = Mock(name='reader') x.hub.add_reader(6, reader, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4)) - x.hub.poller.poll.return_value = [(6, READ)] + poller = x.hub.poller + poller.poll.return_value = [(6, READ)] with self.assertRaises(socket.error): asynloop(*x.args) reader.assert_called_with(6) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) def test_poll_readable_raises_Empty(self): x = X(self.app) reader = Mock(name='reader') x.hub.add_reader(6, reader, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, READ)] + poller = x.hub.poller + poller.poll.return_value = [(6, READ)] reader.side_effect = Empty() with self.assertRaises(socket.error): asynloop(*x.args) reader.assert_called_with(6) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) def test_poll_writable(self): x = X(self.app) writer = Mock(name='writer') 
x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, WRITE)] + poller = x.hub.poller + poller.poll.return_value = [(6, WRITE)] with self.assertRaises(socket.error): asynloop(*x.args) writer.assert_called_with(6) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) def test_poll_writable_none_registered(self): x = X(self.app) writer = Mock(name='writer') x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(7, WRITE)] + poller = x.hub.poller + poller.poll.return_value = [(7, WRITE)] with self.assertRaises(socket.error): asynloop(*x.args) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) def test_poll_unknown_event(self): x = X(self.app) writer = Mock(name='reader') x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, 0)] + poller = x.hub.poller + poller.poll.return_value = [(6, 0)] with self.assertRaises(socket.error): asynloop(*x.args) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) def test_poll_keep_draining_disabled(self): x = X(self.app) @@ -290,21 +299,23 @@ class test_asynloop(AppCase): poll.side_effect = socket.error() poll.side_effect = se - x.hub.poller.poll.return_value = [(6, 0)] + poller = x.hub.poller + poll.return_value = [(6, 0)] with self.assertRaises(socket.error): asynloop(*x.args) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) def test_poll_err_writable(self): x = X(self.app) writer = Mock(name='writer') x.hub.add_writer(6, writer, 6, 48) x.hub.on_tick.add(x.close_then_error(Mock(), 2)) - x.hub.poller.poll.return_value = [(6, ERR)] + poller = x.hub.poller + poller.poll.return_value = [(6, ERR)] with self.assertRaises(socket.error): asynloop(*x.args) writer.assert_called_with(6, 48) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) def test_poll_write_generator(self): x = X(self.app) @@ -359,18 +370,20 @@ class test_asynloop(AppCase): reader = Mock(name='reader') x.hub.add_reader(6, reader, 6, 24) x.hub.on_tick.add(x.close_then_error(Mock(), 2)) - x.hub.poller.poll.return_value = [(6, ERR)] + poller = x.hub.poller + poller.poll.return_value = [(6, ERR)] with self.assertRaises(socket.error): asynloop(*x.args) reader.assert_called_with(6, 24) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) def test_poll_raises_ValueError(self): x = X(self.app) x.hub.readers = {6: Mock()} - x.close_then_error(x.hub.poller.poll, exc=ValueError) + poller = x.hub.poller + x.close_then_error(poller.poll, exc=ValueError) asynloop(*x.args) - self.assertTrue(x.hub.poller.poll.called) + self.assertTrue(poller.poll.called) class test_synloop(AppCase): diff --git a/awx/lib/site-packages/celery/tests/worker/test_state.py b/awx/lib/site-packages/celery/tests/worker/test_state.py index eb92bb49a6..ede9a00a13 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_state.py +++ b/awx/lib/site-packages/celery/tests/worker/test_state.py @@ -5,7 +5,7 @@ import pickle from time import time from celery.datastructures import LimitedSet -from celery.exceptions import SystemTerminate +from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.worker import state from celery.tests.case import AppCase, Mock, patch @@ -53,12 +53,12 @@ class test_maybe_shutdown(AppCase): def 
test_should_stop(self): state.should_stop = True - with self.assertRaises(SystemExit): + with self.assertRaises(WorkerShutdown): state.maybe_shutdown() def test_should_terminate(self): state.should_terminate = True - with self.assertRaises(SystemTerminate): + with self.assertRaises(WorkerTerminate): state.maybe_shutdown() diff --git a/awx/lib/site-packages/celery/tests/worker/test_worker.py b/awx/lib/site-packages/celery/tests/worker/test_worker.py index 3bd7cd3d34..b700a6ca61 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_worker.py +++ b/awx/lib/site-packages/celery/tests/worker/test_worker.py @@ -1,4 +1,4 @@ -from __future__ import absolute_import +from __future__ import absolute_import, print_function import os import socket @@ -16,7 +16,9 @@ from celery.app.defaults import DEFAULTS from celery.bootsteps import RUN, CLOSE, StartStopStep from celery.concurrency.base import BasePool from celery.datastructures import AttributeDict -from celery.exceptions import SystemTerminate, TaskRevokedError +from celery.exceptions import ( + WorkerShutdown, WorkerTerminate, TaskRevokedError, +) from celery.five import Empty, range, Queue as FastQueue from celery.utils import uuid from celery.worker import components @@ -268,9 +270,9 @@ class test_Consumer(AppCase): l.event_dispatcher = mock_event_dispatcher() l.task_consumer = Mock() l.connection = Mock() - l.connection.drain_events.side_effect = SystemExit() + l.connection.drain_events.side_effect = WorkerShutdown() - with self.assertRaises(SystemExit): + with self.assertRaises(WorkerShutdown): l.loop(*l.loop_args()) self.assertTrue(l.task_consumer.register_callback.called) return l.task_consumer.register_callback.call_args[0][0] @@ -514,30 +516,56 @@ class test_Consumer(AppCase): self.assertTrue(logger.critical.call_count) def test_receive_message_eta(self): + import sys + from functools import partial + if os.environ.get('C_DEBUG_TEST'): + pp = partial(print, file=sys.__stderr__) + else: + def pp(*args, **kwargs): + pass + pp('TEST RECEIVE MESSAGE ETA') + pp('+CREATE MYKOMBUCONSUMER') l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + pp('-CREATE MYKOMBUCONSUMER') l.steps.pop() l.event_dispatcher = mock_event_dispatcher() backend = Mock() + pp('+ CREATE MESSAGE') m = create_message( backend, task=self.foo_task.name, args=[2, 4, 8], kwargs={}, eta=(datetime.now() + timedelta(days=1)).isoformat(), ) + pp('- CREATE MESSAGE') try: + pp('+ BLUEPRINT START 1') l.blueprint.start(l) + pp('- BLUEPRINT START 1') p = l.app.conf.BROKER_CONNECTION_RETRY l.app.conf.BROKER_CONNECTION_RETRY = False + pp('+ BLUEPRINT START 2') l.blueprint.start(l) + pp('- BLUEPRINT START 2') l.app.conf.BROKER_CONNECTION_RETRY = p + pp('+ BLUEPRINT RESTART') l.blueprint.restart(l) + pp('- BLUEPRINT RESTART') l.event_dispatcher = mock_event_dispatcher() + pp('+ GET ON MESSAGE') callback = self._get_on_message(l) + pp('- GET ON MESSAGE') + pp('+ CALLBACK') callback(m.decode(), m) + pp('- CALLBACK') finally: + pp('+ STOP TIMER') l.timer.stop() + pp('- STOP TIMER') try: + pp('+ JOIN TIMER') l.timer.join() + pp('- JOIN TIMER') except RuntimeError: pass @@ -918,10 +946,10 @@ class test_WorkController(AppCase): with self.assertRaises(KeyboardInterrupt): worker._process_task(task) - def test_process_task_raise_SystemTerminate(self): + def test_process_task_raise_WorkerTerminate(self): worker = self.worker worker.pool = Mock() - worker.pool.apply_async.side_effect = SystemTerminate() + worker.pool.apply_async.side_effect = WorkerTerminate() backend = Mock() m 
= create_message(backend, task=self.foo_task.name, args=[4, 8, 10], kwargs={}) @@ -946,7 +974,7 @@ class test_WorkController(AppCase): worker1 = self.create_worker() worker1.blueprint.state = RUN stc = MockStep() - stc.start.side_effect = SystemTerminate() + stc.start.side_effect = WorkerTerminate() worker1.steps = [stc] worker1.start() stc.start.assert_called_with(worker1) @@ -955,7 +983,7 @@ class test_WorkController(AppCase): worker2 = self.create_worker() worker2.blueprint.state = RUN sec = MockStep() - sec.start.side_effect = SystemExit() + sec.start.side_effect = WorkerShutdown() sec.terminate = None worker2.steps = [sec] worker2.start() diff --git a/awx/lib/site-packages/celery/utils/__init__.py b/awx/lib/site-packages/celery/utils/__init__.py index 5f3266353a..24205090bb 100644 --- a/awx/lib/site-packages/celery/utils/__init__.py +++ b/awx/lib/site-packages/celery/utils/__init__.py @@ -8,13 +8,16 @@ """ from __future__ import absolute_import, print_function +import numbers import os +import re import socket import sys import traceback import warnings import datetime +from collections import Callable from functools import partial, wraps from inspect import getargspec from pprint import pprint @@ -29,6 +32,8 @@ __all__ = ['worker_direct', 'warn_deprecated', 'deprecated', 'lpmerge', 'jsonify', 'gen_task_name', 'nodename', 'nodesplit', 'cached_property'] +PY3 = sys.version_info[0] == 3 + PENDING_DEPRECATION_FMT = """ {description} is scheduled for deprecation in \ @@ -57,6 +62,7 @@ WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq' NODENAME_SEP = '@' NODENAME_DEFAULT = 'celery' +RE_FORMAT = re.compile(r'%(\w)') def worker_direct(hostname): @@ -96,7 +102,7 @@ def deprecated(deprecation=None, removal=None, :keyword deprecation: Version that marks first deprecation, if this argument is not set a ``PendingDeprecationWarning`` will be emitted instead. - :keyword removed: Future version when this feature will be removed. + :keyword removal: Future version when this feature will be removed. :keyword alternative: Instructions for an alternative solution (if any). :keyword description: Description of what is being deprecated. 
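Since the hunk above only corrects the `:keyword removal:` field name, a short usage sketch may help readers keep the keywords straight (hypothetical `frobnicate` function; the argument names are taken from the docstring shown above):

    from celery.utils import deprecated

    @deprecated(deprecation='3.1', removal='4.0',
                alternative='use frobnicate_v2() instead',
                description='frobnicate')
    def frobnicate():
        pass

    # Each call now emits a CDeprecationWarning; if `deprecation`
    # were left unset, a PendingDeprecationWarning would be emitted
    # instead, per the docstring above.
    frobnicate()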
@@ -253,7 +259,7 @@ def strtobool(term, table={'false': False, 'no': False, '0': False, def jsonify(obj, - builtin_types=(int, float, string_t), key=None, + builtin_types=(numbers.Real, string_t), key=None, keyfilter=None, unknown_type_filter=None): """Transforms object making it suitable for json serialization""" @@ -341,6 +347,44 @@ def default_nodename(hostname): name, host = nodesplit(hostname or '') return nodename(name or NODENAME_DEFAULT, host or socket.gethostname()) + +def node_format(s, nodename, **extra): + name, host = nodesplit(nodename) + return host_format( + s, host, n=name or NODENAME_DEFAULT, **extra) + + +def _fmt_process_index(prefix='', default='0'): + from .log import current_process_index + index = current_process_index() + return '{0}{1}'.format(prefix, index) if index else default +_fmt_process_index_with_prefix = partial(_fmt_process_index, '-', '') + + +def host_format(s, host=None, **extra): + host = host or socket.gethostname() + name, _, domain = host.partition('.') + keys = dict({ + 'h': host, 'n': name, 'd': domain, + 'i': _fmt_process_index, 'I': _fmt_process_index_with_prefix, + }, **extra) + return simple_format(s, keys) + + +def simple_format(s, keys, pattern=RE_FORMAT, expand=r'\1'): + if s: + keys.setdefault('%', '%') + + def resolve(match): + resolver = keys[match.expand(expand)] + if isinstance(resolver, Callable): + return resolver() + return resolver + + return pattern.sub(resolve, s) + return s + + # ------------------------------------------------------------------------ # # > XXX Compat from .log import LOG_LEVELS # noqa diff --git a/awx/lib/site-packages/celery/utils/debug.py b/awx/lib/site-packages/celery/utils/debug.py index cdbc3e9c1c..79ac4e1e31 100644 --- a/awx/lib/site-packages/celery/utils/debug.py +++ b/awx/lib/site-packages/celery/utils/debug.py @@ -6,7 +6,7 @@ Utilities for debugging memory usage. """ -from __future__ import absolute_import, print_function +from __future__ import absolute_import, print_function, unicode_literals import os @@ -41,8 +41,10 @@ _mem_sample = [] def _on_blocking(signum, frame): import inspect raise RuntimeError( - 'Blocking detection timed-out at: %s' % ( - inspect.getframeinfo(frame), )) + 'Blocking detection timed-out at: {0}'.format( + inspect.getframeinfo(frame) + ) + ) @contextmanager diff --git a/awx/lib/site-packages/celery/utils/dispatch/signal.py b/awx/lib/site-packages/celery/utils/dispatch/signal.py index a2423d9788..7d4b337a9e 100644 --- a/awx/lib/site-packages/celery/utils/dispatch/signal.py +++ b/awx/lib/site-packages/celery/utils/dispatch/signal.py @@ -4,7 +4,9 @@ from __future__ import absolute_import import weakref from . import saferef + from celery.five import range +from celery.local import PromiseProxy, Proxy __all__ = ['Signal'] @@ -12,6 +14,8 @@ WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref) def _make_id(target): # pragma: no cover + if isinstance(target, Proxy): + target = target._get_current_object() if hasattr(target, '__func__'): return (id(target.__self__), id(target.__func__)) return id(target) @@ -23,7 +27,7 @@ class Signal(object): # pragma: no cover .. attribute:: receivers Internal attribute, holds a dictionary of - `{receriverkey (id): weakref(receiver)}` mappings. + `{receiverkey (id): weakref(receiver)}` mappings. 
""" @@ -39,6 +43,12 @@ class Signal(object): # pragma: no cover providing_args = [] self.providing_args = set(providing_args) + def _connect_proxy(self, fun, sender, weak, dispatch_uid): + return self.connect( + fun, sender=sender._get_current_object(), + weak=weak, dispatch_uid=dispatch_uid, + ) + def connect(self, *args, **kwargs): """Connect receiver to sender for signal. @@ -74,6 +84,12 @@ class Signal(object): # pragma: no cover def _connect_signal(fun): receiver = fun + if isinstance(sender, PromiseProxy): + sender.__then__( + self._connect_proxy, fun, sender, weak, dispatch_uid, + ) + return fun + if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: diff --git a/awx/lib/site-packages/celery/utils/functional.py b/awx/lib/site-packages/celery/utils/functional.py index be8aa14fe0..faa272b323 100644 --- a/awx/lib/site-packages/celery/utils/functional.py +++ b/awx/lib/site-packages/celery/utils/functional.py @@ -121,7 +121,7 @@ class LRUCache(UserDict): return list(self._iterate_items()) -def memoize(maxsize=None, Cache=LRUCache): +def memoize(maxsize=None, keyfun=None, Cache=LRUCache): def _memoize(fun): mutex = threading.Lock() @@ -129,7 +129,10 @@ def memoize(maxsize=None, Cache=LRUCache): @wraps(fun) def _M(*args, **kwargs): - key = args + (KEYWORD_MARK, ) + tuple(sorted(kwargs.items())) + if keyfun: + key = keyfun(args, kwargs) + else: + key = args + (KEYWORD_MARK, ) + tuple(sorted(kwargs.items())) try: with mutex: value = cache[key] diff --git a/awx/lib/site-packages/celery/utils/iso8601.py b/awx/lib/site-packages/celery/utils/iso8601.py index d9de247091..c951cf6ea8 100644 --- a/awx/lib/site-packages/celery/utils/iso8601.py +++ b/awx/lib/site-packages/celery/utils/iso8601.py @@ -59,7 +59,9 @@ def parse_iso8601(datestring): raise ValueError('unable to parse date string %r' % datestring) groups = m.groupdict() tz = groups['timezone'] - if tz and tz != 'Z': + if tz == 'Z': + tz = FixedOffset(0) + elif tz: m = TIMEZONE_REGEX.match(tz) prefix, hours, minutes = m.groups() hours, minutes = int(hours), int(minutes) @@ -67,10 +69,9 @@ def parse_iso8601(datestring): hours = -hours minutes = -minutes tz = FixedOffset(minutes + hours * 60) - frac = groups['fraction'] - groups['fraction'] = int(float('0.%s' % frac) * 1e6) if frac else 0 + frac = groups['fraction'] or 0 return datetime( int(groups['year']), int(groups['month']), int(groups['day']), int(groups['hour']), int(groups['minute']), int(groups['second']), - int(groups['fraction']), tz + int(frac), tz ) diff --git a/awx/lib/site-packages/celery/utils/log.py b/awx/lib/site-packages/celery/utils/log.py index e20a9503bb..2cef638776 100644 --- a/awx/lib/site-packages/celery/utils/log.py +++ b/awx/lib/site-packages/celery/utils/log.py @@ -9,6 +9,7 @@ from __future__ import absolute_import, print_function import logging +import numbers import os import sys import threading @@ -110,7 +111,7 @@ def get_task_logger(name): def mlevel(level): - if level and not isinstance(level, int): + if level and not isinstance(level, numbers.Integral): return LOG_LEVELS[level.upper()] return level @@ -281,4 +282,10 @@ def get_multiprocessing_logger(): def reset_multiprocessing_logger(): if mputil and hasattr(mputil, '_logger'): mputil._logger = None + + +def current_process_index(base=1): + if current_process: + index = getattr(current_process(), 'index', None) + return index + base if index is not None else index ensure_process_aware_logger() diff --git a/awx/lib/site-packages/celery/utils/timeutils.py 
b/awx/lib/site-packages/celery/utils/timeutils.py index 785b86f31d..5b75b83a89 100644 --- a/awx/lib/site-packages/celery/utils/timeutils.py +++ b/awx/lib/site-packages/celery/utils/timeutils.py @@ -8,6 +8,7 @@ """ from __future__ import absolute_import +import numbers import os import time as _time @@ -134,7 +135,7 @@ timezone = _Zone() def maybe_timedelta(delta): """Coerces integer to timedelta if `delta` is an integer.""" - if isinstance(delta, (int, float)): + if isinstance(delta, numbers.Real): return timedelta(seconds=delta) return delta diff --git a/awx/lib/site-packages/celery/worker/__init__.py b/awx/lib/site-packages/celery/worker/__init__.py index 44da42bc9c..217902d2e1 100644 --- a/awx/lib/site-packages/celery/worker/__init__.py +++ b/awx/lib/site-packages/celery/worker/__init__.py @@ -29,7 +29,7 @@ from celery import concurrency as _concurrency from celery import platforms from celery import signals from celery.exceptions import ( - ImproperlyConfigured, SystemTerminate, TaskRevokedError, + ImproperlyConfigured, WorkerTerminate, TaskRevokedError, ) from celery.five import string_t, values from celery.utils import default_nodename, worker_direct @@ -204,7 +204,7 @@ class WorkController(object): """Starts the workers main loop.""" try: self.blueprint.start(self) - except SystemTerminate: + except WorkerTerminate: self.terminate() except Exception as exc: logger.error('Unrecoverable error: %r', exc, exc_info=True) diff --git a/awx/lib/site-packages/celery/worker/consumer.py b/awx/lib/site-packages/celery/worker/consumer.py index 3f156faae9..16fa0ff4e7 100644 --- a/awx/lib/site-packages/celery/worker/consumer.py +++ b/awx/lib/site-packages/celery/worker/consumer.py @@ -111,8 +111,16 @@ The full contents of the message body was: %s """ +MESSAGE_DECODE_ERROR = """\ +Can't decode message body: %r [type:%r encoding:%r headers:%s] + +body: %s +""" + MESSAGE_REPORT = """\ -body: {0} {{content_type:{1} content_encoding:{2} delivery_info:{3}}}\ +body: {0} +{{content_type:{1} content_encoding:{2} + delivery_info:{3} headers={4}}} """ MINGLE_GET_FIELDS = itemgetter('clock', 'revoked') @@ -228,7 +236,7 @@ class Consumer(object): def _update_prefetch_count(self, index=0): """Update prefetch count after pool/shrink grow operations. - Index must be the change in number of processes as a postive + Index must be the change in number of processes as a positive (increasing) or negative (decreasing) number. .. note:: @@ -320,9 +328,10 @@ class Consumer(object): :param exc: The original exception instance. 
""" - crit("Can't decode message body: %r (type:%r encoding:%r raw:%r')", + crit(MESSAGE_DECODE_ERROR, exc, message.content_type, message.content_encoding, - dump_body(message, message.body), exc_info=1) + safe_repr(message.headers), dump_body(message, message.body), + exc_info=1) message.ack() def on_close(self): @@ -407,7 +416,8 @@ class Consumer(object): return MESSAGE_REPORT.format(dump_body(message, body), safe_repr(message.content_type), safe_repr(message.content_encoding), - safe_repr(message.delivery_info)) + safe_repr(message.delivery_info), + safe_repr(message.headers)) def on_unknown_message(self, body, message): warn(UNKNOWN_FORMAT, self._message_report(body, message)) @@ -537,8 +547,42 @@ class Heart(bootsteps.StartStopStep): shutdown = stop -class Tasks(bootsteps.StartStopStep): +class Mingle(bootsteps.StartStopStep): + label = 'Mingle' requires = (Events, ) + compatible_transports = set(['amqp', 'redis']) + + def __init__(self, c, without_mingle=False, **kwargs): + self.enabled = not without_mingle and self.compatible_transport(c.app) + + def compatible_transport(self, app): + with app.connection() as conn: + return conn.transport.driver_type in self.compatible_transports + + def start(self, c): + info('mingle: searching for neighbors') + I = c.app.control.inspect(timeout=1.0, connection=c.connection) + replies = I.hello(c.hostname, revoked._data) or {} + replies.pop(c.hostname, None) + if replies: + info('mingle: sync with %s nodes', + len([reply for reply, value in items(replies) if value])) + for reply in values(replies): + if reply: + try: + other_clock, other_revoked = MINGLE_GET_FIELDS(reply) + except KeyError: # reply from pre-3.1 worker + pass + else: + c.app.clock.adjust(other_clock) + revoked.update(other_revoked) + info('mingle: sync complete') + else: + info('mingle: all alone') + + +class Tasks(bootsteps.StartStopStep): + requires = (Mingle, ) def __init__(self, c, **kwargs): c.task_consumer = c.qos = None @@ -579,42 +623,8 @@ class Agent(bootsteps.StartStopStep): return agent -class Mingle(bootsteps.StartStopStep): - label = 'Mingle' - requires = (Events, ) - compatible_transports = set(['amqp', 'redis']) - - def __init__(self, c, without_mingle=False, **kwargs): - self.enabled = not without_mingle and self.compatible_transport(c.app) - - def compatible_transport(self, app): - with app.connection() as conn: - return conn.transport.driver_type in self.compatible_transports - - def start(self, c): - info('mingle: searching for neighbors') - I = c.app.control.inspect(timeout=1.0, connection=c.connection) - replies = I.hello(c.hostname, revoked._data) or {} - replies.pop(c.hostname, None) - if replies: - info('mingle: sync with %s nodes', - len([reply for reply, value in items(replies) if value])) - for reply in values(replies): - if reply: - try: - other_clock, other_revoked = MINGLE_GET_FIELDS(reply) - except KeyError: # reply from pre-3.1 worker - pass - else: - c.app.clock.adjust(other_clock) - revoked.update(other_revoked) - info('mingle: sync complete') - else: - info('mingle: all alone') - - class Control(bootsteps.StartStopStep): - requires = (Mingle, ) + requires = (Tasks, ) def __init__(self, c, **kwargs): self.is_green = c.pool is not None and c.pool.is_green @@ -648,6 +658,7 @@ class Gossip(bootsteps.ConsumerStep): self.state = c.app.events.State( on_node_join=self.on_node_join, on_node_leave=self.on_node_leave, + max_tasks_in_memory=1, ) if c.hub: c._mutex = DummyLock() @@ -761,6 +772,10 @@ class Gossip(bootsteps.ConsumerStep): def on_message(self, 
prepare, message): _type = message.delivery_info['routing_key'] + + # For redis when `fanout_patterns=False` (See Issue #1882) + if _type.split('.', 1)[0] == 'task': + return try: handler = self.event_handlers[_type] except KeyError: diff --git a/awx/lib/site-packages/celery/worker/control.py b/awx/lib/site-packages/celery/worker/control.py index a1502525fe..fcaf040815 100644 --- a/awx/lib/site-packages/celery/worker/control.py +++ b/awx/lib/site-packages/celery/worker/control.py @@ -13,6 +13,7 @@ import tempfile from kombu.utils.encoding import safe_repr +from celery.exceptions import WorkerShutdown from celery.five import UserDict, items from celery.platforms import signals as _signals from celery.utils import timeutils @@ -336,7 +337,7 @@ def autoscale(state, max=None, min=None): @Panel.register def shutdown(state, msg='Got shutdown from remote', **kwargs): logger.warning(msg) - raise SystemExit(msg) + raise WorkerShutdown(msg) @Panel.register diff --git a/awx/lib/site-packages/celery/worker/heartbeat.py b/awx/lib/site-packages/celery/worker/heartbeat.py index 297c6cad2c..cf46ab0c87 100644 --- a/awx/lib/site-packages/celery/worker/heartbeat.py +++ b/awx/lib/site-packages/celery/worker/heartbeat.py @@ -22,7 +22,7 @@ class Heart(object): :param timer: Timer instance. :param eventer: Event dispatcher used to send the event. :keyword interval: Time in seconds between heartbeats. - Default is 30 seconds. + Default is 2 seconds. """ diff --git a/awx/lib/site-packages/celery/worker/job.py b/awx/lib/site-packages/celery/worker/job.py index 75c4a193e7..b277520e37 100644 --- a/awx/lib/site-packages/celery/worker/job.py +++ b/awx/lib/site-packages/celery/worker/job.py @@ -288,8 +288,8 @@ class Request(object): 'hostname': self.hostname, 'is_eager': False, 'delivery_info': self.delivery_info}) retval = trace_task(self.task, self.id, self.args, kwargs, request, - **{'hostname': self.hostname, - 'loader': self.app.loader}) + hostname=self.hostname, loader=self.app.loader, + app=self.app) self.acknowledge() return retval @@ -371,6 +371,9 @@ class Request(object): if self.store_errors: self.task.backend.mark_as_failure(self.id, exc, request=self) + if self.task.acks_late: + self.acknowledge() + def on_success(self, ret_value, now=None, nowfun=monotonic): """Handler called if the task was successfully processed.""" if isinstance(ret_value, ExceptionInfo): diff --git a/awx/lib/site-packages/celery/worker/loops.py b/awx/lib/site-packages/celery/worker/loops.py index 12842ffb1c..0891f51a6c 100644 --- a/awx/lib/site-packages/celery/worker/loops.py +++ b/awx/lib/site-packages/celery/worker/loops.py @@ -10,7 +10,7 @@ from __future__ import absolute_import import socket from celery.bootsteps import RUN -from celery.exceptions import SystemTerminate, WorkerLostError +from celery.exceptions import WorkerShutdown, WorkerTerminate, WorkerLostError from celery.utils.log import get_logger from . 
import state @@ -25,11 +25,11 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos, heartbeat, clock, hbrate=2.0, RUN=RUN): """Non-blocking event loop consuming messages until connection is lost, or shutdown is requested.""" - update_qos = qos.update readers, writers = hub.readers, hub.writers hbtick = connection.heartbeat_check errors = connection.connection_errors + heartbeat = connection.get_heartbeat_interval() # negotiated hub_add, hub_remove = hub.add, hub.remove on_task_received = obj.create_task_handler() @@ -58,9 +58,9 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos, while blueprint.state == RUN and obj.connection: # shutdown if signal handlers told us to. if state.should_stop: - raise SystemExit() + raise WorkerShutdown() elif state.should_terminate: - raise SystemTerminate() + raise WorkerTerminate() # We only update QoS when there is no more messages to read. # This groups together qos calls, and makes sure that remote @@ -74,7 +74,7 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos, loop = hub.create_loop() finally: try: - hub.close() + hub.reset() except Exception as exc: error( 'Error cleaning up after event loop: %r', exc, exc_info=1, diff --git a/awx/lib/site-packages/celery/worker/pidbox.py b/awx/lib/site-packages/celery/worker/pidbox.py index 1165361e58..99c7a1a396 100644 --- a/awx/lib/site-packages/celery/worker/pidbox.py +++ b/awx/lib/site-packages/celery/worker/pidbox.py @@ -44,6 +44,7 @@ class Pidbox(object): def start(self, c): self.node.channel = c.connection.channel() self.consumer = self.node.listen(callback=self.on_message) + self.consumer.on_decode_error = c.on_decode_error def on_stop(self): pass diff --git a/awx/lib/site-packages/celery/worker/state.py b/awx/lib/site-packages/celery/worker/state.py index 68aac9df5a..8abaa5d738 100644 --- a/awx/lib/site-packages/celery/worker/state.py +++ b/awx/lib/site-packages/celery/worker/state.py @@ -22,7 +22,7 @@ from kombu.utils import cached_property from celery import __version__ from celery.datastructures import LimitedSet -from celery.exceptions import SystemTerminate +from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.five import Counter __all__ = ['SOFTWARE_INFO', 'reserved_requests', 'active_requests', @@ -66,9 +66,9 @@ should_terminate = False def maybe_shutdown(): if should_stop: - raise SystemExit() + raise WorkerShutdown() elif should_terminate: - raise SystemTerminate() + raise WorkerTerminate() def task_accepted(request, _all_total_count=all_total_count): diff --git a/awx/lib/site-packages/django_auth_ldap/__init__.py b/awx/lib/site-packages/django_auth_ldap/__init__.py index 7e7b1c57f1..fe08020d3d 100644 --- a/awx/lib/site-packages/django_auth_ldap/__init__.py +++ b/awx/lib/site-packages/django_auth_ldap/__init__.py @@ -1,2 +1,2 @@ -version = (1, 1, 7) +version = (1, 1, 8) version_string = '.'.join(map(str, version)) diff --git a/awx/lib/site-packages/django_auth_ldap/config.py b/awx/lib/site-packages/django_auth_ldap/config.py index 693e9faef5..4b792a7212 100644 --- a/awx/lib/site-packages/django_auth_ldap/config.py +++ b/awx/lib/site-packages/django_auth_ldap/config.py @@ -216,6 +216,18 @@ class LDAPSearchUnion(object): self.searches = args self.ldap = _LDAPConfig.get_ldap() + def search_with_additional_terms(self, term_dict, escape=True): + searches = [s.search_with_additional_terms(term_dict, escape) + for s in self.searches] + + return self.__class__(*searches) + + def search_with_additional_term_string(self, filterstr): + searches = 
[s.search_with_additional_term_string(filterstr) + for s in self.searches] + + return self.__class__(*searches) + def execute(self, connection, filterargs=()): msgids = [search._begin(connection, filterargs) for search in self.searches] results = {} diff --git a/awx/lib/site-packages/django_auth_ldap/tests.py b/awx/lib/site-packages/django_auth_ldap/tests.py index ffa1eb8b38..b83abe4612 100644 --- a/awx/lib/site-packages/django_auth_ldap/tests.py +++ b/awx/lib/site-packages/django_auth_ldap/tests.py @@ -67,6 +67,7 @@ class LDAPTest(TestCase): top = ("o=test", {"o": "test"}) people = ("ou=people,o=test", {"ou": "people"}) groups = ("ou=groups,o=test", {"ou": "groups"}) + moregroups = ("ou=moregroups,o=test", {"ou": "moregroups"}) alice = ("uid=alice,ou=people,o=test", { "uid": ["alice"], @@ -122,7 +123,7 @@ class LDAPTest(TestCase): "memberUid": ["alice"], }) - # groupOfUniqueName groups + # groupOfNames groups empty_gon = ("cn=empty_gon,ou=groups,o=test", { "cn": ["empty_gon"], "objectClass": ["groupOfNames"], @@ -143,6 +144,11 @@ class LDAPTest(TestCase): "objectClass": ["groupOfNames"], "member": ["uid=alice,ou=people,o=test"] }) + other_gon = ("cn=other_gon,ou=moregroups,o=test", { + "cn": ["other_gon"], + "objectClass": ["groupOfNames"], + "member": ["uid=bob,ou=people,o=test"] + }) # Nested groups with a circular reference parent_gon = ("cn=parent_gon,ou=groups,o=test", { @@ -164,10 +170,10 @@ class LDAPTest(TestCase): "member": ["cn=parent_gon,ou=groups,o=test"] }) - directory = dict([top, people, groups, alice, bob, dressler, nobody, - active_px, staff_px, superuser_px, empty_gon, active_gon, - staff_gon, superuser_gon, parent_gon, nested_gon, - circular_gon]) + directory = dict([top, people, groups, moregroups, alice, bob, dressler, + nobody, active_px, staff_px, superuser_px, empty_gon, + active_gon, staff_gon, superuser_gon, other_gon, + parent_gon, nested_gon, circular_gon]) @classmethod def configure_logger(cls): @@ -594,7 +600,7 @@ class LDAPTest(TestCase): def test_require_group(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE, '(objectClass=groupOfNames)'), GROUP_TYPE=MemberDNGroupType(member_attr='member'), REQUIRE_GROUP="cn=active_gon,ou=groups,o=test" ) @@ -610,6 +616,42 @@ class LDAPTest(TestCase): 'initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s'] ) + def test_group_union(self): + self._init_settings( + USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', + GROUP_SEARCH=LDAPSearchUnion( + LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE, '(objectClass=groupOfNames)'), + LDAPSearch('ou=moregroups,o=test', ldap.SCOPE_SUBTREE, '(objectClass=groupOfNames)') + ), + GROUP_TYPE=MemberDNGroupType(member_attr='member'), + REQUIRE_GROUP="cn=other_gon,ou=moregroups,o=test" + ) + + alice = self.backend.authenticate(username='alice', password='password') + bob = self.backend.authenticate(username='bob', password='password') + + self.assertTrue(alice is None) + self.assertTrue(bob is not None) + self.assertEqual(bob.ldap_user.group_names, set(['other_gon'])) + + def test_nested_group_union(self): + self._init_settings( + USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', + GROUP_SEARCH=LDAPSearchUnion( + LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE, '(objectClass=groupOfNames)'), + LDAPSearch('ou=moregroups,o=test', ldap.SCOPE_SUBTREE, '(objectClass=groupOfNames)') + ), + 
GROUP_TYPE=NestedMemberDNGroupType(member_attr='member'),
+            REQUIRE_GROUP="cn=other_gon,ou=moregroups,o=test"
+        )
+
+        alice = self.backend.authenticate(username='alice', password='password')
+        bob = self.backend.authenticate(username='bob', password='password')
+
+        self.assertTrue(alice is None)
+        self.assertTrue(bob is not None)
+        self.assertEqual(bob.ldap_user.group_names, set(['other_gon']))
+
     def test_denied_group(self):
         self._init_settings(
             USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test',
diff --git a/awx/lib/site-packages/django_extensions/__init__.py b/awx/lib/site-packages/django_extensions/__init__.py
index ec2dcb1cb0..a28dfa9a91 100644
--- a/awx/lib/site-packages/django_extensions/__init__.py
+++ b/awx/lib/site-packages/django_extensions/__init__.py
@@ -1,5 +1,5 @@
-VERSION = (1, 2, 5)
+VERSION = (1, 3, 3)
 
 # Dynamically calculate the version based on VERSION tuple
 if len(VERSION) > 2 and VERSION[2] is not None:
diff --git a/awx/lib/site-packages/django_extensions/admin/__init__.py b/awx/lib/site-packages/django_extensions/admin/__init__.py
index 51a50d3303..fdeceeeb42 100644
--- a/awx/lib/site-packages/django_extensions/admin/__init__.py
+++ b/awx/lib/site-packages/django_extensions/admin/__init__.py
@@ -1,13 +1,6 @@
 #
 # Autocomplete feature for admin panel
 #
-# Most of the code has been written by Jannis Leidel and was updated a bit
-# for django_extensions.
-# http://jannisleidel.com/2008/11/autocomplete-form-widget-foreignkey-model-fields/
-#
-# to_string_function, Satchmo adaptation and some comments added by emes
-# (Michal Salaban)
-#
 import six
 import operator
 
@@ -52,16 +45,20 @@ class ForeignKeyAutocompleteAdmin(ModelAdmin):
     take target model instance as only argument and return string
     representation. By default __unicode__() method of target object is used.
+
+    There is also an optional setting to limit the number of results
+    returned by the autocomplete query. You can set this integer value
+    in your settings file using FOREIGNKEY_AUTOCOMPLETE_LIMIT, or per
+    ForeignKeyAutocompleteAdmin subclass via the autocomplete_limit
+    attribute. If no value is set, the results will not be limited.
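+
+    A minimal usage sketch (ArticleAdmin and its field names are
+    hypothetical placeholders, not part of the package itself):
+
+        class ArticleAdmin(ForeignKeyAutocompleteAdmin):
+            # Autocomplete the 'author' foreign key by searching the
+            # 'username' and 'email' fields of the related model.
+            related_search_fields = {'author': ('username', 'email')}
+            # Cap this admin's autocomplete results at 10 entries.
+            autocomplete_limit = 10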
""" related_search_fields = {} related_string_functions = {} + autocomplete_limit = getattr(settings, 'FOREIGNKEY_AUTOCOMPLETE_LIMIT', None) def get_urls(self): - try: - from django.conf.urls import patterns, url - except ImportError: # django < 1.4 - from django.conf.urls.defaults import patterns, url + from django.conf.urls import patterns, url def wrap(view): def wrapper(*args, **kwargs): @@ -84,10 +81,12 @@ class ForeignKeyAutocompleteAdmin(ModelAdmin): model_name = request.GET.get('model_name', None) search_fields = request.GET.get('search_fields', None) object_pk = request.GET.get('object_pk', None) + try: to_string_function = self.related_string_functions[model_name] except KeyError: to_string_function = lambda x: x.__unicode__() + if search_fields and app_label and model_name and (query or object_pk): def construct_search(field_name): # use different lookup methods depending on the notation @@ -106,9 +105,13 @@ class ForeignKeyAutocompleteAdmin(ModelAdmin): for bit in query.split(): or_queries = [models.Q(**{construct_search(smart_str(field_name)): smart_str(bit)}) for field_name in search_fields.split(',')] other_qs = QuerySet(model) - other_qs.dup_select_related(queryset) + other_qs.query.select_related = queryset.query.select_related other_qs = other_qs.filter(reduce(operator.or_, or_queries)) queryset = queryset & other_qs + + if self.autocomplete_limit: + queryset = queryset[:self.autocomplete_limit] + data = ''.join([six.u('%s|%s\n') % (to_string_function(f), f.pk) for f in queryset]) elif object_pk: try: diff --git a/awx/lib/site-packages/django_extensions/admin/widgets.py b/awx/lib/site-packages/django_extensions/admin/widgets.py index 07fa22e238..090ee19a85 100644 --- a/awx/lib/site-packages/django_extensions/admin/widgets.py +++ b/awx/lib/site-packages/django_extensions/admin/widgets.py @@ -1,13 +1,10 @@ import six import django + from django import forms -from django.conf import settings from django.contrib.admin.sites import site from django.utils.safestring import mark_safe -if django.get_version() >= "1.4": - from django.utils.text import Truncator -else: - from django.utils.text import truncate_words +from django.utils.text import Truncator from django.template.loader import render_to_string from django.contrib.admin.widgets import ForeignKeyRawIdWidget @@ -26,6 +23,12 @@ class ForeignKeySearchInput(ForeignKeyRawIdWidget): js_files = ['django_extensions/js/jquery.bgiframe.min.js', 'django_extensions/js/jquery.ajaxQueue.js', 'django_extensions/js/jquery.autocomplete.js'] + + # Use a newer version of jquery if django version <= 1.5.x + # When removing this compatibility code also remove jquery-1.7.2.min.js file. 
+ if int(django.get_version()[2]) <= 5: + js_files.insert(0, 'django_extensions/js/jquery-1.7.2.min.js') + return forms.Media(css={'all': ('django_extensions/css/jquery.autocomplete.css',)}, js=js_files) @@ -34,17 +37,12 @@ class ForeignKeySearchInput(ForeignKeyRawIdWidget): def label_for_value(self, value): key = self.rel.get_related_field().name obj = self.rel.to._default_manager.get(**{key: value}) - if django.get_version() >= "1.4": - return Truncator(obj).words(14, truncate='...') - else: - return truncate_words(obj, 14) + + return Truncator(obj).words(14, truncate='...') def __init__(self, rel, search_fields, attrs=None): self.search_fields = search_fields - if django.get_version() >= "1.4": - super(ForeignKeySearchInput, self).__init__(rel, site, attrs) - else: - super(ForeignKeySearchInput, self).__init__(rel, attrs) + super(ForeignKeySearchInput, self).__init__(rel, site, attrs) def render(self, name, value, attrs=None): if attrs is None: @@ -59,31 +57,26 @@ class ForeignKeySearchInput(ForeignKeyRawIdWidget): url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()]) else: url = '' + if not 'class' in attrs: attrs['class'] = 'vForeignKeyRawIdAdminField' # Call the TextInput render method directly to have more control output = [forms.TextInput.render(self, name, value, attrs)] + if value: label = self.label_for_value(value) else: label = six.u('') - try: - admin_media_prefix = settings.ADMIN_MEDIA_PREFIX - except AttributeError: - admin_media_prefix = settings.STATIC_URL + "admin/" - context = { 'url': url, 'related_url': related_url, - 'admin_media_prefix': admin_media_prefix, 'search_path': self.search_path, 'search_fields': ','.join(self.search_fields), - 'model_name': model_name, 'app_label': app_label, + 'model_name': model_name, 'label': label, 'name': name, - 'pre_django_14': (django.VERSION[:2] < (1, 4)), } output.append(render_to_string(self.widget_template or ( 'django_extensions/widgets/%s/%s/foreignkey_searchinput.html' % (app_label, model_name), @@ -91,4 +84,5 @@ class ForeignKeySearchInput(ForeignKeyRawIdWidget): 'django_extensions/widgets/foreignkey_searchinput.html', ), context)) output.reverse() + return mark_safe(six.u('').join(output)) diff --git a/awx/lib/site-packages/django_extensions/conf/template_tags_template/templatetags/__init__.py.tmpl b/awx/lib/site-packages/django_extensions/conf/template_tags_template/templatetags/__init__.py.tmpl new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/django_extensions/conf/template_tags_template/templatetags/sample.py.tmpl b/awx/lib/site-packages/django_extensions/conf/template_tags_template/templatetags/sample.py.tmpl new file mode 100644 index 0000000000..d0b488128e --- /dev/null +++ b/awx/lib/site-packages/django_extensions/conf/template_tags_template/templatetags/sample.py.tmpl @@ -0,0 +1,4 @@ +from django import template + +register = template.Library() + diff --git a/awx/lib/site-packages/django_extensions/jobs/daily/cache_cleanup.py b/awx/lib/site-packages/django_extensions/jobs/daily/cache_cleanup.py index 2faac9f794..b598e0de9e 100644 --- a/awx/lib/site-packages/django_extensions/jobs/daily/cache_cleanup.py +++ b/awx/lib/site-packages/django_extensions/jobs/daily/cache_cleanup.py @@ -5,6 +5,7 @@ Can be run as a cronjob to clean out old data from the database (only expired sessions at the moment). 
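 
 Daily jobs such as this one are normally scheduled through the
 django-extensions job runner, e.g. from cron (usage hint, assuming the
 standard runjobs command is available):
 
     python manage.py runjobs daily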
""" +import six from django_extensions.management.jobs import DailyJob @@ -25,7 +26,7 @@ class Job(DailyJob): from django.core.cache import get_cache from django.db import router, connections - for cache_name, cache_options in settings.CACHES.iteritems(): + for cache_name, cache_options in six.iteritems(settings.CACHES): if cache_options['BACKEND'].endswith("DatabaseCache"): cache = get_cache(cache_name) db = router.db_for_write(cache.cache_model_class) diff --git a/awx/lib/site-packages/django_extensions/locale/cs/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/cs/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..c2635d7ec1 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/cs/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/cs/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/cs/LC_MESSAGES/django.po new file mode 100644 index 0000000000..0bf050e7a4 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/cs/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: django-extensions\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:42+0100\n" +"PO-Revision-Date: 2011-02-02 10:42+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: cs\n" +"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" + +#: db/models.py:15 +msgid "created" +msgstr "" + +#: db/models.py:16 +msgid "modified" +msgstr "" + +#: db/models.py:26 +msgid "title" +msgstr "" + +#: db/models.py:27 +msgid "slug" +msgstr "" + +#: db/models.py:28 +msgid "description" +msgstr "" + +#: db/models.py:50 +msgid "Inactive" +msgstr "" + +#: db/models.py:51 +msgid "Active" +msgstr "" + +#: db/models.py:53 +msgid "status" +msgstr "" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "" diff --git a/awx/lib/site-packages/django_extensions/locale/da/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/da/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..1972c4f810 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/da/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/da/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/da/LC_MESSAGES/django.po new file mode 100644 index 0000000000..42e323e935 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/da/LC_MESSAGES/django.po @@ -0,0 +1,79 @@ +# django_extentions in Danish. +# django_extensions på Dansk. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. 
+# Michael Lind Mortensen , 2009. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:42+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "og" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Brug feltet til venstre til at lave %(model_name)s lookups i felterne %" +"(field_list)s." + +#: db/models.py:15 +msgid "created" +msgstr "skabt" + +#: db/models.py:16 +msgid "modified" +msgstr "ændret" + +#: db/models.py:26 +msgid "title" +msgstr "titel" + +#: db/models.py:27 +msgid "slug" +msgstr "slug" + +#: db/models.py:28 +msgid "description" +msgstr "beskrivelse" + +#: db/models.py:50 +msgid "Inactive" +msgstr "" + +#: db/models.py:51 +msgid "Active" +msgstr "" + +#: db/models.py:53 +msgid "status" +msgstr "" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Lookup" diff --git a/awx/lib/site-packages/django_extensions/locale/de/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/de/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..6367477a60 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/de/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/de/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/de/LC_MESSAGES/django.po new file mode 100644 index 0000000000..81a35db99e --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/de/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:42+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "und" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Das linke Feld benutzen, um %(model_name)s Abfragen in den Feldern %" +"(field_list)s durchführen." 
+ +#: db/models.py:15 +msgid "created" +msgstr "erstellt" + +#: db/models.py:16 +msgid "modified" +msgstr "geändert" + +#: db/models.py:26 +msgid "title" +msgstr "Titel" + +#: db/models.py:27 +msgid "slug" +msgstr "Slug" + +#: db/models.py:28 +msgid "description" +msgstr "Beschreibung" + +#: db/models.py:50 +msgid "Inactive" +msgstr "Inaktiv" + +#: db/models.py:51 +msgid "Active" +msgstr "Aktiv" + +#: db/models.py:53 +msgid "status" +msgstr "Status" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "Leer lassen für sofortige Aktivierung" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "Leer lassen für unbefristete Aktivierung" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "%s ist kein urlpattern Objekt" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Abfrage" diff --git a/awx/lib/site-packages/django_extensions/locale/el/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/el/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..de23560b7e Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/el/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/el/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/el/LC_MESSAGES/django.po new file mode 100644 index 0000000000..19355db51d --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/el/LC_MESSAGES/django.po @@ -0,0 +1,79 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: django-extensions\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:42+0100\n" +"PO-Revision-Date: 2011-02-02 10:38+0000\n" +"Last-Translator: Jannis \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: el\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "και" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Χρησιμοποίησε το αριστερό πεδίο για να κάνεις αναζήτηση του %(model_name)s " +"με βάσει τα πεδία %(field_list)s." 
+ +#: db/models.py:15 +msgid "created" +msgstr "δημιουργήθηκε" + +#: db/models.py:16 +msgid "modified" +msgstr "τροποποιήθηκε" + +#: db/models.py:26 +msgid "title" +msgstr "τίτλος" + +#: db/models.py:27 +msgid "slug" +msgstr "μίνι-όνομα" + +#: db/models.py:28 +msgid "description" +msgstr "περιγραφή" + +#: db/models.py:50 +msgid "Inactive" +msgstr "ανενεργό" + +#: db/models.py:51 +msgid "Active" +msgstr "Ενεργό" + +#: db/models.py:53 +msgid "status" +msgstr "κατάσταση" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "αφήστε άδειο για άμεση ενεργοποίηση" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "αφήστε άδειο για αόριστη ενεργοποίηση" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "% s δεν φαίνεται να είναι ένα αντικείμενο urlpattern" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Αναζήτηση" diff --git a/awx/lib/site-packages/django_extensions/locale/en/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/en/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..4ed8824d31 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/en/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/en/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/en/LC_MESSAGES/django.po new file mode 100644 index 0000000000..8fd54f3259 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/en/LC_MESSAGES/django.po @@ -0,0 +1,76 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:42+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." 
+msgstr "" + +#: db/models.py:15 +msgid "created" +msgstr "" + +#: db/models.py:16 +msgid "modified" +msgstr "" + +#: db/models.py:26 +msgid "title" +msgstr "" + +#: db/models.py:27 +msgid "slug" +msgstr "" + +#: db/models.py:28 +msgid "description" +msgstr "" + +#: db/models.py:50 +msgid "Inactive" +msgstr "" + +#: db/models.py:51 +msgid "Active" +msgstr "" + +#: db/models.py:53 +msgid "status" +msgstr "" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "" diff --git a/awx/lib/site-packages/django_extensions/locale/es/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/es/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..8ff3f06505 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/es/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/es/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/es/LC_MESSAGES/django.po new file mode 100644 index 0000000000..e85b113afc --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/es/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "y" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Utilice el campo de la izquierda para hacer búsquedas en los campos %" +"(field_list)s de %(model_name)s." 
+ +#: db/models.py:15 +msgid "created" +msgstr "creado" + +#: db/models.py:16 +msgid "modified" +msgstr "modificado" + +#: db/models.py:26 +msgid "title" +msgstr "titulo" + +#: db/models.py:27 +msgid "slug" +msgstr "slug" + +#: db/models.py:28 +msgid "description" +msgstr "descripción" + +#: db/models.py:50 +msgid "Inactive" +msgstr "Inactivo" + +#: db/models.py:51 +msgid "Active" +msgstr "Activo" + +#: db/models.py:53 +msgid "status" +msgstr "estado" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "mantener vacío para una activación inmediata" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "deje vacío para mantener la activación indefinida" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "% s no parece ser un objeto urlpattern" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Buscar" diff --git a/awx/lib/site-packages/django_extensions/locale/et/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/et/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..caac6b973f Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/et/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/et/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/et/LC_MESSAGES/django.po new file mode 100644 index 0000000000..8a3c51a34c --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/et/LC_MESSAGES/django.po @@ -0,0 +1,75 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "ja" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." 
+msgstr "" + +#: db/models.py:15 +msgid "created" +msgstr "" + +#: db/models.py:16 +msgid "modified" +msgstr "" + +#: db/models.py:26 +msgid "title" +msgstr "" + +#: db/models.py:27 +msgid "slug" +msgstr "" + +#: db/models.py:28 +msgid "description" +msgstr "" + +#: db/models.py:50 +msgid "Inactive" +msgstr "" + +#: db/models.py:51 +msgid "Active" +msgstr "" + +#: db/models.py:53 +msgid "status" +msgstr "" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "" diff --git a/awx/lib/site-packages/django_extensions/locale/fr/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/fr/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..bc4e1cd7c3 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/fr/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/fr/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/fr/LC_MESSAGES/django.po new file mode 100644 index 0000000000..02a00a07ba --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/fr/LC_MESSAGES/django.po @@ -0,0 +1,81 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# +# Translators: +# mathiasuk, 2014 +# mathiasuk, 2014 +# stevandoh , 2013 +msgid "" +msgstr "" +"Project-Id-Version: django-extensions\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:42+0100\n" +"PO-Revision-Date: 2014-01-11 11:14+0000\n" +"Last-Translator: mathiasuk\n" +"Language-Team: French (http://www.transifex.com/projects/p/django-extensions/language/fr/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fr\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "et" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields " +"%(field_list)s." +msgstr "Utilisez le champ de gauche pour faire des recheres de %(model_name)s dans les champs %(field_list)s." 
+ +#: db/models.py:15 +msgid "created" +msgstr "créé" + +#: db/models.py:16 +msgid "modified" +msgstr "mis à jour" + +#: db/models.py:26 +msgid "title" +msgstr "titre" + +#: db/models.py:27 +msgid "slug" +msgstr "slug" + +#: db/models.py:28 +msgid "description" +msgstr "description" + +#: db/models.py:50 +msgid "Inactive" +msgstr "Inactif" + +#: db/models.py:51 +msgid "Active" +msgstr "Actif" + +#: db/models.py:53 +msgid "status" +msgstr "état" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "laisser vide pour activation immédiate" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "laisser vide pour activation indéterminée" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "%s ne semble pas etre un object urlpattern" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Recherche" diff --git a/awx/lib/site-packages/django_extensions/locale/hu/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/hu/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..2f98573ee2 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/hu/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/hu/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/hu/LC_MESSAGES/django.po new file mode 100644 index 0000000000..50ec58e476 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/hu/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "és" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Használd a baloldali mezőt hogy keress a %(model_name)s %(field_list)s. 
" +"mezőiben" + +#: db/models.py:15 +msgid "created" +msgstr "létrehozva" + +#: db/models.py:16 +msgid "modified" +msgstr "módosítva" + +#: db/models.py:26 +msgid "title" +msgstr "Cím" + +#: db/models.py:27 +msgid "slug" +msgstr "Slug" + +#: db/models.py:28 +msgid "description" +msgstr "Leírás" + +#: db/models.py:50 +msgid "Inactive" +msgstr "Inaktív" + +#: db/models.py:51 +msgid "Active" +msgstr "Aktív" + +#: db/models.py:53 +msgid "status" +msgstr "Állapot" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "Üresen hagyni azonnali aktiváláshoz" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "Üresen hagyni korlátlan aktiváláshoz" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "Úgy néz ki hogy %s nem egy urlpattern objektum" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Lekérdezés" diff --git a/awx/lib/site-packages/django_extensions/locale/it/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/it/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..c7fa82971b Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/it/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/it/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/it/LC_MESSAGES/django.po new file mode 100644 index 0000000000..14af204353 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/it/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "e" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Utilizzare il campo a sinistra per fare ricerche nei campi %(field_list)s " +"del modello %(model_name)s." 
+ +#: db/models.py:15 +msgid "created" +msgstr "creato" + +#: db/models.py:16 +msgid "modified" +msgstr "modificato" + +#: db/models.py:26 +msgid "title" +msgstr "titolo" + +#: db/models.py:27 +msgid "slug" +msgstr "slug" + +#: db/models.py:28 +msgid "description" +msgstr "descrizione" + +#: db/models.py:50 +msgid "Inactive" +msgstr "Inattivo" + +#: db/models.py:51 +msgid "Active" +msgstr "Attivo" + +#: db/models.py:53 +msgid "status" +msgstr "stato" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "lasciare vuoto per attivazione immediata" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "lasciare vuoti per attivazione indefinita" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "% s non sembra essere un oggetto urlPattern" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Ricerca" diff --git a/awx/lib/site-packages/django_extensions/locale/ja/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/ja/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..95b488d8cf Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/ja/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/ja/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/ja/LC_MESSAGES/django.po new file mode 100644 index 0000000000..ed42a0e180 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/ja/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "と" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." 
+msgstr "" +"%(field_list)s フィールドの内容から %(model_name)s を検索するには左のフィール" +"ドを使用して下さい。" + +#: db/models.py:15 +msgid "created" +msgstr "作成日時" + +#: db/models.py:16 +msgid "modified" +msgstr "変更日時" + +#: db/models.py:26 +msgid "title" +msgstr "タイトル" + +#: db/models.py:27 +msgid "slug" +msgstr "スラグ" + +#: db/models.py:28 +msgid "description" +msgstr "説明" + +#: db/models.py:50 +msgid "Inactive" +msgstr "非アクティブ" + +#: db/models.py:51 +msgid "Active" +msgstr "アクティブ" + +#: db/models.py:53 +msgid "status" +msgstr "ステータス" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "すぐに有効化する場合は空白のままにして下さい" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "無期限に有効化しておく場合は空白のままにして下さい" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "%s は urlpattern オブジェクトではないようです" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "検索" diff --git a/awx/lib/site-packages/django_extensions/locale/pl/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/pl/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..bf43c6447f Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/pl/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/pl/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/pl/LC_MESSAGES/django.po new file mode 100644 index 0000000000..446e142866 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/pl/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: Zbigniew Siciarz \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "i" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Użyj pola po lewej, by wyszukać pola %(field_list)s w modelu %(model_name)s." 
+ +#: db/models.py:15 +msgid "created" +msgstr "utworzony" + +#: db/models.py:16 +msgid "modified" +msgstr "zmodyfikowany" + +#: db/models.py:26 +msgid "title" +msgstr "tytuł" + +#: db/models.py:27 +msgid "slug" +msgstr "slug" + +#: db/models.py:28 +msgid "description" +msgstr "opis" + +#: db/models.py:50 +msgid "Inactive" +msgstr "Nieaktywny" + +#: db/models.py:51 +msgid "Active" +msgstr "Aktywny" + +#: db/models.py:53 +msgid "status" +msgstr "stan" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "pozostaw puste, by aktywować od razu" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "pozostaw puste, by nie definiować daty deaktywacji" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "%s nie jest obiektem typu urlpattern" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Szukaj" diff --git a/awx/lib/site-packages/django_extensions/locale/pt/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/pt/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..7f9a633ede Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/pt/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/pt/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/pt/LC_MESSAGES/django.po new file mode 100644 index 0000000000..eb6c68b4b4 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/pt/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: 2010-11-15 22:06-0300\n" +"Last-Translator: Fernando Silva \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "e" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Use o campo esquerdo para fazer com que o modelo %(model_name)s procure nos " +"campos %(field_list)s." 
+ +#: db/models.py:15 +msgid "created" +msgstr "criado" + +#: db/models.py:16 +msgid "modified" +msgstr "modificado" + +#: db/models.py:26 +msgid "title" +msgstr "título" + +#: db/models.py:27 +msgid "slug" +msgstr "slug" + +#: db/models.py:28 +msgid "description" +msgstr "descrição" + +#: db/models.py:50 +msgid "Inactive" +msgstr "Inativo" + +#: db/models.py:51 +msgid "Active" +msgstr "Ativo" + +#: db/models.py:53 +msgid "status" +msgstr "estado" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "deixe vazio para ativação imediata" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "deixe vazio para ativação por tempo indeterminado" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "%s não parece ser um objeto urlpattern" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Busca" diff --git a/awx/lib/site-packages/django_extensions/locale/pt_BR/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/pt_BR/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..0282342d4f Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/pt_BR/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/pt_BR/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/pt_BR/LC_MESSAGES/django.po new file mode 100644 index 0000000000..c5e12b0bd1 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/pt_BR/LC_MESSAGES/django.po @@ -0,0 +1,79 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# Claudemiro Alves Feitosa Neto , 2013. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-09-13 22:49-0300\n" +"PO-Revision-Date: 2013-09-13 22:49-0300\n" +"Last-Translator: Claudemiro Alves Feitosa \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: admin/__init__.py:128 +msgid "and" +msgstr "e" + +#: admin/__init__.py:130 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." 
+msgstr "Use o campo da esquerda para fazer com que o modelo %(model_name)s procure nos " +"campos %(field_list)s" + +#: db/models.py:22 mongodb/models.py:17 +msgid "created" +msgstr "criado" + +#: db/models.py:23 mongodb/models.py:18 +msgid "modified" +msgstr "modificado" + +#: db/models.py:36 mongodb/models.py:29 +msgid "title" +msgstr "título" + +#: db/models.py:37 mongodb/models.py:30 +msgid "slug" +msgstr "slug" + +#: db/models.py:38 mongodb/models.py:31 +msgid "description" +msgstr "descrição" + +#: db/models.py:63 mongodb/models.py:55 +msgid "Inactive" +msgstr "Inativo" + +#: db/models.py:64 mongodb/models.py:56 +msgid "Active" +msgstr "Ativo" + +#: db/models.py:66 mongodb/models.py:58 +msgid "status" +msgstr "status" + +#: db/models.py:67 mongodb/models.py:59 +msgid "keep empty for an immediate activation" +msgstr "deixe vazio para uma ativação imediata" + +#: db/models.py:68 mongodb/models.py:60 +msgid "keep empty for indefinite activation" +msgstr "deixe vazio para ativação por tempo indeterminado" + +#: mongodb/fields/__init__.py:24 +#, python-format +msgid "String (up to %(max_length)s)" +msgstr "Cadeia de Caracteres (até %(max_length)s)" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Busca" diff --git a/awx/lib/site-packages/django_extensions/locale/ro/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/ro/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..00626a0d2b Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/ro/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/ro/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/ro/LC_MESSAGES/django.po new file mode 100644 index 0000000000..8f4180ed1b --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/ro/LC_MESSAGES/django.po @@ -0,0 +1,80 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: django-extensions\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: 2011-02-02 10:38+0000\n" +"Last-Translator: Jannis \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ro\n" +"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < " +"20)) ? 1 : 2)\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "și" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." +msgstr "" +"Folosește câmpul din stânga pentru a efectua căutări de %(model_name)s în " +"câmpurile %(field_list)s." 
+ +#: db/models.py:15 +msgid "created" +msgstr "creat" + +#: db/models.py:16 +msgid "modified" +msgstr "modificat" + +#: db/models.py:26 +msgid "title" +msgstr "Titlu" + +#: db/models.py:27 +msgid "slug" +msgstr "Slug" + +#: db/models.py:28 +msgid "description" +msgstr "Descriere" + +#: db/models.py:50 +msgid "Inactive" +msgstr "Inactiv" + +#: db/models.py:51 +msgid "Active" +msgstr "Activ" + +#: db/models.py:53 +msgid "status" +msgstr "Stare" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "A se lăsa gol pentru activare imediată" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "A se lăsa gol pentru activare nelimitată" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "%s nu pare să fie un obiect urlpattern" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "Căutare" diff --git a/awx/lib/site-packages/django_extensions/locale/ru/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/ru/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..c02cf9cdb3 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/ru/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/ru/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/ru/LC_MESSAGES/django.po new file mode 100644 index 0000000000..fd0bd4d26a --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/ru/LC_MESSAGES/django.po @@ -0,0 +1,78 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: django-extensions\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: 2011-02-02 10:42+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ru\n" +"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%" +"10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." 
+msgstr "" + +#: db/models.py:15 +msgid "created" +msgstr "" + +#: db/models.py:16 +msgid "modified" +msgstr "" + +#: db/models.py:26 +msgid "title" +msgstr "" + +#: db/models.py:27 +msgid "slug" +msgstr "" + +#: db/models.py:28 +msgid "description" +msgstr "" + +#: db/models.py:50 +msgid "Inactive" +msgstr "" + +#: db/models.py:51 +msgid "Active" +msgstr "" + +#: db/models.py:53 +msgid "status" +msgstr "" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "" diff --git a/awx/lib/site-packages/django_extensions/locale/sk/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/sk/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..3fdc5cfe4e Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/sk/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/sk/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/sk/LC_MESSAGES/django.po new file mode 100644 index 0000000000..33964168ae --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/sk/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: django-extensions\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: 2011-02-02 10:42+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sk\n" +"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." 
+msgstr "" + +#: db/models.py:15 +msgid "created" +msgstr "" + +#: db/models.py:16 +msgid "modified" +msgstr "" + +#: db/models.py:26 +msgid "title" +msgstr "" + +#: db/models.py:27 +msgid "slug" +msgstr "" + +#: db/models.py:28 +msgid "description" +msgstr "" + +#: db/models.py:50 +msgid "Inactive" +msgstr "" + +#: db/models.py:51 +msgid "Active" +msgstr "" + +#: db/models.py:53 +msgid "status" +msgstr "" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "" diff --git a/awx/lib/site-packages/django_extensions/locale/tr/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/tr/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..800d1fe154 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/tr/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/tr/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/tr/LC_MESSAGES/django.po new file mode 100644 index 0000000000..45e600a16d --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/tr/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: django-extensions\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: 2011-02-02 10:42+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tr\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." 
+msgstr "" + +#: db/models.py:15 +msgid "created" +msgstr "" + +#: db/models.py:16 +msgid "modified" +msgstr "" + +#: db/models.py:26 +msgid "title" +msgstr "" + +#: db/models.py:27 +msgid "slug" +msgstr "" + +#: db/models.py:28 +msgid "description" +msgstr "" + +#: db/models.py:50 +msgid "Inactive" +msgstr "" + +#: db/models.py:51 +msgid "Active" +msgstr "" + +#: db/models.py:53 +msgid "status" +msgstr "" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "" diff --git a/awx/lib/site-packages/django_extensions/locale/zh_CN/LC_MESSAGES/django.mo b/awx/lib/site-packages/django_extensions/locale/zh_CN/LC_MESSAGES/django.mo new file mode 100644 index 0000000000..5276f0c207 Binary files /dev/null and b/awx/lib/site-packages/django_extensions/locale/zh_CN/LC_MESSAGES/django.mo differ diff --git a/awx/lib/site-packages/django_extensions/locale/zh_CN/LC_MESSAGES/django.po b/awx/lib/site-packages/django_extensions/locale/zh_CN/LC_MESSAGES/django.po new file mode 100644 index 0000000000..93547531e1 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/locale/zh_CN/LC_MESSAGES/django.po @@ -0,0 +1,77 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: django-extensions\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-02 11:43+0100\n" +"PO-Revision-Date: 2011-02-02 10:42+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=1; plural=0\n" + +#: admin/__init__.py:121 +msgid "and" +msgstr "" + +#: admin/__init__.py:123 +#, python-format +msgid "" +"Use the left field to do %(model_name)s lookups in the fields %(field_list)s." 
+msgstr "" + +#: db/models.py:15 +msgid "created" +msgstr "" + +#: db/models.py:16 +msgid "modified" +msgstr "" + +#: db/models.py:26 +msgid "title" +msgstr "" + +#: db/models.py:27 +msgid "slug" +msgstr "" + +#: db/models.py:28 +msgid "description" +msgstr "" + +#: db/models.py:50 +msgid "Inactive" +msgstr "" + +#: db/models.py:51 +msgid "Active" +msgstr "" + +#: db/models.py:53 +msgid "status" +msgstr "" + +#: db/models.py:56 +msgid "keep empty for an immediate activation" +msgstr "" + +#: db/models.py:58 +msgid "keep empty for indefinite activation" +msgstr "" + +#: management/commands/show_urls.py:34 +#, python-format +msgid "%s does not appear to be a urlpattern object" +msgstr "" + +#: templates/django_extensions/widgets/foreignkey_searchinput.html:4 +msgid "Lookup" +msgstr "" diff --git a/awx/lib/site-packages/django_extensions/management/commands/create_command.py b/awx/lib/site-packages/django_extensions/management/commands/create_command.py index 8b4b21aa93..6339b462a1 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/create_command.py +++ b/awx/lib/site-packages/django_extensions/management/commands/create_command.py @@ -1,6 +1,6 @@ import os import sys -from django.core.management.base import CommandError, AppCommand +from django.core.management.base import AppCommand from django_extensions.management.utils import _make_writeable from optparse import make_option @@ -14,7 +14,7 @@ class Command(AppCommand): ) help = ("Creates a Django management command directory structure for the given app name" - " in the current directory.") + " in the app's directory.") args = "[appname]" label = 'application name' @@ -24,16 +24,8 @@ class Command(AppCommand): can_import_settings = True def handle_app(self, app, **options): - directory = os.getcwd() - app_name = app.__name__.split('.')[-2] - project_dir = os.path.join(directory, app_name) - if not os.path.exists(project_dir): - try: - os.mkdir(project_dir) - except OSError as e: - raise CommandError(e) - - copy_template('command_template', project_dir, options.get('command_name'), '%sCommand' % options.get('base_command')) + app_dir = os.path.dirname(app.__file__) + copy_template('command_template', app_dir, options.get('command_name'), '%sCommand' % options.get('base_command')) def copy_template(template_name, copy_to, command_name, base_command): diff --git a/awx/lib/site-packages/django_extensions/management/commands/create_template_tags.py b/awx/lib/site-packages/django_extensions/management/commands/create_template_tags.py new file mode 100644 index 0000000000..d8f33c6cc0 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/management/commands/create_template_tags.py @@ -0,0 +1,68 @@ +import os +import sys +from django.core.management.base import AppCommand +from django_extensions.management.utils import _make_writeable +from optparse import make_option + + +class Command(AppCommand): + option_list = AppCommand.option_list + ( + make_option('--name', '-n', action='store', dest='tag_library_name', default='appname_tags', + help='The name to use for the template tag base name. Defaults to `appname`_tags.'), + make_option('--base', '-b', action='store', dest='base_command', default='Base', + help='The base class used for implementation of this command. 
diff --git a/awx/lib/site-packages/django_extensions/management/commands/create_template_tags.py b/awx/lib/site-packages/django_extensions/management/commands/create_template_tags.py
new file mode 100644
index 0000000000..d8f33c6cc0
--- /dev/null
+++ b/awx/lib/site-packages/django_extensions/management/commands/create_template_tags.py
@@ -0,0 +1,68 @@
+import os
+import sys
+from django.core.management.base import AppCommand
+from django_extensions.management.utils import _make_writeable
+from optparse import make_option
+
+
+class Command(AppCommand):
+    option_list = AppCommand.option_list + (
+        make_option('--name', '-n', action='store', dest='tag_library_name', default='appname_tags',
+                    help='The name to use for the template tag base name. Defaults to `appname`_tags.'),
+        make_option('--base', '-b', action='store', dest='base_command', default='Base',
+                    help='The base class used for implementation of this command. Should be one of Base, App, Label, or NoArgs'),
+    )
+
+    help = ("Creates a Django template tags directory structure for the given app name"
+            " in the app's directory.")
+    args = "[appname]"
+    label = 'application name'
+
+    requires_model_validation = False
+    # Can't import settings during this command, because they haven't
+    # necessarily been created.
+    can_import_settings = True
+
+    def handle_app(self, app, **options):
+        app_dir = os.path.dirname(app.__file__)
+        tag_library_name = options.get('tag_library_name')
+        if tag_library_name == 'appname_tags':
+            tag_library_name = '%s_tags' % os.path.basename(app_dir)
+        copy_template('template_tags_template', app_dir, tag_library_name)
+
+
+def copy_template(template_name, copy_to, tag_library_name):
+    """copies the specified template directory to the copy_to location"""
+    import django_extensions
+    import shutil
+
+    template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
+
+    # walks the template structure and copies it
+    for d, subdirs, files in os.walk(template_dir):
+        relative_dir = d[len(template_dir) + 1:]
+        if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
+            os.mkdir(os.path.join(copy_to, relative_dir))
+        # skip hidden directories without mutating the list mid-iteration
+        subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')]
+        for f in files:
+            if f.endswith('.pyc') or f.startswith('.DS_Store'):
+                continue
+            path_old = os.path.join(d, f)
+            path_new = os.path.join(copy_to, relative_dir, f.replace('sample', tag_library_name))
+            if os.path.exists(path_new):
+                path_new = os.path.join(copy_to, relative_dir, f)
+                if os.path.exists(path_new):
+                    continue
+            # strip a trailing '.tmpl' suffix (str.rstrip would eat arbitrary trailing characters)
+            if path_new.endswith(".tmpl"):
+                path_new = path_new[:-len(".tmpl")]
+            fp_old = open(path_old, 'r')
+            fp_new = open(path_new, 'w')
+            fp_new.write(fp_old.read())
+            fp_old.close()
+            fp_new.close()
+            try:
+                shutil.copymode(path_old, path_new)
+                _make_writeable(path_new)
+            except OSError:
+                sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
diff --git a/awx/lib/site-packages/django_extensions/management/commands/dumpscript.py b/awx/lib/site-packages/django_extensions/management/commands/dumpscript.py
index 5caf40efa3..d00fb567d3 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/dumpscript.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/dumpscript.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # -*- coding: UTF-8 -*-
 """
 Title: Dumpscript management command
@@ -35,7 +34,7 @@ import datetime
 import six
 import django
-from django.db.models import AutoField, BooleanField, FileField, ForeignKey
+from django.db.models import AutoField, BooleanField, FileField, ForeignKey, DateField, DateTimeField
 from django.core.exceptions import ObjectDoesNotExist
 from django.core.management.base import BaseCommand
@@ -329,9 +328,12 @@ class InstanceCode(Code):
         # TODO: check if batches are really needed. If not, remove them.
         sub_objects = sum([list(i) for i in collector.data.values()], [])
-        for batch in collector.batches.values():
-            # batch.values can be sets, which must be converted to lists
-            sub_objects += sum([list(i) for i in batch.values()], [])
+        if hasattr(collector, 'batches'):
+            # Django 1.6 removed batches for being dead code
+            # https://github.com/django/django/commit/a170c3f755351beb35f8166ec3c7e9d524d9602
+            for batch in collector.batches.values():
+                # batch.values can be sets, which must be converted to lists
+                sub_objects += sum([list(i) for i in batch.values()], [])
 
         sub_objects_parents = [so._meta.parents for so in sub_objects]
         if [self.model in p for p in sub_objects_parents].count(True) == 1:
@@ -624,6 +626,12 @@ import datetime
 from decimal import Decimal
 from django.contrib.contenttypes.models import ContentType
 
+try:
+    import dateutil.parser
+except ImportError:
+    print("Please install python-dateutil")
+    sys.exit(os.EX_USAGE)
+
 def run():
     importer.pre_import()
     importer.run_import(import_data)
@@ -706,6 +714,9 @@ def get_attribute_value(item, field, context, force=False):
         else:
             raise DoLater('(FK) %s.%s\n' % (item.__class__.__name__, field.name))
 
+    elif isinstance(field, (DateField, DateTimeField)):
+        return "dateutil.parser.parse(\"%s\")" % value.isoformat()
+
     # A normal field (e.g. a python built-in)
     else:
         return repr(value)
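The dumpscript hunk above now emits date and datetime values as dateutil.parser.parse("<iso>") calls instead of repr(). A quick standalone round-trip (requires python-dateutil, as the generated script itself notes; the timestamp is illustrative):

    import datetime

    import dateutil.parser

    original = datetime.datetime(2014, 4, 1, 12, 30, 15)
    emitted = 'dateutil.parser.parse("%s")' % original.isoformat()  # what dumpscript writes
    print(emitted)  # dateutil.parser.parse("2014-04-01T12:30:15")
    assert dateutil.parser.parse(original.isoformat()) == original  # the script reproduces the value
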
diff --git a/awx/lib/site-packages/django_extensions/management/commands/graph_models.py b/awx/lib/site-packages/django_extensions/management/commands/graph_models.py
index 00942d0803..5d5873d472 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/graph_models.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/graph_models.py
@@ -1,3 +1,4 @@
+import six
 import sys
 from optparse import make_option, NO_DEFAULT
 from django.core.management.base import BaseCommand, CommandError
@@ -45,7 +46,7 @@ class Command(BaseCommand):
         make_option('--inheritance', '-e', action='store_true', dest='inheritance', default=True,
                     help='Include inheritance arrows (default)'),
         make_option('--no-inheritance', '-E', action='store_false', dest='inheritance',
-                    help='Include inheritance arrows'),
+                    help='Do not include inheritance arrows'),
         make_option('--hide-relations-from-fields', '-R', action='store_false', dest="relations_as_fields", default=True,
                     help="Do not show relations as fields in the graph."),
         make_option('--disable-sort-fields', '-S', action="store_false", dest="sort_fields",
@@ -101,6 +102,9 @@ class Command(BaseCommand):
                 options[option.dest] = defaults[long_opt]
 
     def print_output(self, dotdata):
+        if six.PY3 and isinstance(dotdata, six.binary_type):
+            dotdata = dotdata.decode()
+
         print(dotdata)
 
     def render_output_pygraphviz(self, dotdata, **kwargs):
diff --git a/awx/lib/site-packages/django_extensions/management/commands/pipchecker.py b/awx/lib/site-packages/django_extensions/management/commands/pipchecker.py
index 4fff74edae..c853338731 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/pipchecker.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/pipchecker.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 import os
 import pip
 import sys
diff --git a/awx/lib/site-packages/django_extensions/management/commands/reset_db.py b/awx/lib/site-packages/django_extensions/management/commands/reset_db.py
index a94ef77665..df96d5a64c 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/reset_db.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/reset_db.py
@@ -20,6 +20,9 @@ class Command(BaseCommand):
     make_option('-U', '--user', action='store',
                 dest='user', default=None,
                 help='Use another user for the database than defined in settings.py'),
+    make_option('-O', '--owner', action='store',
+                dest='owner', default=None,
+                help='Use another owner for creating the database than the user defined in settings or via --user'),
     make_option('-P', '--password', action='store',
                 dest='password', default=None,
                 help='Use another password for the database than defined in settings.py'),
@@ -39,6 +42,10 @@ class Command(BaseCommand):
         Note: Transaction wrappers are in reverse as a work around for
         autocommit, anybody know how to do this the right way?
         """
+
+        if args:
+            raise CommandError("reset_db takes no arguments")
+
         router = options.get('router')
         dbinfo = settings.DATABASES.get(router)
         if dbinfo is None:
@@ -47,6 +54,7 @@ class Command(BaseCommand):
         engine = dbinfo.get('ENGINE').split('.')[-1]
         user = options.get('user') or dbinfo.get('USER')
         password = options.get('password') or dbinfo.get('PASSWORD')
+        owner = options.get('owner') or user
 
         database_name = options.get('dbname') or dbinfo.get('NAME')
         if database_name == '':
@@ -130,7 +138,8 @@ Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
             logging.info("Error: %s" % str(e))
 
         create_query = "CREATE DATABASE %s" % database_name
-        create_query += " WITH OWNER = %s " % user
+        if owner:
+            create_query += " WITH OWNER = \"%s\" " % owner
         create_query += " ENCODING = 'UTF8'"
 
         if engine == 'postgis':
diff --git a/awx/lib/site-packages/django_extensions/management/commands/set_default_site.py b/awx/lib/site-packages/django_extensions/management/commands/set_default_site.py
new file mode 100644
index 0000000000..6bb0f78082
--- /dev/null
+++ b/awx/lib/site-packages/django_extensions/management/commands/set_default_site.py
@@ -0,0 +1,56 @@
+"""
+set_default_site.py
+"""
+import socket
+from optparse import make_option
+
+from django.core.management.base import NoArgsCommand, CommandError
+
+
+class Command(NoArgsCommand):
+    option_list = NoArgsCommand.option_list + (
+        make_option('--name', dest='site_name', default=None,
+                    help='Use this as site name.'),
+        make_option('--domain', dest='site_domain', default=None,
+                    help='Use this as site domain.'),
+        make_option('--system-fqdn', dest='set_as_system_fqdn', default=False,
+                    action="store_true", help="Use the system's FQDN (Fully Qualified Domain Name) as name and domain. Can be used in combination with --name"),
+    )
+    help = "Set parameters of the default django.contrib.sites Site"
+    requires_model_validation = True
+
+    def handle_noargs(self, **options):
+        from django.contrib.sites.models import Site
+
+        try:
+            site = Site.objects.get(pk=1)
+        except Site.DoesNotExist:
+            raise CommandError("Default site with pk=1 does not exist")
+        else:
+            name = options.get("site_name", None)
+            domain = options.get("site_domain", None)
+            if options.get('set_as_system_fqdn', False):
+                domain = socket.getfqdn()
+                if not domain:
+                    raise CommandError("Cannot find the system's FQDN")
+                if name is None:
+                    name = domain
+
+            update_kwargs = {}
+            if name and name != site.name:
+                update_kwargs["name"] = name
+
+            if domain and domain != site.domain:
+                update_kwargs["domain"] = domain
+
+            if update_kwargs:
+                Site.objects.filter(pk=1).update(**update_kwargs)
+                site = Site.objects.get(pk=1)
+                print("Updated default site. You might need to restart django as sites are cached aggressively.")
+            else:
+                print("Nothing to update (need --name, --domain and/or --system-fqdn)")
+
+            print("Default Site:")
+            print("\tid = %s" % site.id)
+            print("\tname = %s" % site.name)
+            print("\tdomain = %s" % site.domain)
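For orientation (not part of the diff; the name/domain values are illustrative), the new command can be invoked as `python manage.py set_default_site ...` or driven from code via Django's call_command, passing options by their dest names:

    from django.core.management import call_command

    # Point the default Site (pk=1) at an explicit name/domain pair.
    call_command('set_default_site', site_name='My Site', site_domain='example.com')
    # Or derive both from the machine's fully qualified domain name.
    call_command('set_default_site', set_as_system_fqdn=True)
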
diff --git a/awx/lib/site-packages/django_extensions/management/commands/shell_plus.py b/awx/lib/site-packages/django_extensions/management/commands/shell_plus.py
index 0ab0cdda0b..707dd6cb07 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/shell_plus.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/shell_plus.py
@@ -68,8 +68,12 @@ class Command(NoArgsCommand):
 
         def run_notebook():
             from django.conf import settings
-            from IPython.frontend.html.notebook import notebookapp
-            app = notebookapp.NotebookApp.instance()
+            try:
+                from IPython.html.notebookapp import NotebookApp
+            except ImportError:
+                from IPython.frontend.html.notebook import notebookapp
+                NotebookApp = notebookapp.NotebookApp
+            app = NotebookApp.instance()
             ipython_arguments = getattr(settings, 'IPYTHON_ARGUMENTS', ['--ext', 'django_extensions.management.notebook_extension'])
             app.initialize(ipython_arguments)
             app.start()
diff --git a/awx/lib/site-packages/django_extensions/management/commands/show_templatetags.py b/awx/lib/site-packages/django_extensions/management/commands/show_templatetags.py
index 63a5077f31..506f9f29f6 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/show_templatetags.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/show_templatetags.py
@@ -1,11 +1,17 @@
-from django.conf import settings
-from django.template import get_library
 import os
+import six
 import inspect
 
+from django.conf import settings
 from django.core.management.base import BaseCommand
 from django.core.management import color
+from django.template import get_library
 from django.utils import termcolors
 
+try:
+    from django.utils.encoding import smart_text
+except ImportError:
+    smart_text = six.u
+
 
 def color_style():
     style = color.color_style()
@@ -27,7 +33,7 @@ def format_block(block, nlspaces=0):
     import re
 
     # separate block into lines
-    lines = str(block).split('\n')
+    lines = smart_text(block).split('\n')
 
     # remove leading/trailing empty lines
     while lines and not lines[0]:
@@ -58,7 +64,7 @@ class Command(BaseCommand):
     results = ""
 
     def add_result(self, s, depth=0):
-        self.results += '\n%s\n' % s.rjust(depth * 4 + len(s))
+        self.results += '%s\n' % s.rjust(depth * 4 + len(s))
 
     def handle(self, *args, **options):
         if args:
@@ -87,7 +93,7 @@ class Command(BaseCommand):
             except:
                 continue
             if not app_labeled:
-                self.add_result('\nApp: %s' % style.MODULE_NAME(app))
+                self.add_result('App: %s' % style.MODULE_NAME(app))
                 app_labeled = True
             self.add_result('load: %s' % style.TAGLIB(taglib), 1)
             for items, label, style_func in [(lib.tags, 'Tag:', style.TAG), (lib.filters, 'Filter:', style.FILTER)]:
diff --git a/awx/lib/site-packages/django_extensions/management/commands/sqlcreate.py b/awx/lib/site-packages/django_extensions/management/commands/sqlcreate.py
index 8eacf127a6..3f749ded91 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/sqlcreate.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/sqlcreate.py
@@ -1,16 +1,16 @@
-from optparse import make_option
 import sys
 import socket
-import django
-from django.core.management.base import CommandError, BaseCommand
+from optparse import make_option
+
 from django.conf import settings
+from django.core.management.base import CommandError, BaseCommand
 
 
 class Command(BaseCommand):
     option_list = BaseCommand.option_list + (
         make_option('-R', '--router', action='store',
-                    dest='router', default=None,
+                    dest='router', default='default',
                     help='Use this router-database other than defined in settings.py'),
         make_option('-D', '--drop', action='store_true',
                     dest='drop', default=False,
@@ -25,39 +25,18 @@ The envisioned use case is something like this:
     requires_model_validation = False
     can_import_settings = True
 
-    @staticmethod
-    def set_db_settings(**options):
-        if django.get_version() >= "1.2":
-            router = options.get('router')
-            if router is None:
-                return False
-
-            # retrieve this with the 'using' argument
-            dbinfo = settings.DATABASES.get(router)
-            settings.DATABASE_ENGINE = dbinfo.get('ENGINE').split('.')[-1]
-            settings.DATABASE_USER = dbinfo.get('USER')
-            settings.DATABASE_PASSWORD = dbinfo.get('PASSWORD')
-            settings.DATABASE_NAME = dbinfo.get('NAME')
-            settings.DATABASE_HOST = dbinfo.get('HOST')
-            settings.DATABASE_PORT = dbinfo.get('PORT')
-            return True
-        else:
-            # settings are set for django < 1.2 no modification needed
-            return True
-
     def handle(self, *args, **options):
-        if django.get_version() >= "1.2":
-            got_db_settings = self.set_db_settings(**options)
-            if not got_db_settings:
-                raise CommandError("You are using Django %s which requires to specify the db-router.\nPlease specify the router by adding --router= to this command." % django.get_version())
+        router = options.get('router')
+        dbinfo = settings.DATABASES.get(router)
+        if dbinfo is None:
+            raise CommandError("Unknown database router %s" % router)
 
-        #print("%s %s %s %s" % (settings.DATABASE_ENGINE, settings.DATABASE_NAME, settings.DATABASE_USER, settings.DATABASE_PASSWORD))
-        engine = settings.DATABASE_ENGINE
-        dbname = settings.DATABASE_NAME
-        dbuser = settings.DATABASE_USER
-        dbpass = settings.DATABASE_PASSWORD
-        dbhost = settings.DATABASE_HOST
+        engine = dbinfo.get('ENGINE').split('.')[-1]
+        dbuser = dbinfo.get('USER')
+        dbpass = dbinfo.get('PASSWORD')
+        dbname = dbinfo.get('NAME')
+        dbhost = dbinfo.get('HOST')
         dbclient = socket.gethostname()
 
         # django settings file tells you that localhost should be specified by leaving
@@ -73,15 +52,19 @@ The envisioned use case is something like this:
             print("GRANT ALL PRIVILEGES ON %s.* to '%s'@'%s' identified by '%s';" % (
                 dbname, dbuser, dbclient, dbpass
             ))
+
         elif engine == 'postgresql_psycopg2':
             if options.get('drop'):
                 print("DROP DATABASE IF EXISTS %s;" % (dbname,))
                 print("DROP USER IF EXISTS %s;" % (dbuser,))
+
             print("CREATE USER %s WITH ENCRYPTED PASSWORD '%s' CREATEDB;" % (dbuser, dbpass))
             print("CREATE DATABASE %s WITH ENCODING 'UTF-8' OWNER \"%s\";" % (dbname, dbuser))
            print("GRANT ALL PRIVILEGES ON DATABASE %s TO %s;" % (dbname, dbuser))
+
         elif engine == 'sqlite3':
             sys.stderr.write("-- manage.py syncdb will automatically create a sqlite3 database file.\n")
+
         else:
             # CREATE DATABASE is not SQL standard, but seems to be supported by most.
             sys.stderr.write("-- Don't know how to handle '%s', falling back to SQL.\n" % engine)
diff --git a/awx/lib/site-packages/django_extensions/management/commands/sqldiff.py b/awx/lib/site-packages/django_extensions/management/commands/sqldiff.py
index 12fe804038..2301f8de13 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/sqldiff.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/sqldiff.py
@@ -20,6 +20,7 @@ KNOWN ISSUES:
    positives or false negatives.
 """
 
+import six
 from django.core.management.base import BaseCommand
 from django.core.management import sql as _sql
 from django.core.management import CommandError
@@ -114,6 +115,8 @@ class SQLDiff(object):
     SQL_COMMENT = lambda self, style, qn, args: style.NOTICE('-- Comment: %s' % style.SQL_TABLE(args[0]))
     SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE('-- Table missing: %s' % args[0])
 
+    can_detect_notnull_differ = False
+
     def __init__(self, app_models, options):
         self.app_models = app_models
         self.options = options
@@ -130,6 +133,8 @@ class SQLDiff(object):
         self.db_tables = self.introspection.get_table_list(self.cursor)
         self.differences = []
         self.unknown_db_fields = {}
+        self.new_db_fields = set()
+        self.null = {}
 
         self.DIFF_SQL = {
             'error': self.SQL_ERROR,
@@ -148,6 +153,12 @@ class SQLDiff(object):
             'notnull-differ': self.SQL_NOTNULL_DIFFER,
         }
 
+        if self.can_detect_notnull_differ:
+            self.load_null()
+
+    def load_null(self):
+        raise NotImplementedError("load_null() must be implemented if the diff backend sets 'can_detect_notnull_differ' to True")
+
     def add_app_model_marker(self, app_label, model_name):
         self.differences.append((app_label, model_name, []))
 
@@ -241,6 +252,12 @@ class SQLDiff(object):
     def get_field_db_type_lookup(self, type_code):
         return None
 
+    def get_field_db_nullable(self, field, table_name):
+        tablespace = field.db_tablespace
+        if tablespace == "":
+            tablespace = "public"
+        return self.null.get((tablespace, table_name, field.attname), 'fixme')
+
     def strip_parameters(self, field_type):
         if field_type and field_type != 'double precision':
             return field_type.split(" ")[0].split("(")[0].lower()
@@ -258,7 +275,7 @@ class SQLDiff(object):
         # TODO: Postgresql does not list unique_togethers in table_indexes
         #       MySQL does
         fields = dict([(field.db_column or field.name, field.unique) for field in all_local_fields(meta)])
-        for att_name, att_opts in table_indexes.iteritems():
+        for att_name, att_opts in six.iteritems(table_indexes):
             if att_opts['unique'] and att_name in fields and not fields[att_name]:
                 if att_name in flatten(meta.unique_together):
                     continue
@@ -278,7 +295,7 @@ class SQLDiff(object):
 
     def find_index_missing_in_model(self, meta, table_indexes, table_name):
         fields = dict([(field.name, field) for field in all_local_fields(meta)])
-        for att_name, att_opts in table_indexes.iteritems():
+        for att_name, att_opts in six.iteritems(table_indexes):
             if att_name in fields:
                 field = fields[att_name]
                 if field.db_index:
@@ -301,7 +318,7 @@ class SQLDiff(object):
 
     def find_field_missing_in_db(self, fieldmap, table_description, table_name):
         db_fields = [row[0] for row in table_description]
-        for field_name, field in fieldmap.iteritems():
+        for field_name, field in six.iteritems(fieldmap):
             if field_name not in db_fields:
                 field_output = []
                 if field.rel:
@@ -313,6 +330,7 @@ class SQLDiff(object):
                 if not field.null:
                     field_output.append('NOT NULL')
                 self.add_difference(op, table_name, field_name, *field_output)
+                self.new_db_fields.add((table_name, field_name))
 
     def find_field_type_differ(self, meta, table_description, table_name, func=None):
         db_fields = dict([(row[0], row) for row in table_description])
@@ -351,6 +369,18 @@ class SQLDiff(object):
             if not model_type == db_type:
                 self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)
 
+    def find_field_notnull_differ(self, meta, table_description, table_name):
+        if not self.can_detect_notnull_differ:
+            return
+
+        for field in all_local_fields(meta):
+            if (table_name, field.attname) in self.new_db_fields:
+                continue
+            null = self.get_field_db_nullable(field, table_name)
+            if field.null != null:
+                action = field.null and 'DROP' or 'SET'
+                self.add_difference('notnull-differ', table_name, field.attname, action)
+
     @transaction.commit_manually
     def find_differences(self):
         cur_app_label = None
@@ -384,6 +414,7 @@ class SQLDiff(object):
                 continue
             else:
                 transaction.commit()
+
             # Fields which are defined in database but not in model
             # 1) find: 'unique-missing-in-model'
             self.find_unique_missing_in_model(meta, table_indexes, table_name)
@@ -405,6 +436,8 @@ class SQLDiff(object):
             self.find_field_type_differ(meta, table_description, table_name)
             # 8) find: 'type-parameter-differs'
             self.find_field_parameter_differ(meta, table_description, table_name)
+            # 9) find: 'field-notnull'
+            self.find_field_notnull_differ(meta, table_description, table_name)
 
     def print_diff(self, style=no_style()):
         """ print differences to stdout """
@@ -414,6 +447,10 @@ class SQLDiff(object):
         self.print_diff_text(style)
 
     def print_diff_text(self, style):
+        if not self.can_detect_notnull_differ:
+            print(style.NOTICE("# Detecting notnull changes not implemented for this database backend"))
+            print("")
+
         cur_app_label = None
         for app_label, model_name, diffs in self.differences:
             if not diffs:
@@ -433,6 +470,10 @@ class SQLDiff(object):
             print("%s %s %s %s %s" % (style.NOTICE("App"), style.SQL_TABLE(app_label), style.NOTICE('Model'), style.SQL_TABLE(model_name), text))
 
     def print_diff_sql(self, style):
+        if not self.can_detect_notnull_differ:
+            print(style.NOTICE("-- Detecting notnull changes not implemented for this database backend"))
+            print("")
+
         cur_app_label = None
         qn = connection.ops.quote_name
         has_differences = max([len(diffs) for app_label, model_name, diffs in self.differences])
@@ -459,10 +500,12 @@ class SQLDiff(object):
 
 
 class GenericSQLDiff(SQLDiff):
-    pass
+    can_detect_notnull_differ = False
 
 
 class MySQLDiff(SQLDiff):
+    can_detect_notnull_differ = False
+
     # All the MySQL hacks together create something of a problem
     # Fixing one bug in MySQL creates another issue. So just keep in mind
     # that this is way unreliable for MySQL atm.
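The SQLite backend that follows derives column nullability from SQLite's PRAGMA table_info. A self-contained illustration (the table and its columns are made up), showing the notnull flag that load_null() inverts:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT NOT NULL, note TEXT)")

    # Rows come back as (cid, name, type, notnull, dflt_value, pk)
    for cid, name, col_type, notnull, default, pk in conn.execute("PRAGMA table_info(demo)"):
        print(name, "nullable:", not notnull)  # mirrors `self.null[key] = not table_info['notnull']`
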
@@ -496,6 +539,18 @@ class MySQLDiff(SQLDiff):
 
 class SqliteSQLDiff(SQLDiff):
+    can_detect_notnull_differ = True
+
+    def load_null(self):
+        for table_name in self.db_tables:
+            # sqlite does not support tablespaces
+            tablespace = "public"
+            # index, column_name, column_type, nullable, default_value
+            # see: http://www.sqlite.org/pragma.html#pragma_table_info
+            for table_info in self.sql_to_dict("PRAGMA table_info(%s);" % table_name, []):
+                key = (tablespace, table_name, table_info['name'])
+                self.null[key] = not table_info['notnull']
+
     # Unique does not seem to be implied on Sqlite for Primary_key's
     # if this is more generic among databases this might be useful
     # to add to the superclass's find_unique_missing_in_db method
@@ -530,6 +585,8 @@ class SqliteSQLDiff(SQLDiff):
 
 class PostgresqlSQLDiff(SQLDiff):
+    can_detect_notnull_differ = True
+
     DATA_TYPES_REVERSE_OVERRIDE = {
         1042: 'CharField',
         # postgis types (TODO: support is very incomplete)
@@ -565,9 +622,7 @@ class PostgresqlSQLDiff(SQLDiff):
     def __init__(self, app_models, options):
         SQLDiff.__init__(self, app_models, options)
         self.check_constraints = {}
-        self.null = {}
         self.load_constraints()
-        self.load_null()
 
     def load_null(self):
         for dct in self.sql_to_dict(self.SQL_LOAD_NULL, []):
@@ -601,10 +656,6 @@ class PostgresqlSQLDiff(SQLDiff):
                     check_constraint = '("'.join([')' in e and '" '.join(p.strip('"') for p in e.split(" ", 1)) or e for e in check_constraint.split("(")])
                     # TODO: might be more than one constraint in definition ?
                     db_type += ' ' + check_constraint
-            null = self.null.get((tablespace, table_name, field.attname), 'fixme')
-            if field.null != null:
-                action = field.null and 'DROP' or 'SET'
-                self.add_difference('notnull-differ', table_name, field.name, action)
         return db_type
 
     @transaction.autocommit
@@ -657,10 +708,6 @@ to check/debug ur models compared to the real database tables and columns."""
     args = ''
 
     def handle(self, *app_labels, **options):
-        from django import VERSION
-        if VERSION[:2] < (1, 0):
-            raise CommandError("SQLDiff only support Django 1.0 or higher!")
-
         from django.db import models
         from django.conf import settings
diff --git a/awx/lib/site-packages/django_extensions/management/commands/sync_s3.py b/awx/lib/site-packages/django_extensions/management/commands/sync_s3.py
index 00fcae3e42..d00208cabe 100644
--- a/awx/lib/site-packages/django_extensions/management/commands/sync_s3.py
+++ b/awx/lib/site-packages/django_extensions/management/commands/sync_s3.py
@@ -10,6 +10,9 @@ This command can optionally do the following but it is off by default:
 * gzip compress any CSS and Javascript files it finds and adds the appropriate
   'Content-Encoding' header.
 * set a far future 'Expires' header for optimal caching.
+* upload only media or static files.
+* use any other provider compatible with Amazon S3.
+* set an ACL other than 'public-read'.
 
 Note: This script requires the Python boto library and valid Amazon Web
 Services API keys.
@@ -33,18 +36,20 @@ Command options are:
   --force          Skip the file mtime check to force upload of all files.
   --filter-list    Override default directory and file exclusion
-                   filters. (enter as comma seperated line)
+                   filters. (enter as comma separated line)
   --renamegzip     Enables renaming of gzipped files by appending '.gz'.
                    to the original file name. This way your original
                    assets will not be replaced by the gzipped ones.
                    You can change the extension setting the
                    `SYNC_S3_RENAME_GZIP_EXT` var in your settings.py file.
-  --invalidate     Invalidates the objects in CloudFront after uploaading
+  --invalidate     Invalidates the objects in CloudFront after uploading
                    stuff to s3.
   --media-only     Only MEDIA_ROOT files will be uploaded to S3.
   --static-only    Only STATIC_ROOT files will be uploaded to S3.
-
+  --s3host         Override default s3 host.
+  --acl            Override default ACL settings ('public-read' if
+                   settings.AWS_DEFAULT_ACL is not defined).
 
 TODO:
 * Use fnmatch (or regex) to allow more complex FILTER_LIST rules.
@@ -105,6 +110,14 @@ class Command(BaseCommand):
         make_option('-d', '--dir',
                     dest='dir',
                     help="Custom static root directory to use"),
+        make_option('--s3host',
+                    dest='s3host',
+                    default=getattr(settings, 'AWS_S3_HOST', ''),
+                    help="The s3 host (enables connecting to other providers/regions)"),
+        make_option('--acl',
+                    dest='acl',
+                    default=getattr(settings, 'AWS_DEFAULT_ACL', 'public-read'),
+                    help="Override the default ACL (public-read)."),
         make_option('--gzip',
                     action='store_true', dest='gzip', default=False,
                     help="Enables gzipping CSS and Javascript files."),
@@ -173,6 +186,8 @@ class Command(BaseCommand):
         self.do_force = options.get('force')
         self.invalidate = options.get('invalidate')
         self.DIRECTORIES = options.get('dir')
+        self.s3host = options.get('s3host')
+        self.default_acl = options.get('acl')
         self.FILTER_LIST = getattr(settings, 'FILTER_LIST', self.FILTER_LIST)
         filter_list = options.get('filter_list')
         if filter_list:
@@ -255,11 +270,21 @@ class Command(BaseCommand):
             zfile.close()
         return zbuf.getvalue()
 
+    def get_s3connection_kwargs(self):
+        """Returns connection kwargs as a dict"""
+        kwargs = {}
+        if self.s3host:
+            kwargs['host'] = self.s3host
+        return kwargs
+
     def open_s3(self):
         """
         Opens connection to S3 returning bucket and key
         """
-        conn = boto.connect_s3(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY)
+        conn = boto.connect_s3(
+            self.AWS_ACCESS_KEY_ID,
+            self.AWS_SECRET_ACCESS_KEY,
+            **self.get_s3connection_kwargs())
         try:
             bucket = conn.get_bucket(self.AWS_BUCKET_NAME)
         except boto.exception.S3ResponseError:
@@ -345,8 +370,8 @@ class Command(BaseCommand):
 
             try:
                 key.name = file_key
-                key.set_contents_from_string(filedata, headers, replace=True)
-                key.set_acl('public-read')
+                key.set_contents_from_string(filedata, headers, replace=True,
+                                             policy=self.default_acl)
             except boto.exception.S3CreateError as e:
                 print("Failed: %s" % e)
             except Exception as e:
diff --git a/awx/lib/site-packages/django_extensions/management/modelviz.py b/awx/lib/site-packages/django_extensions/management/modelviz.py
index ca2422c49b..9705820097 100644
--- a/awx/lib/site-packages/django_extensions/management/modelviz.py
+++ b/awx/lib/site-packages/django_extensions/management/modelviz.py
@@ -25,7 +25,7 @@ import os
 import datetime
 from django.utils.translation import activate as activate_language
 from django.utils.safestring import mark_safe
-from django.template import Context, loader
+from django.template import Context, loader, Template
 from django.db import models
 from django.db.models import get_models
 from django.db.models.fields.related import ForeignKey, OneToOneField, ManyToManyField, RelatedField
@@ -126,14 +126,16 @@ def generate_dot(app_labels, **kwargs):
                 continue
 
             if verbose_names and appmodel._meta.verbose_name:
-                model['label'] = appmodel._meta.verbose_name
+                model['label'] = appmodel._meta.verbose_name.decode("utf8")
             else:
                 model['label'] = model['name']
 
             # model attributes
             def add_attributes(field):
                 if verbose_names and field.verbose_name:
-                    label = field.verbose_name
+                    label = field.verbose_name.decode("utf8")
+                    if label.islower():
+                        label = label.capitalize()
                 else:
                     label = field.name
 
@@ -159,13 +161,13 @@ def generate_dot(app_labels, **kwargs):
 
             # find primary key and print it first, ignoring implicit id if other pk exists
             pk = appmodel._meta.pk
-            if not appmodel._meta.abstract and pk in attributes:
+            if pk and not appmodel._meta.abstract and pk in attributes:
                 add_attributes(pk)
 
             for field in attributes:
                 if skip_field(field):
                     continue
-                if field == pk:
+                if pk and field == pk:
                     continue
                 add_attributes(field)
@@ -182,13 +184,18 @@ def generate_dot(app_labels, **kwargs):
             # relations
             def add_relation(field, extras=""):
                 if verbose_names and field.verbose_name:
-                    label = field.verbose_name
+                    label = field.verbose_name.decode("utf8")
+                    if label.islower():
+                        label = label.capitalize()
                 else:
                     label = field.name
 
                 # show related field name
                 if hasattr(field, 'related_query_name'):
-                    label += ' (%s)' % field.related_query_name()
+                    related_query_name = field.related_query_name()
+                    if verbose_names and related_query_name.islower():
+                        related_query_name = related_query_name.replace('_', ' ').capitalize()
+                    label += ' (%s)' % related_query_name
 
                 # handle self-relationships
                 if field.rel.to == 'self':
@@ -269,6 +276,12 @@ def generate_dot(app_labels, **kwargs):
 
     now = datetime.datetime.now()
     t = loader.get_template('django_extensions/graph_models/digraph.dot')
+
+    if not isinstance(t, Template):
+        raise Exception("The default Django template loader isn't being used. "
+                        "This can lead to incorrect template rendering. "
+                        "Please check the settings.")
+
     c = Context({
         'created_at': now.strftime("%Y-%m-%d %H:%M"),
         'cli_options': cli_options,
diff --git a/awx/lib/site-packages/django_extensions/management/shells.py b/awx/lib/site-packages/django_extensions/management/shells.py
index e92d2cd1ea..ff2c9d3a0a 100644
--- a/awx/lib/site-packages/django_extensions/management/shells.py
+++ b/awx/lib/site-packages/django_extensions/management/shells.py
@@ -1,3 +1,5 @@
+import six
+import traceback
 
 
 class ObjectImportError(Exception):
@@ -57,10 +59,17 @@ def import_objects(options, style):
     # models from installed apps. (this is fixed by now, but leaving it here
     # for people using 0.96 or older trunk (pre [5919]) versions.
     from django.db.models.loading import get_models, get_apps
+    mongoengine = False
+    try:
+        from mongoengine.base import _document_registry
+        mongoengine = True
+    except:
+        pass
+
     loaded_models = get_models()  # NOQA
 
     from django.conf import settings
-    imported_objects = {'settings': settings}
+    imported_objects = {}
 
     dont_load_cli = options.get('dont_load')  # optparse will set this to [] if it doesn't exist
     dont_load_conf = getattr(settings, 'SHELL_PLUS_DONT_LOAD', [])
@@ -71,9 +80,31 @@ def import_objects(options, style):
 
     # Perform pre-imports before any other imports
     imports = import_items(getattr(settings, 'SHELL_PLUS_PRE_IMPORTS', {}))
-    for k, v in imports.items():
+    for k, v in six.iteritems(imports):
         imported_objects[k] = v
 
+    load_models = {}
+
+    if getattr(settings, 'SHELL_PLUS_DJANGO_IMPORTS', True):
+        load_models.update({
+            'django.core.cache': ['cache'],
+            'django.core.urlresolvers': ['reverse'],
+            'django.conf': ['settings'],
+            'django.db': ['transaction'],
+            'django.db.models': ['Avg', 'Count', 'F', 'Max', 'Min', 'Sum', 'Q'],
+            'django.utils': ['timezone'],
+        })
+
+    if mongoengine:
+        for name, mod in six.iteritems(_document_registry):
+            name = name.split('.')[-1]
+            app_name = mod.__module__.split('.')[-2]
+            if app_name in dont_load or ("%s.%s" % (app_name, name)) in dont_load:
+                continue
+
+            load_models.setdefault(mod.__module__, [])
+            load_models[mod.__module__].append(name)
+
     for app_mod in get_apps():
         app_models = get_models(app_mod)
         if not app_models:
@@ -83,13 +114,22 @@ def import_objects(options, style):
         if app_name in dont_load:
             continue
 
+        app_aliases = model_aliases.get(app_name, {})
+        for mod in app_models:
+            if "%s.%s" % (app_name, mod.__name__) in dont_load:
+                continue
+
+            load_models.setdefault(mod.__module__, [])
+            load_models[mod.__module__].append(mod.__name__)
+
+    for app_mod, models in sorted(six.iteritems(load_models)):
+        app_name = app_mod.split('.')[-2]
         app_aliases = model_aliases.get(app_name, {})
         model_labels = []
 
-        for model in app_models:
+        for model_name in sorted(models):
             try:
-                imported_object = getattr(__import__(app_mod.__name__, {}, {}, model.__name__), model.__name__)
-                model_name = model.__name__
+                imported_object = getattr(__import__(app_mod, {}, {}, model_name), model_name)
 
                 if "%s.%s" % (app_name, model_name) in dont_load:
                     continue
@@ -102,15 +142,18 @@ def import_objects(options, style):
                     model_labels.append("%s (as %s)" % (model_name, alias))
 
             except AttributeError as e:
+                if options.get("traceback"):
+                    traceback.print_exc()
                 if not quiet_load:
-                    print(style.ERROR("Failed to import '%s' from '%s' reason: %s" % (model.__name__, app_name, str(e))))
+                    print(style.ERROR("Failed to import '%s' from '%s' reason: %s" % (model_name, app_mod, str(e))))
                 continue
 
+        if not quiet_load:
-            print(style.SQL_COLTYPE("From '%s' autoload: %s" % (app_mod.__name__.split('.')[-2], ", ".join(model_labels))))
+            print(style.SQL_COLTYPE("from %s import %s" % (app_mod, ", ".join(model_labels))))
 
     # Perform post-imports after any other imports
     imports = import_items(getattr(settings, 'SHELL_PLUS_POST_IMPORTS', {}))
-    for k, v in imports.items():
+    for k, v in six.iteritems(imports):
         imported_objects[k] = v
 
     return imported_objects
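For context (not part of the diff), the autoload above boils down to an __import__/getattr pattern keyed on the load_models map; a stdlib module stands in here for an app's models module:

    load_models = {'collections': ['OrderedDict', 'defaultdict']}  # illustrative stand-in

    imported_objects = {}
    for module_path, names in sorted(load_models.items()):
        for name in sorted(names):
            # __import__ with a non-empty fromlist returns the leaf module itself
            imported_objects[name] = getattr(__import__(module_path, {}, {}, [name]), name)

    print(sorted(imported_objects))  # ['OrderedDict', 'defaultdict']
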
diff --git a/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery-1.7.2.min.js b/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery-1.7.2.min.js
new file mode 100644
index 0000000000..16ad06c5ac
--- /dev/null
+++ b/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery-1.7.2.min.js
@@ -0,0 +1,4 @@
+/*! jQuery v1.7.2 jquery.com | jquery.org/license */
+[minified jQuery 1.7.2 source omitted: this hunk vendors the stock upstream minified build, which is unreadable here and was further mangled in extraction; see jquery.com for the original file]

";if(!b.querySelectorAll||b.querySelectorAll(".TEST").length!==0){m=function(b,e,f,g){e=e||c;if(!g&&!m.isXML(e)){var h=/^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec(b);if(h&&(e.nodeType===1||e.nodeType===9)){if(h[1])return s(e.getElementsByTagName(b),f);if(h[2]&&o.find.CLASS&&e.getElementsByClassName)return s(e.getElementsByClassName(h[2]),f)}if(e.nodeType===9){if(b==="body"&&e.body)return s([e.body],f);if(h&&h[3]){var i=e.getElementById(h[3]);if(!i||!i.parentNode)return s([],f);if(i.id===h[3])return s([i],f)}try{return s(e.querySelectorAll(b),f)}catch(j){}}else if(e.nodeType===1&&e.nodeName.toLowerCase()!=="object"){var k=e,l=e.getAttribute("id"),n=l||d,p=e.parentNode,q=/^\s*[+~]/.test(b);l?n=n.replace(/'/g,"\\$&"):e.setAttribute("id",n),q&&p&&(e=e.parentNode);try{if(!q||p)return s(e.querySelectorAll("[id='"+n+"'] "+b),f)}catch(r){}finally{l||k.removeAttribute("id")}}}return a(b,e,f,g)};for(var e in a)m[e]=a[e];b=null}}(),function(){var a=c.documentElement,b=a.matchesSelector||a.mozMatchesSelector||a.webkitMatchesSelector||a.msMatchesSelector;if(b){var d=!b.call(c.createElement("div"),"div"),e=!1;try{b.call(c.documentElement,"[test!='']:sizzle")}catch(f){e=!0}m.matchesSelector=function(a,c){c=c.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!m.isXML(a))try{if(e||!o.match.PSEUDO.test(c)&&!/!=/.test(c)){var f=b.call(a,c);if(f||!d||a.document&&a.document.nodeType!==11)return f}}catch(g){}return m(c,null,null,[a]).length>0}}}(),function(){var a=c.createElement("div");a.innerHTML="
";if(!!a.getElementsByClassName&&a.getElementsByClassName("e").length!==0){a.lastChild.className="e";if(a.getElementsByClassName("e").length===1)return;o.order.splice(1,0,"CLASS"),o.find.CLASS=function(a,b,c){if(typeof b.getElementsByClassName!="undefined"&&!c)return b.getElementsByClassName(a[1])},a=null}}(),c.documentElement.contains?m.contains=function(a,b){return a!==b&&(a.contains?a.contains(b):!0)}:c.documentElement.compareDocumentPosition?m.contains=function(a,b){return!!(a.compareDocumentPosition(b)&16)}:m.contains=function(){return!1},m.isXML=function(a){var b=(a?a.ownerDocument||a:0).documentElement;return b?b.nodeName!=="HTML":!1};var y=function(a,b,c){var d,e=[],f="",g=b.nodeType?[b]:b;while(d=o.match.PSEUDO.exec(a))f+=d[0],a=a.replace(o.match.PSEUDO,"");a=o.relative[a]?a+"*":a;for(var h=0,i=g.length;h0)for(h=g;h=0:f.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c=[],d,e,g=this[0];if(f.isArray(a)){var h=1;while(g&&g.ownerDocument&&g!==b){for(d=0;d-1:f.find.matchesSelector(g,a)){c.push(g);break}g=g.parentNode;if(!g||!g.ownerDocument||g===b||g.nodeType===11)break}}c=c.length>1?f.unique(c):c;return this.pushStack(c,"closest",a)},index:function(a){if(!a)return this[0]&&this[0].parentNode?this.prevAll().length:-1;if(typeof a=="string")return f.inArray(this[0],f(a));return f.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var c=typeof a=="string"?f(a,b):f.makeArray(a&&a.nodeType?[a]:a),d=f.merge(this.get(),c);return this.pushStack(S(c[0])||S(d[0])?d:f.unique(d))},andSelf:function(){return this.add(this.prevObject)}}),f.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return f.dir(a,"parentNode")},parentsUntil:function(a,b,c){return f.dir(a,"parentNode",c)},next:function(a){return f.nth(a,2,"nextSibling")},prev:function(a){return f.nth(a,2,"previousSibling")},nextAll:function(a){return f.dir(a,"nextSibling")},prevAll:function(a){return f.dir(a,"previousSibling")},nextUntil:function(a,b,c){return f.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return f.dir(a,"previousSibling",c)},siblings:function(a){return f.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return f.sibling(a.firstChild)},contents:function(a){return f.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:f.makeArray(a.childNodes)}},function(a,b){f.fn[a]=function(c,d){var e=f.map(this,b,c);L.test(a)||(d=c),d&&typeof d=="string"&&(e=f.filter(d,e)),e=this.length>1&&!R[a]?f.unique(e):e,(this.length>1||N.test(d))&&M.test(a)&&(e=e.reverse());return this.pushStack(e,a,P.call(arguments).join(","))}}),f.extend({filter:function(a,b,c){c&&(a=":not("+a+")");return b.length===1?f.find.matchesSelector(b[0],a)?[b[0]]:[]:f.find.matches(a,b)},dir:function(a,c,d){var e=[],g=a[c];while(g&&g.nodeType!==9&&(d===b||g.nodeType!==1||!f(g).is(d)))g.nodeType===1&&e.push(g),g=g[c];return e},nth:function(a,b,c,d){b=b||1;var e=0;for(;a;a=a[c])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var V="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",W=/ jQuery\d+="(?:\d+|null)"/g,X=/^\s+/,Y=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Z=/<([\w:]+)/,$=/]","i"),bd=/checked\s*(?:[^=]|=\s*.checked.)/i,be=/\/(java|ecma)script/i,bf=/^\s*",""],legend:[1,"
","
"],thead:[1,"","
"],tr:[2,"","
"],td:[3,"","
"],col:[2,"","
"],area:[1,"",""],_default:[0,"",""]},bh=U(c);bg.optgroup=bg.option,bg.tbody=bg.tfoot=bg.colgroup=bg.caption=bg.thead,bg.th=bg.td,f.support.htmlSerialize||(bg._default=[1,"div
","
"]),f.fn.extend({text:function(a){return f.access(this,function(a){return a===b?f.text(this):this.empty().append((this[0]&&this[0].ownerDocument||c).createTextNode(a))},null,a,arguments.length)},wrapAll:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapAll(a.call(this,b))});if(this[0]){var b=f(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapInner(a.call(this,b))});return this.each(function(){var b=f(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=f.isFunction(a);return this.each(function(c){f(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){f.nodeName(this,"body")||f(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=f +.clean(arguments);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,f.clean(arguments));return a}},remove:function(a,b){for(var c=0,d;(d=this[c])!=null;c++)if(!a||f.filter(a,[d]).length)!b&&d.nodeType===1&&(f.cleanData(d.getElementsByTagName("*")),f.cleanData([d])),d.parentNode&&d.parentNode.removeChild(d);return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++){b.nodeType===1&&f.cleanData(b.getElementsByTagName("*"));while(b.firstChild)b.removeChild(b.firstChild)}return this},clone:function(a,b){a=a==null?!1:a,b=b==null?a:b;return this.map(function(){return f.clone(this,a,b)})},html:function(a){return f.access(this,function(a){var c=this[0]||{},d=0,e=this.length;if(a===b)return c.nodeType===1?c.innerHTML.replace(W,""):null;if(typeof a=="string"&&!ba.test(a)&&(f.support.leadingWhitespace||!X.test(a))&&!bg[(Z.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Y,"<$1>");try{for(;d1&&l0?this.clone(!0):this).get();f(e[h])[b](j),d=d.concat(j)}return this.pushStack(d,a,e.selector)}}),f.extend({clone:function(a,b,c){var d,e,g,h=f.support.html5Clone||f.isXMLDoc(a)||!bc.test("<"+a.nodeName+">")?a.cloneNode(!0):bo(a);if((!f.support.noCloneEvent||!f.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!f.isXMLDoc(a)){bk(a,h),d=bl(a),e=bl(h);for(g=0;d[g];++g)e[g]&&bk(d[g],e[g])}if(b){bj(a,h);if(c){d=bl(a),e=bl(h);for(g=0;d[g];++g)bj(d[g],e[g])}}d=e=null;return h},clean:function(a,b,d,e){var g,h,i,j=[];b=b||c,typeof b.createElement=="undefined"&&(b=b.ownerDocument||b[0]&&b[0].ownerDocument||c);for(var k=0,l;(l=a[k])!=null;k++){typeof l=="number"&&(l+="");if(!l)continue;if(typeof l=="string")if(!_.test(l))l=b.createTextNode(l);else{l=l.replace(Y,"<$1>");var m=(Z.exec(l)||["",""])[1].toLowerCase(),n=bg[m]||bg._default,o=n[0],p=b.createElement("div"),q=bh.childNodes,r;b===c?bh.appendChild(p):U(b).appendChild(p),p.innerHTML=n[1]+l+n[2];while(o--)p=p.lastChild;if(!f.support.tbody){var 
s=$.test(l),t=m==="table"&&!s?p.firstChild&&p.firstChild.childNodes:n[1]===""&&!s?p.childNodes:[];for(i=t.length-1;i>=0;--i)f.nodeName(t[i],"tbody")&&!t[i].childNodes.length&&t[i].parentNode.removeChild(t[i])}!f.support.leadingWhitespace&&X.test(l)&&p.insertBefore(b.createTextNode(X.exec(l)[0]),p.firstChild),l=p.childNodes,p&&(p.parentNode.removeChild(p),q.length>0&&(r=q[q.length-1],r&&r.parentNode&&r.parentNode.removeChild(r)))}var u;if(!f.support.appendChecked)if(l[0]&&typeof (u=l.length)=="number")for(i=0;i1)},f.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=by(a,"opacity");return c===""?"1":c}return a.style.opacity}}},cssNumber:{fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":f.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,c,d,e){if(!!a&&a.nodeType!==3&&a.nodeType!==8&&!!a.style){var g,h,i=f.camelCase(c),j=a.style,k=f.cssHooks[i];c=f.cssProps[i]||i;if(d===b){if(k&&"get"in k&&(g=k.get(a,!1,e))!==b)return g;return j[c]}h=typeof d,h==="string"&&(g=bu.exec(d))&&(d=+(g[1]+1)*+g[2]+parseFloat(f.css(a,c)),h="number");if(d==null||h==="number"&&isNaN(d))return;h==="number"&&!f.cssNumber[i]&&(d+="px");if(!k||!("set"in k)||(d=k.set(a,d))!==b)try{j[c]=d}catch(l){}}},css:function(a,c,d){var e,g;c=f.camelCase(c),g=f.cssHooks[c],c=f.cssProps[c]||c,c==="cssFloat"&&(c="float");if(g&&"get"in g&&(e=g.get(a,!0,d))!==b)return e;if(by)return by(a,c)},swap:function(a,b,c){var d={},e,f;for(f in b)d[f]=a.style[f],a.style[f]=b[f];e=c.call(a);for(f in b)a.style[f]=d[f];return e}}),f.curCSS=f.css,c.defaultView&&c.defaultView.getComputedStyle&&(bz=function(a,b){var c,d,e,g,h=a.style;b=b.replace(br,"-$1").toLowerCase(),(d=a.ownerDocument.defaultView)&&(e=d.getComputedStyle(a,null))&&(c=e.getPropertyValue(b),c===""&&!f.contains(a.ownerDocument.documentElement,a)&&(c=f.style(a,b))),!f.support.pixelMargin&&e&&bv.test(b)&&bt.test(c)&&(g=h.width,h.width=c,c=e.width,h.width=g);return c}),c.documentElement.currentStyle&&(bA=function(a,b){var c,d,e,f=a.currentStyle&&a.currentStyle[b],g=a.style;f==null&&g&&(e=g[b])&&(f=e),bt.test(f)&&(c=g.left,d=a.runtimeStyle&&a.runtimeStyle.left,d&&(a.runtimeStyle.left=a.currentStyle.left),g.left=b==="fontSize"?"1em":f,f=g.pixelLeft+"px",g.left=c,d&&(a.runtimeStyle.left=d));return f===""?"auto":f}),by=bz||bA,f.each(["height","width"],function(a,b){f.cssHooks[b]={get:function(a,c,d){if(c)return a.offsetWidth!==0?bB(a,b,d):f.swap(a,bw,function(){return bB(a,b,d)})},set:function(a,b){return bs.test(b)?b+"px":b}}}),f.support.opacity||(f.cssHooks.opacity={get:function(a,b){return bq.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=f.isNumeric(b)?"alpha(opacity="+b*100+")":"",g=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&f.trim(g.replace(bp,""))===""){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bp.test(g)?g.replace(bp,e):g+" "+e}}),f(function(){f.support.reliableMarginRight||(f.cssHooks.marginRight={get:function(a,b){return f.swap(a,{display:"inline-block"},function(){return b?by(a,"margin-right"):a.style.marginRight})}})}),f.expr&&f.expr.filters&&(f.expr.filters.hidden=function(a){var b=a.offsetWidth,c=a.offsetHeight;return 
b===0&&c===0||!f.support.reliableHiddenOffsets&&(a.style&&a.style.display||f.css(a,"display"))==="none"},f.expr.filters.visible=function(a){return!f.expr.filters.hidden(a)}),f.each({margin:"",padding:"",border:"Width"},function(a,b){f.cssHooks[a+b]={expand:function(c){var d,e=typeof c=="string"?c.split(" "):[c],f={};for(d=0;d<4;d++)f[a+bx[d]+b]=e[d]||e[d-2]||e[0];return f}}});var bC=/%20/g,bD=/\[\]$/,bE=/\r?\n/g,bF=/#.*$/,bG=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,bH=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,bI=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,bJ=/^(?:GET|HEAD)$/,bK=/^\/\//,bL=/\?/,bM=/)<[^<]*)*<\/script>/gi,bN=/^(?:select|textarea)/i,bO=/\s+/,bP=/([?&])_=[^&]*/,bQ=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,bR=f.fn.load,bS={},bT={},bU,bV,bW=["*/"]+["*"];try{bU=e.href}catch(bX){bU=c.createElement("a"),bU.href="",bU=bU.href}bV=bQ.exec(bU.toLowerCase())||[],f.fn.extend({load:function(a,c,d){if(typeof a!="string"&&bR)return bR.apply(this,arguments);if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var g=a.slice(e,a.length);a=a.slice(0,e)}var h="GET";c&&(f.isFunction(c)?(d=c,c=b):typeof c=="object"&&(c=f.param(c,f.ajaxSettings.traditional),h="POST"));var i=this;f.ajax({url:a,type:h,dataType:"html",data:c,complete:function(a,b,c){c=a.responseText,a.isResolved()&&(a.done(function(a){c=a}),i.html(g?f("
").append(c.replace(bM,"")).find(g):c)),d&&i.each(d,[c,b,a])}});return this},serialize:function(){return f.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?f.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||bN.test(this.nodeName)||bH.test(this.type))}).map(function(a,b){var c=f(this).val();return c==null?null:f.isArray(c)?f.map(c,function(a,c){return{name:b.name,value:a.replace(bE,"\r\n")}}):{name:b.name,value:c.replace(bE,"\r\n")}}).get()}}),f.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){f.fn[b]=function(a){return this.on(b,a)}}),f.each(["get","post"],function(a,c){f[c]=function(a,d,e,g){f.isFunction(d)&&(g=g||e,e=d,d=b);return f.ajax({type:c,url:a,data:d,success:e,dataType:g})}}),f.extend({getScript:function(a,c){return f.get(a,b,c,"script")},getJSON:function(a,b,c){return f.get(a,b,c,"json")},ajaxSetup:function(a,b){b?b$(a,f.ajaxSettings):(b=a,a=f.ajaxSettings),b$(a,b);return a},ajaxSettings:{url:bU,isLocal:bI.test(bV[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded; charset=UTF-8",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":bW},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":f.parseJSON,"text xml":f.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:bY(bS),ajaxTransport:bY(bT),ajax:function(a,c){function w(a,c,l,m){if(s!==2){s=2,q&&clearTimeout(q),p=b,n=m||"",v.readyState=a>0?4:0;var o,r,u,w=c,x=l?ca(d,v,l):b,y,z;if(a>=200&&a<300||a===304){if(d.ifModified){if(y=v.getResponseHeader("Last-Modified"))f.lastModified[k]=y;if(z=v.getResponseHeader("Etag"))f.etag[k]=z}if(a===304)w="notmodified",o=!0;else try{r=cb(d,x),w="success",o=!0}catch(A){w="parsererror",u=A}}else{u=w;if(!w||a)w="error",a<0&&(a=0)}v.status=a,v.statusText=""+(c||w),o?h.resolveWith(e,[r,w,v]):h.rejectWith(e,[v,w,u]),v.statusCode(j),j=b,t&&g.trigger("ajax"+(o?"Success":"Error"),[v,d,o?r:u]),i.fireWith(e,[v,w]),t&&(g.trigger("ajaxComplete",[v,d]),--f.active||f.event.trigger("ajaxStop"))}}typeof a=="object"&&(c=a,a=b),c=c||{};var d=f.ajaxSetup({},c),e=d.context||d,g=e!==d&&(e.nodeType||e instanceof f)?f(e):f.event,h=f.Deferred(),i=f.Callbacks("once memory"),j=d.statusCode||{},k,l={},m={},n,o,p,q,r,s=0,t,u,v={readyState:0,setRequestHeader:function(a,b){if(!s){var c=a.toLowerCase();a=m[c]=m[c]||a,l[a]=b}return this},getAllResponseHeaders:function(){return s===2?n:null},getResponseHeader:function(a){var c;if(s===2){if(!o){o={};while(c=bG.exec(n))o[c[1].toLowerCase()]=c[2]}c=o[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){s||(d.mimeType=a);return this},abort:function(a){a=a||"abort",p&&p.abort(a),w(0,a);return this}};h.promise(v),v.success=v.done,v.error=v.fail,v.complete=i.add,v.statusCode=function(a){if(a){var b;if(s<2)for(b in a)j[b]=[j[b],a[b]];else b=a[v.status],v.then(b,b)}return this},d.url=((a||d.url)+"").replace(bF,"").replace(bK,bV[1]+"//"),d.dataTypes=f.trim(d.dataType||"*").toLowerCase().split(bO),d.crossDomain==null&&(r=bQ.exec(d.url.toLowerCase()),d.crossDomain=!(!r||r[1]==bV[1]&&r[2]==bV[2]&&(r[3]||(r[1]==="http:"?80:443))==(bV[3]||(bV[1]==="http:"?80:443)))),d.data&&d.processData&&typeof 
d.data!="string"&&(d.data=f.param(d.data,d.traditional)),bZ(bS,d,c,v);if(s===2)return!1;t=d.global,d.type=d.type.toUpperCase(),d.hasContent=!bJ.test(d.type),t&&f.active++===0&&f.event.trigger("ajaxStart");if(!d.hasContent){d.data&&(d.url+=(bL.test(d.url)?"&":"?")+d.data,delete d.data),k=d.url;if(d.cache===!1){var x=f.now(),y=d.url.replace(bP,"$1_="+x);d.url=y+(y===d.url?(bL.test(d.url)?"&":"?")+"_="+x:"")}}(d.data&&d.hasContent&&d.contentType!==!1||c.contentType)&&v.setRequestHeader("Content-Type",d.contentType),d.ifModified&&(k=k||d.url,f.lastModified[k]&&v.setRequestHeader("If-Modified-Since",f.lastModified[k]),f.etag[k]&&v.setRequestHeader("If-None-Match",f.etag[k])),v.setRequestHeader("Accept",d.dataTypes[0]&&d.accepts[d.dataTypes[0]]?d.accepts[d.dataTypes[0]]+(d.dataTypes[0]!=="*"?", "+bW+"; q=0.01":""):d.accepts["*"]);for(u in d.headers)v.setRequestHeader(u,d.headers[u]);if(d.beforeSend&&(d.beforeSend.call(e,v,d)===!1||s===2)){v.abort();return!1}for(u in{success:1,error:1,complete:1})v[u](d[u]);p=bZ(bT,d,c,v);if(!p)w(-1,"No Transport");else{v.readyState=1,t&&g.trigger("ajaxSend",[v,d]),d.async&&d.timeout>0&&(q=setTimeout(function(){v.abort("timeout")},d.timeout));try{s=1,p.send(l,w)}catch(z){if(s<2)w(-1,z);else throw z}}return v},param:function(a,c){var d=[],e=function(a,b){b=f.isFunction(b)?b():b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=f.ajaxSettings.traditional);if(f.isArray(a)||a.jquery&&!f.isPlainObject(a))f.each(a,function(){e(this.name,this.value)});else for(var g in a)b_(g,a[g],c,e);return d.join("&").replace(bC,"+")}}),f.extend({active:0,lastModified:{},etag:{}});var cc=f.now(),cd=/(\=)\?(&|$)|\?\?/i;f.ajaxSetup({jsonp:"callback",jsonpCallback:function(){return f.expando+"_"+cc++}}),f.ajaxPrefilter("json jsonp",function(b,c,d){var e=typeof b.data=="string"&&/^application\/x\-www\-form\-urlencoded/.test(b.contentType);if(b.dataTypes[0]==="jsonp"||b.jsonp!==!1&&(cd.test(b.url)||e&&cd.test(b.data))){var g,h=b.jsonpCallback=f.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,i=a[h],j=b.url,k=b.data,l="$1"+h+"$2";b.jsonp!==!1&&(j=j.replace(cd,l),b.url===j&&(e&&(k=k.replace(cd,l)),b.data===k&&(j+=(/\?/.test(j)?"&":"?")+b.jsonp+"="+h))),b.url=j,b.data=k,a[h]=function(a){g=[a]},d.always(function(){a[h]=i,g&&f.isFunction(i)&&a[h](g[0])}),b.converters["script json"]=function(){g||f.error(h+" was not called");return g[0]},b.dataTypes[0]="json";return"script"}}),f.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){f.globalEval(a);return a}}}),f.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),f.ajaxTransport("script",function(a){if(a.crossDomain){var d,e=c.head||c.getElementsByTagName("head")[0]||c.documentElement;return{send:function(f,g){d=c.createElement("script"),d.async="async",a.scriptCharset&&(d.charset=a.scriptCharset),d.src=a.url,d.onload=d.onreadystatechange=function(a,c){if(c||!d.readyState||/loaded|complete/.test(d.readyState))d.onload=d.onreadystatechange=null,e&&d.parentNode&&e.removeChild(d),d=b,c||g(200,"success")},e.insertBefore(d,e.firstChild)},abort:function(){d&&d.onload(0,1)}}}});var ce=a.ActiveXObject?function(){for(var a in cg)cg[a](0,1)}:!1,cf=0,cg;f.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&ch()||ci()}:ch,function(a){f.extend(f.support,{ajax:!!a,cors:!!a&&"withCredentials"in 
a})}(f.ajaxSettings.xhr()),f.support.ajax&&f.ajaxTransport(function(c){if(!c.crossDomain||f.support.cors){var d;return{send:function(e,g){var h=c.xhr(),i,j;c.username?h.open(c.type,c.url,c.async,c.username,c.password):h.open(c.type,c.url,c.async);if(c.xhrFields)for(j in c.xhrFields)h[j]=c.xhrFields[j];c.mimeType&&h.overrideMimeType&&h.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(j in e)h.setRequestHeader(j,e[j])}catch(k){}h.send(c.hasContent&&c.data||null),d=function(a,e){var j,k,l,m,n;try{if(d&&(e||h.readyState===4)){d=b,i&&(h.onreadystatechange=f.noop,ce&&delete cg[i]);if(e)h.readyState!==4&&h.abort();else{j=h.status,l=h.getAllResponseHeaders(),m={},n=h.responseXML,n&&n.documentElement&&(m.xml=n);try{m.text=h.responseText}catch(a){}try{k=h.statusText}catch(o){k=""}!j&&c.isLocal&&!c.crossDomain?j=m.text?200:404:j===1223&&(j=204)}}}catch(p){e||g(-1,p)}m&&g(j,k,m,l)},!c.async||h.readyState===4?d():(i=++cf,ce&&(cg||(cg={},f(a).unload(ce)),cg[i]=d),h.onreadystatechange=d)},abort:function(){d&&d(0,1)}}}});var cj={},ck,cl,cm=/^(?:toggle|show|hide)$/,cn=/^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,co,cp=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]],cq;f.fn.extend({show:function(a,b,c){var d,e;if(a||a===0)return this.animate(ct("show",3),a,b,c);for(var g=0,h=this.length;g=i.duration+this.startTime){this.now=this.end,this.pos=this.state=1,this.update(),i.animatedProperties[this.prop]=!0;for(b in i.animatedProperties)i.animatedProperties[b]!==!0&&(g=!1);if(g){i.overflow!=null&&!f.support.shrinkWrapBlocks&&f.each(["","X","Y"],function(a,b){h.style["overflow"+b]=i.overflow[a]}),i.hide&&f(h).hide();if(i.hide||i.show)for(b in i.animatedProperties)f.style(h,b,i.orig[b]),f.removeData(h,"fxshow"+b,!0),f.removeData(h,"toggle"+b,!0);d=i.complete,d&&(i.complete=!1,d.call(h))}return!1}i.duration==Infinity?this.now=e:(c=e-this.startTime,this.state=c/i.duration,this.pos=f.easing[i.animatedProperties[this.prop]](this.state,c,0,1,i.duration),this.now=this.start+(this.end-this.start)*this.pos),this.update();return!0}},f.extend(f.fx,{tick:function(){var a,b=f.timers,c=0;for(;c-1,k={},l={},m,n;j?(l=e.position(),m=l.top,n=l.left):(m=parseFloat(h)||0,n=parseFloat(i)||0),f.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):e.css(k)}},f.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),c=this.offset(),d=cx.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(f.css(a,"marginTop"))||0,c.left-=parseFloat(f.css(a,"marginLeft"))||0,d.top+=parseFloat(f.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(f.css(b[0],"borderLeftWidth"))||0;return{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||c.body;while(a&&!cx.test(a.nodeName)&&f.css(a,"position")==="static")a=a.offsetParent;return a})}}),f.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,c){var d=/Y/.test(c);f.fn[a]=function(e){return f.access(this,function(a,e,g){var h=cy(a);if(g===b)return h?c in h?h[c]:f.support.boxModel&&h.document.documentElement[e]||h.document.body[e]:a[e];h?h.scrollTo(d?f(h).scrollLeft():g,d?g:f(h).scrollTop()):a[e]=g},a,e,arguments.length,null)}}),f.each({Height:"height",Width:"width"},function(a,c){var d="client"+a,e="scroll"+a,g="offset"+a;f.fn["inner"+a]=function(){var 
\ No newline at end of file
diff --git a/awx/lib/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html b/awx/lib/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html
index dcb8f18b32..bf6e3cc080 100644
--- a/awx/lib/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html
+++ b/awx/lib/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html
@@ -1,15 +1,14 @@
-{% load i18n %}
+{% load i18n staticfiles %}
-  {% trans
+  {% trans
[the remaining template markup of this hunk was stripped in extraction; only the two {% trans %} fragments above survive]
diff --git a/awx/lib/site-packages/django_extensions/templatetags/widont.py b/awx/lib/site-packages/django_extensions/templatetags/widont.py
index d42833f941..a2d0af30f4 100644
--- a/awx/lib/site-packages/django_extensions/templatetags/widont.py
+++ b/awx/lib/site-packages/django_extensions/templatetags/widont.py
@@ -1,7 +1,10 @@
-from django.template import Library
-from django.utils.encoding import force_unicode
 import re
-import six
+from django.template import Library
+try:
+    from django.utils.encoding import force_text
+except ImportError:
+    # Django 1.4 compatibility
+    from django.utils.encoding import force_unicode as force_text
 
 register = Library()
 re_widont = re.compile(r'\s+(\S+\s*)$')
@@ -25,9 +28,9 @@ def widont(value, count=1):
     NoEffect
     """
     def replace(matchobj):
-        return six.u('&nbsp;%s' % matchobj.group(1))
+        return force_text('&nbsp;%s' % matchobj.group(1))
     for i in range(count):
-        value = re_widont.sub(replace, force_unicode(value))
+        value = re_widont.sub(replace, force_text(value))
     return value
@@ -49,8 +52,8 @@ def widont_html(value):
     leading text
     test me out
     trailing text
     """
    def replace(matchobj):
-        return six.u('%s&nbsp;%s%s' % matchobj.groups())
-    return re_widont_html.sub(replace, force_unicode(value))
+        return force_text('%s&nbsp;%s%s' % matchobj.groups())
+    return re_widont_html.sub(replace, force_text(value))
 
 register.filter(widont)
 register.filter(widont_html)
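The widont.py change is mechanical: force_unicode becomes a force_text alias (with a Django 1.4 fallback) and six.u() is dropped, so the filter's visible behavior is unchanged. A minimal sketch of that behavior, assuming the upstream '&nbsp;' literal that HTML extraction flattened to a plain space in the hunks above:

    from django_extensions.templatetags.widont import widont

    # The final space is replaced with &nbsp; so the last word can never
    # end up alone ("widowed") on its own line.
    print(widont('A very simple test'))
    # -> u'A very simple&nbsp;test'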
diff --git a/awx/lib/site-packages/django_extensions/tests/__init__.py b/awx/lib/site-packages/django_extensions/tests/__init__.py
index 2da4f14d00..5e97235156 100644
--- a/awx/lib/site-packages/django_extensions/tests/__init__.py
+++ b/awx/lib/site-packages/django_extensions/tests/__init__.py
@@ -5,11 +5,11 @@ from django_extensions.tests.json_field import JsonFieldTest
 from django_extensions.tests.uuid_field import UUIDFieldTest
 from django_extensions.tests.fields import AutoSlugFieldTest
 from django_extensions.tests.management_command import CommandTest, ShowTemplateTagsTests
-
+from django_extensions.tests.test_templatetags import TemplateTagsTests
 
 __test_classes__ = [
     DumpScriptTests, JsonFieldTest, UUIDFieldTest, AutoSlugFieldTest, CommandTest,
-    ShowTemplateTagsTests, TruncateLetterTests
+    ShowTemplateTagsTests, TruncateLetterTests, TemplateTagsTests
 ]
 
 try:
diff --git a/awx/lib/site-packages/django_extensions/tests/encrypted_fields.py b/awx/lib/site-packages/django_extensions/tests/encrypted_fields.py
index ad391e83ac..dee9fd3b57 100644
--- a/awx/lib/site-packages/django_extensions/tests/encrypted_fields.py
+++ b/awx/lib/site-packages/django_extensions/tests/encrypted_fields.py
@@ -2,12 +2,11 @@ from contextlib import contextmanager
 import functools
 
 from django.conf import settings
-from django.core.management import call_command
 from django.db import connection, models
 from django.db.models import loading
-from django.utils import unittest
 
 from django_extensions.tests.models import Secret
+from django_extensions.tests.fields import FieldTestCase
 
 # Only perform encrypted fields tests if keyczar is present. Resolves
 # http://github.com/django-extensions/django-extensions/issues/#issue/17
@@ -128,18 +127,7 @@ def secret_model():
         pass
 
 
-class EncryptedFieldsTestCase(unittest.TestCase):
-
-    def setUp(self):
-        self.old_installed_apps = settings.INSTALLED_APPS
-        settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
-        settings.INSTALLED_APPS.append('django_extensions.tests')
-        loading.cache.loaded = False
-        call_command('syncdb', verbosity=0)
-
-    def tearDown(self):
-        settings.INSTALLED_APPS = self.old_installed_apps
-
+class EncryptedFieldsTestCase(FieldTestCase):
     @run_if_active
     def testCharFieldCreate(self):
         """
diff --git a/awx/lib/site-packages/django_extensions/tests/fields.py b/awx/lib/site-packages/django_extensions/tests/fields.py
index 3edd360e79..ab35067259 100644
--- a/awx/lib/site-packages/django_extensions/tests/fields.py
+++ b/awx/lib/site-packages/django_extensions/tests/fields.py
@@ -16,18 +16,27 @@ class ChildSluggedTestModel(SluggedTestModel):
     pass
 
 
-class AutoSlugFieldTest(unittest.TestCase):
+class FieldTestCase(unittest.TestCase):
     def setUp(self):
         self.old_installed_apps = settings.INSTALLED_APPS
         settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
         settings.INSTALLED_APPS.append('django_extensions.tests')
         loading.cache.loaded = False
-        call_command('syncdb', verbosity=0)
+
+        # Don't migrate if south is installed
+        migrate = 'south' not in settings.INSTALLED_APPS
+        call_command('syncdb', verbosity=0, migrate=migrate)
 
     def tearDown(self):
-        SluggedTestModel.objects.all().delete()
         settings.INSTALLED_APPS = self.old_installed_apps
 
+
+class AutoSlugFieldTest(FieldTestCase):
+    def tearDown(self):
+        super(AutoSlugFieldTest, self).tearDown()
+
+        SluggedTestModel.objects.all().delete()
+
     def testAutoCreateSlug(self):
         m = SluggedTestModel(title='foo')
         m.save()
@@ -55,15 +64,22 @@ class AutoSlugFieldTest(unittest.TestCase):
     def testUpdateSlug(self):
         m = SluggedTestModel(title='foo')
         m.save()
+        self.assertEqual(m.slug, 'foo')
 
         # update m instance without using `save'
         SluggedTestModel.objects.filter(pk=m.pk).update(slug='foo-2012')
         # update m instance with new data from the db
         m = SluggedTestModel.objects.get(pk=m.pk)
-        self.assertEqual(m.slug, 'foo-2012')
 
         m.save()
+        self.assertEqual(m.title, 'foo')
+        self.assertEqual(m.slug, 'foo-2012')
+
+        # Check slug is not overwrite
+        m.title = 'bar'
+        m.save()
+        self.assertEqual(m.title, 'bar')
         self.assertEqual(m.slug, 'foo-2012')
 
     def testSimpleSlugSource(self):
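The fields.py hunk above is the hub of the test changes in this patch: the INSTALLED_APPS/syncdb fixture that was copy-pasted into every bundled test module is hoisted into a shared FieldTestCase, and json_field.py, test_dumpscript.py, uuid_field.py and encrypted_fields.py simply subclass it. The pattern, reduced to a runnable sketch (class names here are illustrative, not from the diff):

    import unittest

    class SharedFixtureCase(unittest.TestCase):
        """Plays the role of FieldTestCase: owns the shared fixture."""
        def setUp(self):
            self.events = ['shared setUp']

        def tearDown(self):
            self.events.append('shared tearDown')

    class ConcreteTest(SharedFixtureCase):
        """Plays the role of e.g. DumpScriptTests: extends, then delegates."""
        def setUp(self):
            super(ConcreteTest, self).setUp()  # shared fixture runs first
            self.events.append('local setUp')

        def test_fixture_order(self):
            self.assertEqual(self.events, ['shared setUp', 'local setUp'])

    if __name__ == '__main__':
        unittest.main()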
diff --git a/awx/lib/site-packages/django_extensions/tests/json_field.py b/awx/lib/site-packages/django_extensions/tests/json_field.py
index e9aed0ffc0..73221a129c 100644
--- a/awx/lib/site-packages/django_extensions/tests/json_field.py
+++ b/awx/lib/site-packages/django_extensions/tests/json_field.py
@@ -1,10 +1,7 @@
-from django.conf import settings
-from django.core.management import call_command
-from django.db.models import loading
 from django.db import models
-from django.utils import unittest
 
 from django_extensions.db.fields.json import JSONField
+from django_extensions.tests.fields import FieldTestCase
 
 
 class TestModel(models.Model):
@@ -12,17 +9,7 @@ class TestModel(models.Model):
     j_field = JSONField()
 
 
-class JsonFieldTest(unittest.TestCase):
-    def setUp(self):
-        self.old_installed_apps = settings.INSTALLED_APPS
-        settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
-        settings.INSTALLED_APPS.append('django_extensions.tests')
-        loading.cache.loaded = False
-        call_command('syncdb', verbosity=0)
-
-    def tearDown(self):
-        settings.INSTALLED_APPS = self.old_installed_apps
-
+class JsonFieldTest(FieldTestCase):
     def testCharFieldCreate(self):
         j = TestModel.objects.create(a=6, j_field=dict(foo='bar'))
         self.assertEqual(j.a, 6)
diff --git a/awx/lib/site-packages/django_extensions/tests/test_dumpscript.py b/awx/lib/site-packages/django_extensions/tests/test_dumpscript.py
index 90aa2898bd..f528147aa6 100644
--- a/awx/lib/site-packages/django_extensions/tests/test_dumpscript.py
+++ b/awx/lib/site-packages/django_extensions/tests/test_dumpscript.py
@@ -6,34 +6,26 @@ if sys.version_info[:2] >= (2, 6):
 else:
     import compiler  # NOQA
 
-from django.test import TestCase
-
 from django.core.management import call_command
+
 from django_extensions.tests.models import Name, Note, Person
-
-from django.conf import settings
-from django.db.models import loading
+from django_extensions.tests.fields import FieldTestCase
 
 
-class DumpScriptTests(TestCase):
+class DumpScriptTests(FieldTestCase):
     def setUp(self):
+        super(DumpScriptTests, self).setUp()
+
         self.real_stdout = sys.stdout
         self.real_stderr = sys.stderr
         sys.stdout = six.StringIO()
         sys.stderr = six.StringIO()
 
-        self.original_installed_apps = settings.INSTALLED_APPS
-        settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
-        settings.INSTALLED_APPS.append('django_extensions.tests')
-        loading.cache.loaded = False
-        call_command('syncdb', verbosity=0)
-
     def tearDown(self):
+        super(DumpScriptTests, self).tearDown()
+
         sys.stdout = self.real_stdout
         sys.stderr = self.real_stderr
-        settings.INSTALLED_APPS.remove('django_extensions.tests')
-        settings.INSTALLED_APPS = self.original_installed_apps
-        loading.cache.loaded = False
 
     def test_runs(self):
         # lame test...does it run?
diff --git a/awx/lib/site-packages/django_extensions/tests/test_templatetags.py b/awx/lib/site-packages/django_extensions/tests/test_templatetags.py
new file mode 100644
index 0000000000..db69fa0ac9
--- /dev/null
+++ b/awx/lib/site-packages/django_extensions/tests/test_templatetags.py
@@ -0,0 +1,13 @@
+from django.test import TestCase
+
+from django_extensions.templatetags.widont import widont, widont_html
+
+
+class TemplateTagsTests(TestCase):
+    def test_widont(self):
+        widont('Test Value')
+        widont(u'Test Value')
+
+    def test_widont_html(self):
+        widont_html('Test Value')
+        widont_html(u'Test Value')
diff --git a/awx/lib/site-packages/django_extensions/tests/uuid_field.py b/awx/lib/site-packages/django_extensions/tests/uuid_field.py
index 823ff2e2ba..72c793ba80 100644
--- a/awx/lib/site-packages/django_extensions/tests/uuid_field.py
+++ b/awx/lib/site-packages/django_extensions/tests/uuid_field.py
@@ -1,11 +1,9 @@
 import six
-from django.conf import settings
-from django.core.management import call_command
-from django.db.models import loading
+
 from django.db import models
-from django.utils import unittest
 
 from django_extensions.db.fields import UUIDField
+from django_extensions.tests.fields import FieldTestCase
 
 
 class TestModel_field(models.Model):
@@ -25,17 +23,7 @@ class TestManyToManyModel(TestModel_pk):
     many = models.ManyToManyField(TestModel_field)
 
 
-class UUIDFieldTest(unittest.TestCase):
-    def setUp(self):
-        self.old_installed_apps = settings.INSTALLED_APPS
-        settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
-        settings.INSTALLED_APPS.append('django_extensions.tests')
-        loading.cache.loaded = False
-        call_command('syncdb', verbosity=0)
-
-    def tearDown(self):
-        settings.INSTALLED_APPS = self.old_installed_apps
-
+class UUIDFieldTest(FieldTestCase):
     def testUUIDFieldCreate(self):
         j = TestModel_field.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
         self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))
diff --git a/awx/lib/site-packages/django_extensions/utils/dia2django.py b/awx/lib/site-packages/django_extensions/utils/dia2django.py
index 91a48631df..28d664b733 100644
--- a/awx/lib/site-packages/django_extensions/utils/dia2django.py
+++ b/awx/lib/site-packages/django_extensions/utils/dia2django.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
 # -*- coding: UTF-8 -*-
 ##Author Igor Támara igor@tamarapatino.org
 ##Use this little program as you wish, if you
@@ -12,12 +11,12 @@
 
 dependclasses = ["User", "Group", "Permission", "Message"]
 
-import codecs
-import sys
-import gzip
-from xml.dom.minidom import *  # NOQA
 import re
 import six
+import sys
+import gzip
+import codecs
+from xml.dom.minidom import *  # NOQA
 
 #Type dictionary translation types SQL -> Django
 tsd = {
@@ -172,7 +171,7 @@ def dia2django(archivo):
     #Ordering the appearance of classes
     #First we make a list of the classes each classs is related to.
     ordered = []
-    for j, k in clases.iteritems():
+    for j, k in six.iteritems(clases):
         k[2] = k[2] + "\n    def __unicode__(self):\n        return u\"\"\n"
         for fk in k[0]:
             if fk not in dependclasses:
diff --git a/awx/lib/site-packages/djcelery/__init__.py b/awx/lib/site-packages/djcelery/__init__.py
index 7b3d0eb8a5..5ff946410f 100644
--- a/awx/lib/site-packages/djcelery/__init__.py
+++ b/awx/lib/site-packages/djcelery/__init__.py
@@ -5,7 +5,7 @@ from __future__ import absolute_import, unicode_literals
 
 import os
 
-VERSION = (3, 1, 1)
+VERSION = (3, 1, 10)
 __version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
 __author__ = 'Ask Solem'
 __contact__ = 'ask@celeryproject.org'
diff --git a/awx/lib/site-packages/djcelery/admin.py b/awx/lib/site-packages/djcelery/admin.py
index 14313ece5e..0cab8e5fdc 100644
--- a/awx/lib/site-packages/djcelery/admin.py
+++ b/awx/lib/site-packages/djcelery/admin.py
@@ -200,8 +200,8 @@ class TaskMonitor(ModelMonitor):
             actions.pop('delete_selected', None)
         return actions
 
-    def queryset(self, request):
-        qs = super(TaskMonitor, self).queryset(request)
+    def get_queryset(self, request):
+        qs = super(TaskMonitor, self).get_queryset(request)
         return qs.select_related('worker')
 
 
@@ -261,6 +261,7 @@ def periodic_task_form():
 
         class Meta:
             model = PeriodicTask
+            exclude = ()
 
         def clean(self):
             data = super(PeriodicTaskForm, self).clean()
@@ -326,8 +327,8 @@ class PeriodicTaskAdmin(admin.ModelAdmin):
         return super(PeriodicTaskAdmin, self).changelist_view(request,
                                                               extra_context)
 
-    def queryset(self, request):
-        qs = super(PeriodicTaskAdmin, self).queryset(request)
+    def get_queryset(self, request):
+        qs = super(PeriodicTaskAdmin, self).get_queryset(request)
         return qs.select_related('interval', 'crontab')
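The queryset -> get_queryset renames above track Django 1.6, which renamed ModelAdmin.queryset() (and Manager.get_query_set()); on 1.6+ an override spelled the old way would silently never be called. A minimal sketch of the new spelling (the model and select_related target mirror the diff; the admin class name is made up):

    from django.contrib import admin
    from djcelery.models import TaskState

    class TaskStateAdmin(admin.ModelAdmin):
        # Django 1.6+ entry point; before 1.6 this hook was queryset().
        def get_queryset(self, request):
            qs = super(TaskStateAdmin, self).get_queryset(request)
            return qs.select_related('worker')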
diff --git a/awx/lib/site-packages/djcelery/contrib/test_runner.py b/awx/lib/site-packages/djcelery/contrib/test_runner.py
index 944e49ccae..e65de297ee 100644
--- a/awx/lib/site-packages/djcelery/contrib/test_runner.py
+++ b/awx/lib/site-packages/djcelery/contrib/test_runner.py
@@ -1,11 +1,14 @@
 from __future__ import absolute_import, unicode_literals
 
 from django.conf import settings
-from django.test.simple import DjangoTestSuiteRunner
+try:
+    from django.test.runner import DiscoverRunner
+except ImportError:
+    from django.test.simple import DjangoTestSuiteRunner as DiscoverRunner
 
-from djcelery.app import app
-from djcelery.backends.database import DatabaseBackend
+from celery import current_app
 from celery.task import Task
+from djcelery.backends.database import DatabaseBackend
 
 
 USAGE = """\
@@ -15,12 +18,12 @@ Custom test runner to allow testing of celery delayed tasks.
 
 
 def _set_eager():
     settings.CELERY_ALWAYS_EAGER = True
-    app.conf.CELERY_ALWAYS_EAGER = True
+    current_app.conf.CELERY_ALWAYS_EAGER = True
     settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True  # Issue #75
-    app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
+    current_app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
 
 
-class CeleryTestSuiteRunner(DjangoTestSuiteRunner):
+class CeleryTestSuiteRunner(DiscoverRunner):
     """Django test runner allowing testing of celery delayed tasks.
 
     All tasks are run locally, not in a worker.
@@ -35,7 +38,7 @@ class CeleryTestSuiteRunner(DjangoTestSuiteRunner):
         super(CeleryTestSuiteRunner, self).setup_test_environment(**kwargs)
 
 
-class CeleryTestSuiteRunnerStoringResult(DjangoTestSuiteRunner):
+class CeleryTestSuiteRunnerStoringResult(DiscoverRunner):
     """Django test runner allowing testing of celery delayed tasks,
     and storing the results of those tasks in ``TaskMeta``.
 
@@ -54,7 +57,8 @@ class CeleryTestSuiteRunnerStoringResult(DjangoTestSuiteRunner):
     def setup_test_environment(self, **kwargs):
         # Monkey-patch Task.on_success() method
         def on_success_patched(self, retval, task_id, args, kwargs):
-            DatabaseBackend().store_result(task_id, retval, 'SUCCESS')
+            app = current_app._get_current_object()
+            DatabaseBackend(app=app).store_result(task_id, retval, 'SUCCESS')
 
         Task.on_success = classmethod(on_success_patched)
 
         super(CeleryTestSuiteRunnerStoringResult, self).setup_test_environment(
diff --git a/awx/lib/site-packages/djcelery/db.py b/awx/lib/site-packages/djcelery/db.py
new file mode 100644
index 0000000000..2204083a5c
--- /dev/null
+++ b/awx/lib/site-packages/djcelery/db.py
@@ -0,0 +1,63 @@
+from __future__ import absolute_import
+
+import django
+
+from contextlib import contextmanager
+from django.db import transaction
+
+if django.VERSION < (1, 6):  # pragma: no cover
+
+    def get_queryset(s):
+        return s.get_query_set()
+else:
+    def get_queryset(s):  # noqa
+        return s.get_queryset()
+
+try:
+    from django.db.transaction import atomic  # noqa
+except ImportError:  # pragma: no cover
+
+    try:
+        from django.db.transaction import Transaction  # noqa
+    except ImportError:
+        @contextmanager
+        def commit_on_success(*args, **kwargs):
+            try:
+                transaction.enter_transaction_management(*args, **kwargs)
+                transaction.managed(True, *args, **kwargs)
+                try:
+                    yield
+                except:
+                    if transaction.is_dirty(*args, **kwargs):
+                        transaction.rollback(*args, **kwargs)
+                    raise
+                else:
+                    if transaction.is_dirty(*args, **kwargs):
+                        try:
+                            transaction.commit(*args, **kwargs)
+                        except:
+                            transaction.rollback(*args, **kwargs)
+                            raise
+            finally:
+                transaction.leave_transaction_management(*args, **kwargs)
+    else:  # pragma: no cover
+        from django.db.transaction import commit_on_success  # noqa
+
+    commit_unless_managed = transaction.commit_unless_managed
+    rollback_unless_managed = transaction.rollback_unless_managed
+else:
+    @contextmanager
+    def commit_on_success(using=None):  # noqa
+        connection = transaction.get_connection(using)
+        if connection.features.autocommits_when_autocommit_is_off:
+            # ignore stupid warnings and errors
+            yield
+        else:
+            with transaction.atomic(using):
+                yield
+
+    def commit_unless_managed(*args, **kwargs):  # noqa
+        pass
+
+    def rollback_unless_managed(*args, **kwargs):  # noqa
+        pass
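The new djcelery/db.py is the compatibility seam the rest of this patch leans on: on Django 1.6+ its commit_on_success() delegates to transaction.atomic() (or does nothing when the backend autocommits anyway) and the *_unless_managed helpers become no-ops, while older Djangos keep the legacy transaction-management behavior. Roughly how managers.py and schedulers.py below consume it, as a sketch (hide_expired is a made-up name):

    from djcelery.db import commit_on_success, get_queryset

    def hide_expired(manager):
        # atomic() semantics on Django 1.6+, commit_on_success() before that
        with commit_on_success():
            get_queryset(manager).filter(hidden=False).update(hidden=True)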
diff --git a/awx/lib/site-packages/djcelery/humanize.py b/awx/lib/site-packages/djcelery/humanize.py
index 355daa0cdd..9ddc7a4382 100644
--- a/awx/lib/site-packages/djcelery/humanize.py
+++ b/awx/lib/site-packages/djcelery/humanize.py
@@ -5,22 +5,29 @@ from datetime import datetime
 
 from django.utils.translation import ungettext, ugettext as _
 
 from .utils import now
 
-JUST_NOW = _('just now')
-SECONDS_AGO = (_('{seconds} second ago'), _('{seconds} seconds ago'))
-MINUTES_AGO = (_('{minutes} minute ago'), _('{minutes} minutes ago'))
-HOURS_AGO = (_('{hours} hour ago'), _('{hours} hours ago'))
-YESTERDAY_AT = _('yesterday at {time}')
-OLDER_YEAR = (_('year'), _('years'))
-OLDER_MONTH = (_('month'), _('months'))
-OLDER_WEEK = (_('week'), _('weeks'))
-OLDER_DAY = (_('day'), _('days'))
+
+def pluralize_year(n):
+    return ungettext(_('{num} year ago'), _('{num} years ago'), n)
+
+
+def pluralize_month(n):
+    return ungettext(_('{num} month ago'), _('{num} months ago'), n)
+
+
+def pluralize_week(n):
+    return ungettext(_('{num} week ago'), _('{num} weeks ago'), n)
+
+
+def pluralize_day(n):
+    return ungettext(_('{num} day ago'), _('{num} days ago'), n)
+
+
 OLDER_CHUNKS = (
-    (365.0, OLDER_YEAR),
-    (30.0, OLDER_MONTH),
-    (7.0, OLDER_WEEK),
-    (1.0, OLDER_DAY),
+    (365.0, pluralize_year),
+    (30.0, pluralize_month),
+    (7.0, pluralize_week),
+    (1.0, pluralize_day),
 )
-OLDER_AGO = _('{number} {type} ago')
 
 
 def _un(singular__plural, n=None):
@@ -28,7 +35,7 @@ def _un(singular__plural, n=None):
     return ungettext(singular, plural, n)
 
 
-def naturaldate(date):
+def naturaldate(date, include_seconds=False):
     """Convert datetime into a human natural date string."""
 
     if not date:
@@ -41,29 +48,38 @@ def naturaldate(date):
     delta_midnight = today - date
 
     days = delta.days
-    hours = round(delta.seconds / 3600, 0)
+    hours = int(round(delta.seconds / 3600, 0))
     minutes = delta.seconds / 60
+    seconds = delta.seconds
 
     if days < 0:
-        return JUST_NOW
+        return _('just now')
 
     if days == 0:
         if hours == 0:
             if minutes > 0:
-                return _un(MINUTES_AGO, n=minutes).format(minutes=minutes)
+                return ungettext(
+                    _('{minutes} minute ago'),
+                    _('{minutes} minutes ago'), minutes
+                ).format(minutes=minutes)
             else:
-                return JUST_NOW
+                if include_seconds and seconds:
+                    return ungettext(
+                        _('{seconds} second ago'),
+                        _('{seconds} seconds ago'), seconds
+                    ).format(seconds=seconds)
+                return _('just now')
         else:
-            return _un(HOURS_AGO, n=hours).format(hours=hours)
+            return ungettext(
+                _('{hours} hour ago'), _('{hours} hours ago'), hours
+            ).format(hours=hours)
 
     if delta_midnight.days == 0:
-        return YESTERDAY_AT.format(time=date.strftime('%H:%M'))
+        return _('yesterday at {time}').format(time=date.strftime('%H:%M'))
 
     count = 0
-    for chunk, singular_plural in OLDER_CHUNKS:
+    for chunk, pluralizefun in OLDER_CHUNKS:
         if days >= chunk:
             count = round((delta_midnight.days + 1) / chunk, 0)
-            type_ = _un(singular_plural, n=count)
-            break
-
-    return OLDER_AGO.format(number=count, type=type_)
+            fmt = pluralizefun(count)
+            return fmt.format(num=count)
diff --git a/awx/lib/site-packages/djcelery/loaders.py b/awx/lib/site-packages/djcelery/loaders.py
index 9f488e1100..df0a668f3a 100644
--- a/awx/lib/site-packages/djcelery/loaders.py
+++ b/awx/lib/site-packages/djcelery/loaders.py
@@ -72,7 +72,10 @@ class DjangoLoader(BaseLoader):
         try:
             funs = [conn.close for conn in db.connections]
         except AttributeError:
-            funs = [db.close_connection]  # pre multidb
+            if hasattr(db, 'close_old_connections'):  # Django 1.6+
+                funs = [db.close_old_connections]
+            else:
+                funs = [db.close_connection]  # pre multidb
 
         for close in funs:
             try:
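One step back to the humanize.py rewrite above: moving pluralization into per-unit ungettext() helpers lets translation catalogs pick the correct plural form (the old tuple-based _un() constants could not), and the new include_seconds flag adds sub-minute resolution. Expected behavior with the English catalog, as an illustrative sketch:

    from datetime import timedelta
    from djcelery.humanize import naturaldate
    from djcelery.utils import now

    naturaldate(now())                                  # 'just now'
    naturaldate(now() - timedelta(seconds=30),
                include_seconds=True)                   # '30 seconds ago'
    naturaldate(now() - timedelta(hours=2))             # '2 hours ago'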
diff --git a/awx/lib/site-packages/djcelery/management/base.py b/awx/lib/site-packages/djcelery/management/base.py
index 1b885bd904..b1190f801d 100644
--- a/awx/lib/site-packages/djcelery/management/base.py
+++ b/awx/lib/site-packages/djcelery/management/base.py
@@ -80,6 +80,9 @@ class CeleryCommand(BaseCommand):
         acc = []
         broker = None
         for i, arg in enumerate(argv):
+            # --settings and --pythonpath are also handled
+            # by BaseCommand.handle_default_options, but that is
+            # called with the resulting options parsed by optparse.
             if '--settings=' in arg:
                 _, settings_module = arg.split('=')
                 os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
diff --git a/awx/lib/site-packages/djcelery/management/commands/celery.py b/awx/lib/site-packages/djcelery/management/commands/celery.py
index 985de59c8e..c47626a9ea 100644
--- a/awx/lib/site-packages/djcelery/management/commands/celery.py
+++ b/awx/lib/site-packages/djcelery/management/commands/celery.py
@@ -18,6 +18,8 @@ class Command(CeleryCommand):
 
     def run_from_argv(self, argv):
         argv = self.handle_default_options(argv)
+        if self.requires_model_validation:
+            self.validate()
         base.execute_from_commandline(
             ['{0[0]} {0[1]}'.format(argv)] + argv[2:],
         )
diff --git a/awx/lib/site-packages/djcelery/management/commands/celeryd.py b/awx/lib/site-packages/djcelery/management/commands/celeryd.py
index 03d5f2d034..1e3262ccf1 100644
--- a/awx/lib/site-packages/djcelery/management/commands/celeryd.py
+++ b/awx/lib/site-packages/djcelery/management/commands/celeryd.py
@@ -22,4 +22,5 @@ class Command(CeleryCommand):
                        + worker.preload_options)
 
     def handle(self, *args, **options):
-        worker.run(*args, **options)
+        worker.check_args(args)
+        worker.run(**options)
diff --git a/awx/lib/site-packages/djcelery/managers.py b/awx/lib/site-packages/djcelery/managers.py
index e3e4ee58b5..3224802e4f 100644
--- a/awx/lib/site-packages/djcelery/managers.py
+++ b/awx/lib/site-packages/djcelery/managers.py
@@ -5,7 +5,7 @@ import warnings
 from functools import wraps
 from itertools import count
 
-from django.db import transaction, connection
+from django.db import connection
 try:
     from django.db import connections, router
 except ImportError:  # pre-Django 1.2
@@ -17,6 +17,7 @@ from django.conf import settings
 
 from celery.utils.timeutils import maybe_timedelta
 
+from .db import commit_on_success, get_queryset, rollback_unless_managed
 from .utils import now
 
 
@@ -47,8 +48,10 @@ def transaction_retry(max_retries=1):
                     # the transaction.
                     if retries >= _max_retries:
                         raise
-                    transaction.rollback_unless_managed()
-
+                    try:
+                        rollback_unless_managed()
+                    except Exception:
+                        pass
         return _inner
 
     return _outer
@@ -76,11 +79,12 @@ class ExtendedQuerySet(QuerySet):
 
 
 class ExtendedManager(models.Manager):
 
-    def get_query_set(self):
+    def get_queryset(self):
         return ExtendedQuerySet(self.model)
+    get_query_set = get_queryset  # Pre django 1.6
 
     def update_or_create(self, **kwargs):
-        return self.get_query_set().update_or_create(**kwargs)
+        return get_queryset(self).update_or_create(**kwargs)
 
     def connection_for_write(self):
         if connections:
@@ -105,22 +109,16 @@ class ResultManager(ExtendedManager):
         """Get all expired task results."""
         return self.filter(date_done__lt=now() - maybe_timedelta(expires))
 
-    @transaction.commit_manually
     def delete_expired(self, expires):
         """Delete all expired taskset results."""
         meta = self.model._meta
-        try:
+        with commit_on_success():
             self.get_all_expired(expires).update(hidden=True)
             cursor = self.connection_for_write().cursor()
             cursor.execute(
                 'DELETE FROM {0.db_table} WHERE hidden=%s'.format(meta),
                 (True, ),
             )
-        except:
-            transaction.rollback()
-            raise
-        else:
-            transaction.commit()
 
 
 class PeriodicTaskManager(ExtendedManager):
@@ -238,10 +236,10 @@ class TaskStateManager(ExtendedManager):
         return self.expired(states, expires).update(hidden=True)
 
     def purge(self):
-        meta = self.model._meta
-        cursor = self.connection_for_write().cursor()
-        cursor.execute(
-            'DELETE FROM {0.db_table} WHERE hidden=%s'.format(meta),
-            (True, ),
-        )
-        transaction.commit_unless_managed()
+        with commit_on_success():
+            meta = self.model._meta
+            cursor = self.connection_for_write().cursor()
+            cursor.execute(
+                'DELETE FROM {0.db_table} WHERE hidden=%s'.format(meta),
+                (True, ),
+            )
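Two things change in managers.py: delete_expired() and purge() swap the deprecated @transaction.commit_manually bookkeeping for the commit_on_success() shim, and transaction_retry() now tolerates rollback itself failing (rollback_unless_managed is a no-op under Django 1.6 autocommit). How the decorator is meant to be used, sketched with a made-up function:

    from djcelery.managers import transaction_retry

    @transaction_retry(max_retries=2)
    def record_heartbeat(worker_model, hostname):
        # Re-run up to max_retries times if the database raises,
        # rolling back in between where rollback still means anything.
        worker_model.objects.update_or_create(hostname=hostname)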
meta = PickledObjectField( - _('meta'), null=True, default=None, editable=False, + compress=True, null=True, default=None, editable=False, ) objects = managers.TaskManager() diff --git a/awx/lib/site-packages/djcelery/monproj/urls.py b/awx/lib/site-packages/djcelery/monproj/urls.py index fd1f316a36..c65e0fb744 100644 --- a/awx/lib/site-packages/djcelery/monproj/urls.py +++ b/awx/lib/site-packages/djcelery/monproj/urls.py @@ -4,7 +4,7 @@ try: from django.conf.urls import (patterns, include, url, handler500, handler404) except ImportError: - from django.conf.urls import (patterns, include, url, # noqa + from django.conf.urls.defaults import (patterns, include, url, # noqa handler500, handler404) from django.contrib import admin diff --git a/awx/lib/site-packages/djcelery/picklefield.py b/awx/lib/site-packages/djcelery/picklefield.py index cfb42fb42a..0156cd4eb2 100644 --- a/awx/lib/site-packages/djcelery/picklefield.py +++ b/awx/lib/site-packages/djcelery/picklefield.py @@ -17,7 +17,9 @@ from __future__ import absolute_import, unicode_literals from base64 import b64encode, b64decode from zlib import compress, decompress +from celery.five import with_metaclass from celery.utils.serialization import pickle +from kombu.utils.encoding import bytes_to_str, str_to_bytes from django.db import models @@ -28,6 +30,16 @@ except ImportError: DEFAULT_PROTOCOL = 2 +NO_DECOMPRESS_HEADER = b'\x1e\x00r8d9qwwerwhA@' + + +@with_metaclass(models.SubfieldBase, skip_attrs=set([ + 'db_type', + 'get_db_prep_save' + ])) +class BaseField(models.Field): + pass + class PickledObject(str): pass @@ -35,28 +47,28 @@ class PickledObject(str): def maybe_compress(value, do_compress=False): if do_compress: - return compress(value) + return compress(str_to_bytes(value)) return value def maybe_decompress(value, do_decompress=False): if do_decompress: - return decompress(value) + if str_to_bytes(value[:15]) != NO_DECOMPRESS_HEADER: + return decompress(str_to_bytes(value)) return value def encode(value, compress_object=False, pickle_protocol=DEFAULT_PROTOCOL): - return b64encode(maybe_compress( + return bytes_to_str(b64encode(maybe_compress( pickle.dumps(value, pickle_protocol), compress_object), - ) + )) def decode(value, compress_object=False): return pickle.loads(maybe_decompress(b64decode(value), compress_object)) -class PickledObjectField(models.Field): - __metaclass__ = models.SubfieldBase +class PickledObjectField(BaseField): def __init__(self, compress=False, protocol=DEFAULT_PROTOCOL, *args, **kwargs): diff --git a/awx/lib/site-packages/djcelery/schedulers.py b/awx/lib/site-packages/djcelery/schedulers.py index f144deada7..06c252985d 100644 --- a/awx/lib/site-packages/djcelery/schedulers.py +++ b/awx/lib/site-packages/djcelery/schedulers.py @@ -15,6 +15,7 @@ from celery.utils.timeutils import is_naive from django.db import transaction from django.core.exceptions import ObjectDoesNotExist +from .db import commit_on_success from .models import (PeriodicTask, PeriodicTasks, CrontabSchedule, IntervalSchedule) from .utils import DATABASE_ERRORS, make_aware @@ -195,12 +196,11 @@ class DatabaseScheduler(Scheduler): self._dirty.add(new_entry.name) return new_entry - @transaction.commit_manually def sync(self): info('Writing entries...') _tried = set() try: - try: + with commit_on_success(): while self._dirty: try: name = self._dirty.pop() @@ -208,11 +208,6 @@ class DatabaseScheduler(Scheduler): self.schedule[name].save() except (KeyError, ObjectDoesNotExist): pass - except: - transaction.rollback() - raise - else: - 
transaction.commit() except DATABASE_ERRORS as exc: # retry later self._dirty |= _tried diff --git a/awx/lib/site-packages/djcelery/snapshot.py b/awx/lib/site-packages/djcelery/snapshot.py index 047e7bc26d..f5227a26c1 100644 --- a/awx/lib/site-packages/djcelery/snapshot.py +++ b/awx/lib/site-packages/djcelery/snapshot.py @@ -3,7 +3,6 @@ from __future__ import absolute_import, unicode_literals from collections import defaultdict from datetime import datetime, timedelta -from django.db import transaction from django.conf import settings from celery import states @@ -34,7 +33,7 @@ debug = logger.debug def aware_tstamp(secs): """Event timestamps uses the local timezone.""" - return maybe_make_aware(datetime.fromtimestamp(secs)) + return maybe_make_aware(datetime.utcfromtimestamp(secs)) class Camera(Polaroid): @@ -77,7 +76,7 @@ class Camera(Polaroid): def handle_task(self, uuid_task, worker=None): """Handle snapshotted event.""" - (uuid, task) = uuid_task + uuid, task = uuid_task if task.worker and task.worker.hostname: worker = self.handle_worker( (task.worker.hostname, task.worker), @@ -130,15 +129,10 @@ class Camera(Polaroid): return obj def on_shutter(self, state, commit_every=100): - if not state.event_count: - transaction.commit() - return def _handle_tasks(): for i, task in enumerate(state.tasks.items()): self.handle_task(task) - if not i % commit_every: - transaction.commit() for worker in state.workers.items(): self.handle_worker(worker) diff --git a/awx/lib/site-packages/djcelery/static/djcelery/style.css b/awx/lib/site-packages/djcelery/static/djcelery/style.css deleted file mode 100644 index b4f4c6aa44..0000000000 --- a/awx/lib/site-packages/djcelery/static/djcelery/style.css +++ /dev/null @@ -1,4 +0,0 @@ -.form-row.field-traceback p { - font-family: monospace; - white-space: pre; -} diff --git a/awx/lib/site-packages/djcelery/templates/admin/djcelery/change_list.html b/awx/lib/site-packages/djcelery/templates/admin/djcelery/change_list.html deleted file mode 100644 index f35a801d50..0000000000 --- a/awx/lib/site-packages/djcelery/templates/admin/djcelery/change_list.html +++ /dev/null @@ -1,26 +0,0 @@ -{% extends "admin/change_list.html" %} -{% load i18n %} - -{% block breadcrumbs %} - - {% if wrong_scheduler %} -
-    <ul class="messagelist">
-      <li class="warning">
-        Periodic tasks won't be dispatched unless you set the
-        CELERYBEAT_SCHEDULER setting to
-        djcelery.schedulers.DatabaseScheduler,
-        or specify it using the -S option to celerybeat
-      </li>
-    </ul>
- {% endif %} -{% endblock %} diff --git a/awx/lib/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html b/awx/lib/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html deleted file mode 100644 index 6152b76f20..0000000000 --- a/awx/lib/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html +++ /dev/null @@ -1,25 +0,0 @@ -{% extends "admin/base_site.html" %} -{% load i18n %} - -{% block breadcrumbs %} - -{% endblock %} - -{% block content %} -
{% csrf_token %} -
- {% for obj in queryset %} - - {% endfor %} - - - - -
- -{% endblock %} diff --git a/awx/lib/site-packages/djcelery/tests/test_backends/test_cache.py b/awx/lib/site-packages/djcelery/tests/test_backends/test_cache.py index 5bb88fcd5c..f7b4cbc91a 100644 --- a/awx/lib/site-packages/djcelery/tests/test_backends/test_cache.py +++ b/awx/lib/site-packages/djcelery/tests/test_backends/test_cache.py @@ -5,6 +5,7 @@ import sys from datetime import timedelta from billiard.einfo import ExceptionInfo +import django from django.core.cache.backends.base import InvalidCacheBackendError from celery import result @@ -103,7 +104,13 @@ class test_custom_CacheBackend(unittest.TestCase): from celery import current_app prev_backend = current_app.conf.CELERY_CACHE_BACKEND prev_module = sys.modules['djcelery.backends.cache'] - current_app.conf.CELERY_CACHE_BACKEND = 'dummy://' + + if django.VERSION >= (1, 3): + current_app.conf.CELERY_CACHE_BACKEND = \ + 'django.core.cache.backends.dummy.DummyCache' + else: + # Django 1.2 used 'scheme://' style cache backends + current_app.conf.CELERY_CACHE_BACKEND = 'dummy://' sys.modules.pop('djcelery.backends.cache') try: from djcelery.backends.cache import cache diff --git a/awx/lib/site-packages/djcelery/tests/test_backends/test_database.py b/awx/lib/site-packages/djcelery/tests/test_backends/test_database.py index 93afdd72c8..848d7714f0 100644 --- a/awx/lib/site-packages/djcelery/tests/test_backends/test_database.py +++ b/awx/lib/site-packages/djcelery/tests/test_backends/test_database.py @@ -1,5 +1,7 @@ from __future__ import absolute_import, unicode_literals +import celery + from datetime import timedelta from celery import current_app @@ -65,6 +67,9 @@ class TestDatabaseBackend(unittest.TestCase): x = AsyncResult(tid) self.assertEqual(x.result.get('foo'), 'bar') x.forget() + if celery.VERSION[0:3] == (3, 1, 10): + # a bug in 3.1.10 meant forget() did not clear the result cache.
+ x._cache = None self.assertIsNone(x.result) def test_group_store(self): diff --git a/awx/lib/site-packages/djcelery/tests/test_snapshot.py b/awx/lib/site-packages/djcelery/tests/test_snapshot.py index 2bedabe0bd..4bf608ed82 100644 --- a/awx/lib/site-packages/djcelery/tests/test_snapshot.py +++ b/awx/lib/site-packages/djcelery/tests/test_snapshot.py @@ -4,7 +4,8 @@ from datetime import datetime from itertools import count from time import time -from celery.events import Event +from celery import states +from celery.events import Event as _Event from celery.events.state import State, Worker, Task from celery.utils import gen_unique_id @@ -15,6 +16,13 @@ from djcelery.utils import make_aware from djcelery.tests.utils import unittest _next_id = count(0).next +_next_clock = count(1).next + + +def Event(*args, **kwargs): + kwargs.setdefault('clock', _next_clock()) + kwargs.setdefault('local_received', time()) + return _Event(*args, **kwargs) def create_task(worker, **kwargs): @@ -45,14 +53,14 @@ class test_Camera(unittest.TestCase): t2 = time() t3 = time() for t in t1, t2, t3: - worker.on_heartbeat(t, t) + worker.event('heartbeat', t, t, {}) self.state.workers[worker.hostname] = worker self.assertEqual(self.cam.get_heartbeat(worker), make_aware(datetime.fromtimestamp(t3))) def test_handle_worker(self): worker = Worker(hostname='fuzzie') - worker.on_online(time(), time()) + worker.event('online', time(), time(), {}) self.cam._last_worker_write.clear() m = self.cam.handle_worker((worker.hostname, worker)) self.assertTrue(m) @@ -64,11 +72,11 @@ class test_Camera(unittest.TestCase): def test_handle_task_received(self): worker = Worker(hostname='fuzzie') - worker.on_online(time(), time()) + worker.event('online', time(), time(), {}) self.cam.handle_worker((worker.hostname, worker)) task = create_task(worker) - task.on_received(time()) + task.event('received', time(), time(), {}) self.assertEqual(task.state, 'RECEIVED') mt = self.cam.handle_task((task.uuid, task)) self.assertEqual(mt.name, task.name) @@ -80,36 +88,38 @@ class test_Camera(unittest.TestCase): def test_handle_task(self): worker1 = Worker(hostname='fuzzie') - worker1.on_online(time(), time()) + worker1.event('online', time(), time(), {}) mw = self.cam.handle_worker((worker1.hostname, worker1)) task1 = create_task(worker1) - task1.on_received(timestamp=time()) + task1.event('received', time(), time(), {}) mt = self.cam.handle_task((task1.uuid, task1)) self.assertEqual(mt.worker, mw) worker2 = Worker(hostname=None) task2 = create_task(worker2) - task2.on_received(timestamp=time()) + task2.event('received', time(), time(), {}) mt = self.cam.handle_task((task2.uuid, task2)) self.assertIsNone(mt.worker) - task1.on_succeeded(timestamp=time(), result=42) + task1.event('succeeded', time(), time(), {'result': 42}) + self.assertEqual(task1.state, states.SUCCESS) + self.assertEqual(task1.result, 42) mt = self.cam.handle_task((task1.uuid, task1)) self.assertEqual(mt.name, task1.name) self.assertEqual(mt.result, 42) task3 = create_task(worker1, name=None) - task3.on_revoked(timestamp=time()) + task3.event('revoked', time(), time(), {}) mt = self.cam.handle_task((task3.uuid, task3)) self.assertIsNone(mt) def assertExpires(self, dec, expired, tasks=10): worker = Worker(hostname='fuzzie') - worker.on_online(time(), time()) + worker.event('online', time(), time(), {}) for total in xrange(tasks): task = create_task(worker) - task.on_received(timestamp=time() - dec) - task.on_succeeded(timestamp=time() - dec, result=42) + task.event('received', time() -
dec, time() - dec, {}) + task.event('succeeded', time() - dec, time() - dec, {'result': 42}) self.assertTrue(task.name) self.assertTrue(self.cam.handle_task((task.uuid, task))) self.assertEqual(self.cam.on_cleanup(), expired) diff --git a/awx/lib/site-packages/djcelery/urls.py b/awx/lib/site-packages/djcelery/urls.py index d08101e0b9..b6e5cad4ef 100644 --- a/awx/lib/site-packages/djcelery/urls.py +++ b/awx/lib/site-packages/djcelery/urls.py @@ -16,7 +16,7 @@ from __future__ import absolute_import, unicode_literals try: from django.conf.urls import patterns, url except ImportError: # deprecated since Django 1.4 - from django.conf.urls import patterns, url # noqa + from django.conf.urls.defaults import patterns, url # noqa from . import views diff --git a/awx/lib/site-packages/djcelery/views.py b/awx/lib/site-packages/djcelery/views.py index 876630910e..34cb3077ad 100644 --- a/awx/lib/site-packages/djcelery/views.py +++ b/awx/lib/site-packages/djcelery/views.py @@ -100,8 +100,12 @@ def task_webhook(fun): y = int(request.GET['y']) return x + y - >>> response = add(request) - >>> response.content + def view(request): + response = add(request) + print(response.content) + + Gives:: + "{'status': 'success', 'retval': 100}" """ diff --git a/awx/lib/site-packages/funtests/tests/__init__.py b/awx/lib/site-packages/funtests/tests/__init__.py deleted file mode 100644 index 41cbef6791..0000000000 --- a/awx/lib/site-packages/funtests/tests/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -import os -import sys - -sys.path.insert(0, os.path.join(os.getcwd(), os.pardir)) -print(sys.path[0]) -sys.path.insert(0, os.getcwd()) -print(sys.path[0]) diff --git a/awx/lib/site-packages/funtests/tests/test_multiprocessing.py b/awx/lib/site-packages/funtests/tests/test_multiprocessing.py deleted file mode 100644 index 33ab25ac9a..0000000000 --- a/awx/lib/site-packages/funtests/tests/test_multiprocessing.py +++ /dev/null @@ -1,2036 +0,0 @@ -#!/usr/bin/env python - -from __future__ import absolute_import - -# -# Unit tests for the multiprocessing package -# - -import unittest -import Queue -import time -import sys -import os -import gc -import array -import random -import logging -from nose import SkipTest -from test import test_support -from StringIO import StringIO -try: - from billiard._ext import _billiard -except ImportError as exc: - raise SkipTest(exc) -# import threading after _billiard to raise a more revelant error -# message: "No module named _billiard". _billiard is not compiled -# without thread support. 
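Aside from the deletions, the test_snapshot.py hunk earlier in this patch documents the event-API migration in celery 3.1: the per-event methods (worker.on_online(), task.on_received(), task.on_succeeded()) are replaced by a single event(type, timestamp, local_received, fields) entry point. A minimal sketch of exercising that interface, assuming celery 3.1's celery.events.state classes; the hostname, uuid, and task name here are made up for illustration:

    # Hedged sketch, assuming celery 3.1: Worker and Task expose one
    # event() method instead of an on_<type>() method per event type.
    from time import time
    from celery.events.state import Worker, Task

    worker = Worker(hostname='w1.example.com')
    worker.event('online', time(), time(), {})   # was worker.on_online(t, t)

    task = Task(uuid='hypothetical-uuid', name='tasks.add', worker=worker)
    task.event('received', time(), time(), {})   # was task.on_received(...)
    task.event('succeeded', time(), time(), {'result': 4})
    assert task.state == 'SUCCESS'
    assert task.result == 4

The trailing fields dict carries what used to be keyword arguments (the diff's result=42 becomes {'result': 42}), which is why the updated tests can assert on task1.state and task1.result immediately after firing the event.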
-import threading - -# Work around broken sem_open implementations -try: - import billiard.synchronize -except ImportError as exc: - raise SkipTest(exc) - -import billiard.dummy -import billiard.connection -import billiard.managers -import billiard.heap -import billiard.pool - -from billiard import util -from billiard.compat import bytes - -latin = str - -# Constants -LOG_LEVEL = util.SUBWARNING - -DELTA = 0.1 -CHECK_TIMINGS = False # making true makes tests take a lot longer - # and can sometimes cause some non-serious - # failures because some calls block a bit - # longer than expected -if CHECK_TIMINGS: - TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 -else: - TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 - -HAVE_GETVALUE = not getattr(_billiard, - 'HAVE_BROKEN_SEM_GETVALUE', False) - -WIN32 = (sys.platform == "win32") - -# Some tests require ctypes -try: - from ctypes import Structure, c_int, c_double -except ImportError: - Structure = object - c_int = c_double = None - -try: - from ctypes import Value -except ImportError: - Value = None - -try: - from ctypes import copy as ctypes_copy -except ImportError: - ctypes_copy = None - - -class TimingWrapper(object): - """Creates a wrapper for a function which records the - time it takes to finish""" - - def __init__(self, func): - self.func = func - self.elapsed = None - - def __call__(self, *args, **kwds): - t = time.time() - try: - return self.func(*args, **kwds) - finally: - self.elapsed = time.time() - t - - -class BaseTestCase(object): - """Base class for test cases""" - ALLOWED_TYPES = ('processes', 'manager', 'threads') - - def assertTimingAlmostEqual(self, a, b): - if CHECK_TIMINGS: - self.assertAlmostEqual(a, b, 1) - - def assertReturnsIfImplemented(self, value, func, *args): - try: - res = func(*args) - except NotImplementedError: - pass - else: - return self.assertEqual(value, res) - - -def get_value(self): - """Return the value of a semaphore""" - try: - return self.get_value() - except AttributeError: - try: - return self._Semaphore__value - except AttributeError: - try: - return self._value - except AttributeError: - raise NotImplementedError - - -class _TestProcesses(BaseTestCase): - - ALLOWED_TYPES = ('processes', 'threads') - - def test_current(self): - if self.TYPE == 'threads': - return - - current = self.current_process() - authkey = current.authkey - - self.assertTrue(current.is_alive()) - self.assertTrue(not current.daemon) - self.assertIsInstance(authkey, bytes) - self.assertTrue(len(authkey) > 0) - self.assertEqual(current.ident, os.getpid()) - self.assertEqual(current.exitcode, None) - - def _test(self, q, *args, **kwds): - current = self.current_process() - q.put(args) - q.put(kwds) - q.put(current.name) - if self.TYPE != 'threads': - q.put(bytes(current.authkey, 'ascii')) - q.put(current.pid) - - def test_process(self): - q = self.Queue(1) - e = self.Event() # noqa - args = (q, 1, 2) - kwargs = {'hello': 23, 'bye': 2.54} - name = 'SomeProcess' - p = self.Process( - target=self._test, args=args, kwargs=kwargs, name=name - ) - p.daemon = True - current = self.current_process() - - if self.TYPE != 'threads': - self.assertEquals(p.authkey, current.authkey) - self.assertEquals(p.is_alive(), False) - self.assertEquals(p.daemon, True) - self.assertNotIn(p, self.active_children()) - self.assertTrue(type(self.active_children()) is list) - self.assertEqual(p.exitcode, None) - - p.start() - - self.assertEquals(p.exitcode, None) - self.assertEquals(p.is_alive(), True) - self.assertIn(p, self.active_children()) - - 
self.assertEquals(q.get(), args[1:]) - self.assertEquals(q.get(), kwargs) - self.assertEquals(q.get(), p.name) - if self.TYPE != 'threads': - self.assertEquals(q.get(), current.authkey) - self.assertEquals(q.get(), p.pid) - - p.join() - - self.assertEquals(p.exitcode, 0) - self.assertEquals(p.is_alive(), False) - self.assertNotIn(p, self.active_children()) - - def _test_terminate(self): - time.sleep(1000) - - def test_terminate(self): - if self.TYPE == 'threads': - return - - p = self.Process(target=self._test_terminate) - p.daemon = True - p.start() - - self.assertEqual(p.is_alive(), True) - self.assertIn(p, self.active_children()) - self.assertEqual(p.exitcode, None) - - p.terminate() - - join = TimingWrapper(p.join) - self.assertEqual(join(), None) - self.assertTimingAlmostEqual(join.elapsed, 0.0) - - self.assertEqual(p.is_alive(), False) - self.assertNotIn(p, self.active_children()) - - p.join() - - # XXX sometimes get p.exitcode == 0 on Windows ... - #self.assertEqual(p.exitcode, -signal.SIGTERM) - - def test_cpu_count(self): - try: - cpus = billiard.cpu_count() - except NotImplementedError: - cpus = 1 - self.assertTrue(type(cpus) is int) - self.assertTrue(cpus >= 1) - - def test_active_children(self): - self.assertEqual(type(self.active_children()), list) - - p = self.Process(target=time.sleep, args=(DELTA,)) - self.assertNotIn(p, self.active_children()) - - p.start() - self.assertIn(p, self.active_children()) - - p.join() - self.assertNotIn(p, self.active_children()) - - def _test_recursion(self, wconn, id): - __import__('billiard.forking') - wconn.send(id) - if len(id) < 2: - for i in range(2): - p = self.Process( - target=self._test_recursion, args=(wconn, id + [i]) - ) - p.start() - p.join() - - def test_recursion(self): - rconn, wconn = self.Pipe(duplex=False) - self._test_recursion(wconn, []) - - time.sleep(DELTA) - result = [] - while rconn.poll(): - result.append(rconn.recv()) - - expected = [ - [], - [0], - [0, 0], - [0, 1], - [1], - [1, 0], - [1, 1] - ] - self.assertEqual(result, expected) - - -class _UpperCaser(billiard.Process): - - def __init__(self): - billiard.Process.__init__(self) - self.child_conn, self.parent_conn = billiard.Pipe() - - def run(self): - self.parent_conn.close() - for s in iter(self.child_conn.recv, None): - self.child_conn.send(s.upper()) - self.child_conn.close() - - def submit(self, s): - assert type(s) is str - self.parent_conn.send(s) - return self.parent_conn.recv() - - def stop(self): - self.parent_conn.send(None) - self.parent_conn.close() - self.child_conn.close() - - -class _TestSubclassingProcess(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def test_subclassing(self): - uppercaser = _UpperCaser() - uppercaser.start() - self.assertEqual(uppercaser.submit('hello'), 'HELLO') - self.assertEqual(uppercaser.submit('world'), 'WORLD') - uppercaser.stop() - uppercaser.join() - - -def queue_empty(q): - if hasattr(q, 'empty'): - return q.empty() - else: - return q.qsize() == 0 - - -def queue_full(q, maxsize): - if hasattr(q, 'full'): - return q.full() - else: - return q.qsize() == maxsize - - -class _TestQueue(BaseTestCase): - - def _test_put(self, queue, child_can_start, parent_can_continue): - child_can_start.wait() - for i in range(6): - queue.get() - parent_can_continue.set() - - def test_put(self): - MAXSIZE = 6 - queue = self.Queue(maxsize=MAXSIZE) - child_can_start = self.Event() - parent_can_continue = self.Event() - - proc = self.Process( - target=self._test_put, - args=(queue, child_can_start, parent_can_continue) - ) - proc.daemon 
= True - proc.start() - - self.assertEqual(queue_empty(queue), True) - self.assertEqual(queue_full(queue, MAXSIZE), False) - - queue.put(1) - queue.put(2, True) - queue.put(3, True, None) - queue.put(4, False) - queue.put(5, False, None) - queue.put_nowait(6) - - # the values may be in buffer but not yet in pipe so sleep a bit - time.sleep(DELTA) - - self.assertEqual(queue_empty(queue), False) - self.assertEqual(queue_full(queue, MAXSIZE), True) - - put = TimingWrapper(queue.put) - put_nowait = TimingWrapper(queue.put_nowait) - - self.assertRaises(Queue.Full, put, 7, False) - self.assertTimingAlmostEqual(put.elapsed, 0) - - self.assertRaises(Queue.Full, put, 7, False, None) - self.assertTimingAlmostEqual(put.elapsed, 0) - - self.assertRaises(Queue.Full, put_nowait, 7) - self.assertTimingAlmostEqual(put_nowait.elapsed, 0) - - self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1) - self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) - - self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2) - self.assertTimingAlmostEqual(put.elapsed, 0) - - self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3) - self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) - - child_can_start.set() - parent_can_continue.wait() - - self.assertEqual(queue_empty(queue), True) - self.assertEqual(queue_full(queue, MAXSIZE), False) - - proc.join() - - def _test_get(self, queue, child_can_start, parent_can_continue): - child_can_start.wait() - #queue.put(1) - queue.put(2) - queue.put(3) - queue.put(4) - queue.put(5) - parent_can_continue.set() - - def test_get(self): - queue = self.Queue() - child_can_start = self.Event() - parent_can_continue = self.Event() - - proc = self.Process( - target=self._test_get, - args=(queue, child_can_start, parent_can_continue) - ) - proc.daemon = True - proc.start() - - self.assertEqual(queue_empty(queue), True) - - child_can_start.set() - parent_can_continue.wait() - - time.sleep(DELTA) - self.assertEqual(queue_empty(queue), False) - - # Hangs unexpectedly, remove for now - #self.assertEqual(queue.get(), 1) - self.assertEqual(queue.get(True, None), 2) - self.assertEqual(queue.get(True), 3) - self.assertEqual(queue.get(timeout=1), 4) - self.assertEqual(queue.get_nowait(), 5) - - self.assertEqual(queue_empty(queue), True) - - get = TimingWrapper(queue.get) - get_nowait = TimingWrapper(queue.get_nowait) - - self.assertRaises(Queue.Empty, get, False) - self.assertTimingAlmostEqual(get.elapsed, 0) - - self.assertRaises(Queue.Empty, get, False, None) - self.assertTimingAlmostEqual(get.elapsed, 0) - - self.assertRaises(Queue.Empty, get_nowait) - self.assertTimingAlmostEqual(get_nowait.elapsed, 0) - - self.assertRaises(Queue.Empty, get, True, TIMEOUT1) - self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) - - self.assertRaises(Queue.Empty, get, False, TIMEOUT2) - self.assertTimingAlmostEqual(get.elapsed, 0) - - self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3) - self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) - - proc.join() - - def _test_fork(self, queue): - for i in range(10, 20): - queue.put(i) - # note that at this point the items may only be buffered, so the - # process cannot shutdown until the feeder thread has finished - # pushing items onto the pipe. - - def test_fork(self): - # Old versions of Queue would fail to create a new feeder - # thread for a forked process if the original process had its - # own feeder thread. This test checks that this no longer - # happens. 
- - queue = self.Queue() - - # put items on queue so that main process starts a feeder thread - for i in range(10): - queue.put(i) - - # wait to make sure thread starts before we fork a new process - time.sleep(DELTA) - - # fork process - p = self.Process(target=self._test_fork, args=(queue,)) - p.start() - - # check that all expected items are in the queue - for i in range(20): - self.assertEqual(queue.get(), i) - self.assertRaises(Queue.Empty, queue.get, False) - - p.join() - - def test_qsize(self): - q = self.Queue() - try: - self.assertEqual(q.qsize(), 0) - except NotImplementedError: - return - q.put(1) - self.assertEqual(q.qsize(), 1) - q.put(5) - self.assertEqual(q.qsize(), 2) - q.get() - self.assertEqual(q.qsize(), 1) - q.get() - self.assertEqual(q.qsize(), 0) - - def _test_task_done(self, q): - for obj in iter(q.get, None): - time.sleep(DELTA) - q.task_done() - - def test_task_done(self): - queue = self.JoinableQueue() - - if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'): - self.skipTest("requires 'queue.task_done()' method") - - workers = [self.Process(target=self._test_task_done, args=(queue,)) - for i in xrange(4)] - - for p in workers: - p.start() - - for i in xrange(10): - queue.put(i) - - queue.join() - - for p in workers: - queue.put(None) - - for p in workers: - p.join() - - -class _TestLock(BaseTestCase): - - def test_lock(self): - lock = self.Lock() - self.assertEqual(lock.acquire(), True) - self.assertEqual(lock.acquire(False), False) - self.assertEqual(lock.release(), None) - self.assertRaises((ValueError, threading.ThreadError), lock.release) - - def test_rlock(self): - lock = self.RLock() - self.assertEqual(lock.acquire(), True) - self.assertEqual(lock.acquire(), True) - self.assertEqual(lock.acquire(), True) - self.assertEqual(lock.release(), None) - self.assertEqual(lock.release(), None) - self.assertEqual(lock.release(), None) - self.assertRaises((AssertionError, RuntimeError), lock.release) - - def test_lock_context(self): - with self.Lock(): - pass - - -class _TestSemaphore(BaseTestCase): - - def _test_semaphore(self, sem): - self.assertReturnsIfImplemented(2, get_value, sem) - self.assertEqual(sem.acquire(), True) - self.assertReturnsIfImplemented(1, get_value, sem) - self.assertEqual(sem.acquire(), True) - self.assertReturnsIfImplemented(0, get_value, sem) - self.assertEqual(sem.acquire(False), False) - self.assertReturnsIfImplemented(0, get_value, sem) - self.assertEqual(sem.release(), None) - self.assertReturnsIfImplemented(1, get_value, sem) - self.assertEqual(sem.release(), None) - self.assertReturnsIfImplemented(2, get_value, sem) - - def test_semaphore(self): - sem = self.Semaphore(2) - self._test_semaphore(sem) - self.assertEqual(sem.release(), None) - self.assertReturnsIfImplemented(3, get_value, sem) - self.assertEqual(sem.release(), None) - self.assertReturnsIfImplemented(4, get_value, sem) - - def test_bounded_semaphore(self): - sem = self.BoundedSemaphore(2) - self._test_semaphore(sem) - # Currently fails on OS/X - #if HAVE_GETVALUE: - # self.assertRaises(ValueError, sem.release) - # self.assertReturnsIfImplemented(2, get_value, sem) - - def test_timeout(self): - if self.TYPE != 'processes': - return - - sem = self.Semaphore(0) - acquire = TimingWrapper(sem.acquire) - - self.assertEqual(acquire(False), False) - self.assertTimingAlmostEqual(acquire.elapsed, 0.0) - - self.assertEqual(acquire(False, None), False) - self.assertTimingAlmostEqual(acquire.elapsed, 0.0) - - self.assertEqual(acquire(False, TIMEOUT1), False) - 
self.assertTimingAlmostEqual(acquire.elapsed, 0) - - self.assertEqual(acquire(True, TIMEOUT2), False) - self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) - - self.assertEqual(acquire(timeout=TIMEOUT3), False) - self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) - - -class _TestCondition(BaseTestCase): - - def f(self, cond, sleeping, woken, timeout=None): - cond.acquire() - sleeping.release() - cond.wait(timeout) - woken.release() - cond.release() - - def check_invariant(self, cond): - # this is only supposed to succeed when there are no sleepers - if self.TYPE == 'processes': - try: - sleepers = (cond._sleeping_count.get_value() - - cond._woken_count.get_value()) - self.assertEqual(sleepers, 0) - self.assertEqual(cond._wait_semaphore.get_value(), 0) - except NotImplementedError: - pass - - def test_notify(self): - cond = self.Condition() - sleeping = self.Semaphore(0) - woken = self.Semaphore(0) - - p = self.Process(target=self.f, args=(cond, sleeping, woken)) - p.daemon = True - p.start() - - p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) - p.daemon = True - p.start() - - # wait for both children to start sleeping - sleeping.acquire() - sleeping.acquire() - - # check no process/thread has woken up - time.sleep(DELTA) - self.assertReturnsIfImplemented(0, get_value, woken) - - # wake up one process/thread - cond.acquire() - cond.notify() - cond.release() - - # check one process/thread has woken up - time.sleep(DELTA) - self.assertReturnsIfImplemented(1, get_value, woken) - - # wake up another - cond.acquire() - cond.notify() - cond.release() - - # check other has woken up - time.sleep(DELTA) - self.assertReturnsIfImplemented(2, get_value, woken) - - # check state is not mucked up - self.check_invariant(cond) - p.join() - - def test_notify_all(self): - cond = self.Condition() - sleeping = self.Semaphore(0) - woken = self.Semaphore(0) - - # start some threads/processes which will timeout - for i in range(3): - p = self.Process(target=self.f, - args=(cond, sleeping, woken, TIMEOUT1)) - p.daemon = True - p.start() - - t = threading.Thread(target=self.f, - args=(cond, sleeping, woken, TIMEOUT1)) - t.daemon = True - t.start() - - # wait for them all to sleep - for i in xrange(6): - sleeping.acquire() - - # check they have all timed out - for i in xrange(6): - woken.acquire() - self.assertReturnsIfImplemented(0, get_value, woken) - - # check state is not mucked up - self.check_invariant(cond) - - # start some more threads/processes - for i in range(3): - p = self.Process(target=self.f, args=(cond, sleeping, woken)) - p.daemon = True - p.start() - - t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) - t.daemon = True - t.start() - - # wait for them to all sleep - for i in xrange(6): - sleeping.acquire() - - # check no process/thread has woken up - time.sleep(DELTA) - self.assertReturnsIfImplemented(0, get_value, woken) - - # wake them all up - cond.acquire() - cond.notify_all() - cond.release() - - # check they have all woken - time.sleep(DELTA) - self.assertReturnsIfImplemented(6, get_value, woken) - - # check state is not mucked up - self.check_invariant(cond) - - def test_timeout(self): - cond = self.Condition() - wait = TimingWrapper(cond.wait) - cond.acquire() - res = wait(TIMEOUT1) - cond.release() - self.assertEqual(res, None) - self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) - - -class _TestEvent(BaseTestCase): - - def _test_event(self, event): - time.sleep(TIMEOUT2) - event.set() - - def test_event(self): - event = self.Event() - wait = 
TimingWrapper(event.wait) - - # Removed temporaily, due to API shear, this does not - # work with threading._Event objects. is_set == isSet - self.assertEqual(event.is_set(), False) - - # Removed, threading.Event.wait() will return the value of the __flag - # instead of None. API Shear with the semaphore backed mp.Event - self.assertEqual(wait(0.0), False) - self.assertTimingAlmostEqual(wait.elapsed, 0.0) - self.assertEqual(wait(TIMEOUT1), False) - self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) - - event.set() - - # See note above on the API differences - self.assertEqual(event.is_set(), True) - self.assertEqual(wait(), True) - self.assertTimingAlmostEqual(wait.elapsed, 0.0) - self.assertEqual(wait(TIMEOUT1), True) - self.assertTimingAlmostEqual(wait.elapsed, 0.0) - # self.assertEqual(event.is_set(), True) - - event.clear() - - #self.assertEqual(event.is_set(), False) - - self.Process(target=self._test_event, args=(event,)).start() - self.assertEqual(wait(), True) - - -class _TestValue(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - codes_values = [ - ('i', 4343, 24234), - ('d', 3.625, -4.25), - ('h', -232, 234), - ('c', latin('x'), latin('y')) - ] - - def _test(self, values): - for sv, cv in zip(values, self.codes_values): - sv.value = cv[2] - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_value(self, raw=False): - if raw: - values = [self.RawValue(code, value) - for code, value, _ in self.codes_values] - else: - values = [self.Value(code, value) - for code, value, _ in self.codes_values] - - for sv, cv in zip(values, self.codes_values): - self.assertEqual(sv.value, cv[1]) - - proc = self.Process(target=self._test, args=(values,)) - proc.start() - proc.join() - - for sv, cv in zip(values, self.codes_values): - self.assertEqual(sv.value, cv[2]) - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_rawvalue(self): - self.test_value(raw=True) - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_getobj_getlock(self): - val1 = self.Value('i', 5) - lock1 = val1.get_lock() # noqa - obj1 = val1.get_obj() # noqa - - val2 = self.Value('i', 5, lock=None) - lock2 = val2.get_lock() # noqa - obj2 = val2.get_obj() # noqa - - lock = self.Lock() - val3 = self.Value('i', 5, lock=lock) - lock3 = val3.get_lock() # noqa - obj3 = val3.get_obj() # noqa - self.assertEqual(lock, lock3) - - arr4 = self.Value('i', 5, lock=False) - self.assertFalse(hasattr(arr4, 'get_lock')) - self.assertFalse(hasattr(arr4, 'get_obj')) - - self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') - - arr5 = self.RawValue('i', 5) - self.assertFalse(hasattr(arr5, 'get_lock')) - self.assertFalse(hasattr(arr5, 'get_obj')) - - -class _TestArray(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def f(self, seq): - for i in range(1, len(seq)): - seq[i] += seq[i - 1] - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_array(self, raw=False): - seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] - if raw: - arr = self.RawArray('i', seq) - else: - arr = self.Array('i', seq) - - self.assertEqual(len(arr), len(seq)) - self.assertEqual(arr[3], seq[3]) - self.assertEqual(list(arr[2:7]), list(seq[2:7])) - - arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) - - self.assertEqual(list(arr[:]), seq) - - self.f(seq) - - p = self.Process(target=self.f, args=(arr,)) - p.start() - p.join() - - self.assertEqual(list(arr[:]), seq) - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_rawarray(self): - self.test_array(raw=True) - - 
@unittest.skipIf(c_int is None, "requires _ctypes") - def test_getobj_getlock_obj(self): - arr1 = self.Array('i', range(10)) - lock1 = arr1.get_lock() # noqa - obj1 = arr1.get_obj() # noqa - - arr2 = self.Array('i', range(10), lock=None) - lock2 = arr2.get_lock() # noqa - obj2 = arr2.get_obj() # noqa - - lock = self.Lock() - arr3 = self.Array('i', range(10), lock=lock) - lock3 = arr3.get_lock() - obj3 = arr3.get_obj() # noqa - self.assertEqual(lock, lock3) - - arr4 = self.Array('i', range(10), lock=False) - self.assertFalse(hasattr(arr4, 'get_lock')) - self.assertFalse(hasattr(arr4, 'get_obj')) - self.assertRaises(AttributeError, - self.Array, 'i', range(10), lock='notalock') - - arr5 = self.RawArray('i', range(10)) - self.assertFalse(hasattr(arr5, 'get_lock')) - self.assertFalse(hasattr(arr5, 'get_obj')) - - -class _TestContainers(BaseTestCase): - - ALLOWED_TYPES = ('manager',) - - def test_list(self): - a = self.list(range(10)) - self.assertEqual(a[:], range(10)) - - b = self.list() - self.assertEqual(b[:], []) - - b.extend(range(5)) - self.assertEqual(b[:], range(5)) - - self.assertEqual(b[2], 2) - self.assertEqual(b[2:10], [2, 3, 4]) - - b *= 2 - self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) - - self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) - - self.assertEqual(a[:], range(10)) - - d = [a, b] - e = self.list(d) - self.assertEqual( - e[:], - [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] - ) - - f = self.list([a]) - a.append('hello') - self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]) - - def test_dict(self): - d = self.dict() - indices = range(65, 70) - for i in indices: - d[i] = chr(i) - self.assertEqual(d.copy(), dict((j, chr(j)) for j in indices)) - self.assertEqual(sorted(d.keys()), indices) - self.assertEqual(sorted(d.values()), [chr(z) for z in indices]) - self.assertEqual(sorted(d.items()), [(x, chr(x)) for x in indices]) - - def test_namespace(self): - n = self.Namespace() - n.name = 'Bob' - n.job = 'Builder' - n._hidden = 'hidden' - self.assertEqual((n.name, n.job), ('Bob', 'Builder')) - del n.job - self.assertEqual(str(n), "Namespace(name='Bob')") - self.assertTrue(hasattr(n, 'name')) - self.assertTrue(not hasattr(n, 'job')) - - -def sqr(x, wait=0.0): - time.sleep(wait) - return x * x - - -class _TestPool(BaseTestCase): - - def test_apply(self): - papply = self.pool.apply - self.assertEqual(papply(sqr, (5,)), sqr(5)) - self.assertEqual(papply(sqr, (), {'x': 3}), sqr(x=3)) - - def test_map(self): - pmap = self.pool.map - self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10))) - self.assertEqual(pmap(sqr, range(100), chunksize=20), - map(sqr, range(100))) - - def test_map_chunksize(self): - try: - self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) - except billiard.TimeoutError: - self.fail("pool.map_async with chunksize stalled on null list") - - def test_async(self): - res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) - get = TimingWrapper(res.get) - self.assertEqual(get(), 49) - self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) - - def test_async_timeout(self): - res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2)) - get = TimingWrapper(res.get) - self.assertRaises(billiard.TimeoutError, get, timeout=TIMEOUT2) - self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) - - def test_imap(self): - it = self.pool.imap(sqr, range(10)) - self.assertEqual(list(it), map(sqr, range(10))) - - it = self.pool.imap(sqr, range(10)) - for i in range(10): - self.assertEqual(it.next(), i * i) - 
self.assertRaises(StopIteration, it.next) - - it = self.pool.imap(sqr, range(1000), chunksize=100) - for i in range(1000): - self.assertEqual(it.next(), i * i) - self.assertRaises(StopIteration, it.next) - - def test_imap_unordered(self): - it = self.pool.imap_unordered(sqr, range(1000)) - self.assertEqual(sorted(it), map(sqr, range(1000))) - - it = self.pool.imap_unordered(sqr, range(1000), chunksize=53) - self.assertEqual(sorted(it), map(sqr, range(1000))) - - def test_make_pool(self): - p = billiard.Pool(3) - self.assertEqual(3, len(p._pool)) - p.close() - p.join() - - def test_terminate(self): - if self.TYPE == 'manager': - # On Unix a forked process increfs each shared object to - # which its parent process held a reference. If the - # forked process gets terminated then there is likely to - # be a reference leak. So to prevent - # _TestZZZNumberOfObjects from failing we skip this test - # when using a manager. - return - - self.pool.map_async( - time.sleep, [0.1 for i in range(10000)], chunksize=1 - ) - self.pool.terminate() - join = TimingWrapper(self.pool.join) - join() - self.assertTrue(join.elapsed < 0.2) - - -class _TestPoolWorkerLifetime(BaseTestCase): - ALLOWED_TYPES = ('processes', ) - - def test_pool_worker_lifetime(self): - p = billiard.Pool(3, maxtasksperchild=10) - self.assertEqual(3, len(p._pool)) - origworkerpids = [w.pid for w in p._pool] - # Run many tasks so each worker gets replaced (hopefully) - results = [] - for i in range(100): - results.append(p.apply_async(sqr, (i, ))) - # Fetch the results and verify we got the right answers, - # also ensuring all the tasks have completed. - for (j, res) in enumerate(results): - self.assertEqual(res.get(), sqr(j)) - # Refill the pool - p._repopulate_pool() - # Wait until all workers are alive - countdown = 5 - while countdown and not all(w.is_alive() for w in p._pool): - countdown -= 1 - time.sleep(DELTA) - finalworkerpids = [worker.pid for worker in p._pool] - # All pids should be assigned. See issue #7805. - self.assertNotIn(None, origworkerpids) - self.assertNotIn(None, finalworkerpids) - # Finally, check that the worker pids have changed - self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) - p.close() - p.join() - - -class _TestZZZNumberOfObjects(BaseTestCase): - # Test that manager has expected number of shared objects left - - # Because test cases are sorted alphabetically, this one will get - # run after all the other tests for the manager. It tests that - # there have been no "reference leaks" for the manager's shared - # objects. Note the comment in _TestPool.test_terminate(). 
- ALLOWED_TYPES = ('manager',) - - def test_number_of_objects(self): - EXPECTED_NUMBER = 1 # the pool object is still alive - billiard.active_children() # discard dead process objs - gc.collect() # do garbage collection - refs = self.manager._number_of_objects() - debug_info = self.manager._debug_info() - if refs != EXPECTED_NUMBER: - print(self.manager._debug_info()) - print(debug_info) - - self.assertEqual(refs, EXPECTED_NUMBER) - -# Test of creating a customized manager class -from billiard.managers import BaseManager, BaseProxy, RemoteError - - -class FooBar(object): - - def f(self): - return 'f()' - - def g(self): - raise ValueError - - def _h(self): - return '_h()' - - -def baz(): - for i in xrange(10): - yield i * i - - -class IteratorProxy(BaseProxy): - _exposed_ = ('next', '__next__') - - def __iter__(self): - return self - - def next(self): - return self._callmethod('next') - - def __next__(self): - return self._callmethod('__next__') - - -class MyManager(BaseManager): - pass - -MyManager.register('Foo', callable=FooBar) -MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) -MyManager.register('baz', callable=baz, proxytype=IteratorProxy) - - -class _TestMyManager(BaseTestCase): - - ALLOWED_TYPES = ('manager',) - - def test_mymanager(self): - manager = MyManager() - manager.start() - - foo = manager.Foo() - bar = manager.Bar() - baz = manager.baz() - - foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] - bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] - - self.assertEqual(foo_methods, ['f', 'g']) - self.assertEqual(bar_methods, ['f', '_h']) - - self.assertEqual(foo.f(), 'f()') - self.assertRaises(ValueError, foo.g) - self.assertEqual(foo._callmethod('f'), 'f()') - self.assertRaises(RemoteError, foo._callmethod, '_h') - - self.assertEqual(bar.f(), 'f()') - self.assertEqual(bar._h(), '_h()') - self.assertEqual(bar._callmethod('f'), 'f()') - self.assertEqual(bar._callmethod('_h'), '_h()') - - self.assertEqual(list(baz), [i * i for i in range(10)]) - - manager.shutdown() - -_queue = Queue.Queue() - - -# Test of connecting to a remote server and using xmlrpclib for serialization -def get_queue(): - return _queue - - -class QueueManager(BaseManager): - '''manager class used by server process''' -QueueManager.register('get_queue', callable=get_queue) - - -class QueueManager2(BaseManager): - '''manager class which specifies the same interface as QueueManager''' -QueueManager2.register('get_queue') - - -SERIALIZER = 'xmlrpclib' - - -class _TestRemoteManager(BaseTestCase): - - ALLOWED_TYPES = ('manager',) - - def _putter(self, address, authkey): - manager = QueueManager2( - address=address, authkey=authkey, serializer=SERIALIZER - ) - manager.connect() - queue = manager.get_queue() - queue.put(('hello world', None, True, 2.25)) - - def test_remote(self): - authkey = os.urandom(32) - - manager = QueueManager( - address=('localhost', 0), authkey=authkey, serializer=SERIALIZER - ) - manager.start() - - p = self.Process(target=self._putter, args=(manager.address, authkey)) - p.start() - - manager2 = QueueManager2( - address=manager.address, authkey=authkey, serializer=SERIALIZER - ) - manager2.connect() - queue = manager2.get_queue() - - # Note that xmlrpclib will deserialize object as a list not a tuple - self.assertEqual(queue.get(), ['hello world', None, True, 2.25]) - - # Because we are using xmlrpclib for serialization instead of - # pickle this will cause a serialization error. 
- self.assertRaises(Exception, queue.put, time.sleep) - - # Make queue finalizer run before the server is stopped - del queue - manager.shutdown() - - -class _TestManagerRestart(BaseTestCase): - - def _putter(self, address, authkey): - manager = QueueManager( - address=address, authkey=authkey, serializer=SERIALIZER) - manager.connect() - queue = manager.get_queue() - queue.put('hello world') - - def test_rapid_restart(self): - authkey = os.urandom(32) - manager = QueueManager( - address=('localhost', 0), authkey=authkey, serializer=SERIALIZER) - addr = manager.get_server().address - manager.start() - - p = self.Process(target=self._putter, args=(manager.address, authkey)) - p.start() - queue = manager.get_queue() - self.assertEqual(queue.get(), 'hello world') - del queue - manager.shutdown() - manager = QueueManager( - address=addr, authkey=authkey, serializer=SERIALIZER) - manager.start() - manager.shutdown() - -SENTINEL = latin('') - - -class _TestConnection(BaseTestCase): - - ALLOWED_TYPES = ('processes', 'threads') - - def _echo(self, conn): - for msg in iter(conn.recv_bytes, SENTINEL): - conn.send_bytes(msg) - conn.close() - - def test_connection(self): - conn, child_conn = self.Pipe() - - p = self.Process(target=self._echo, args=(child_conn,)) - p.daemon = True - p.start() - - seq = [1, 2.25, None] - msg = latin('hello world') - longmsg = msg * 10 - arr = array.array('i', range(4)) - - if self.TYPE == 'processes': - self.assertEqual(type(conn.fileno()), int) - - self.assertEqual(conn.send(seq), None) - self.assertEqual(conn.recv(), seq) - - self.assertEqual(conn.send_bytes(msg), None) - self.assertEqual(conn.recv_bytes(), msg) - - if self.TYPE == 'processes': - buffer = array.array('i', [0] * 10) - expected = list(arr) + [0] * (10 - len(arr)) - self.assertEqual(conn.send_bytes(arr), None) - self.assertEqual(conn.recv_bytes_into(buffer), - len(arr) * buffer.itemsize) - self.assertEqual(list(buffer), expected) - - buffer = array.array('i', [0] * 10) - expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) - self.assertEqual(conn.send_bytes(arr), None) - self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), - len(arr) * buffer.itemsize) - self.assertEqual(list(buffer), expected) - - buffer = bytearray(latin(' ' * 40)) - self.assertEqual(conn.send_bytes(longmsg), None) - try: - res = conn.recv_bytes_into(buffer) - except billiard.BufferTooShort as exc: - self.assertEqual(exc.args, (longmsg,)) - else: - self.fail('expected BufferTooShort, got %s' % res) - - poll = TimingWrapper(conn.poll) - - self.assertEqual(poll(), False) - self.assertTimingAlmostEqual(poll.elapsed, 0) - - self.assertEqual(poll(TIMEOUT1), False) - self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) - - conn.send(None) - - self.assertEqual(poll(TIMEOUT1), True) - self.assertTimingAlmostEqual(poll.elapsed, 0) - - self.assertEqual(conn.recv(), None) - - really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb - conn.send_bytes(really_big_msg) - self.assertEqual(conn.recv_bytes(), really_big_msg) - - conn.send_bytes(SENTINEL) # tell child to quit - child_conn.close() - - if self.TYPE == 'processes': - self.assertEqual(conn.readable, True) - self.assertEqual(conn.writable, True) - self.assertRaises(EOFError, conn.recv) - self.assertRaises(EOFError, conn.recv_bytes) - - p.join() - - def test_duplex_false(self): - reader, writer = self.Pipe(duplex=False) - self.assertEqual(writer.send(1), None) - self.assertEqual(reader.recv(), 1) - if self.TYPE == 'processes': - self.assertEqual(reader.readable, True) - 
self.assertEqual(reader.writable, False) - self.assertEqual(writer.readable, False) - self.assertEqual(writer.writable, True) - self.assertRaises(IOError, reader.send, 2) - self.assertRaises(IOError, writer.recv) - self.assertRaises(IOError, writer.poll) - - def test_spawn_close(self): - # We test that a pipe connection can be closed by parent - # process immediately after child is spawned. On Windows this - # would have sometimes failed on old versions because - # child_conn would be closed before the child got a chance to - # duplicate it. - conn, child_conn = self.Pipe() - - p = self.Process(target=self._echo, args=(child_conn,)) - p.start() - child_conn.close() # this might complete before child initializes - - msg = latin('hello') - conn.send_bytes(msg) - self.assertEqual(conn.recv_bytes(), msg) - - conn.send_bytes(SENTINEL) - conn.close() - p.join() - - def test_sendbytes(self): - if self.TYPE != 'processes': - return - - msg = latin('abcdefghijklmnopqrstuvwxyz') - a, b = self.Pipe() - - a.send_bytes(msg) - self.assertEqual(b.recv_bytes(), msg) - - a.send_bytes(msg, 5) - self.assertEqual(b.recv_bytes(), msg[5:]) - - a.send_bytes(msg, 7, 8) - self.assertEqual(b.recv_bytes(), msg[7:7 + 8]) - - a.send_bytes(msg, 26) - self.assertEqual(b.recv_bytes(), latin('')) - - a.send_bytes(msg, 26, 0) - self.assertEqual(b.recv_bytes(), latin('')) - - self.assertRaises(ValueError, a.send_bytes, msg, 27) - self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) - self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) - self.assertRaises(ValueError, a.send_bytes, msg, -1) - self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) - - -class _TestListenerClient(BaseTestCase): - - ALLOWED_TYPES = ('processes', 'threads') - - def _test(self, address): - conn = self.connection.Client(address) - conn.send('hello') - conn.close() - - def test_listener_client(self): - for family in self.connection.families: - l = self.connection.Listener(family=family) - p = self.Process(target=self._test, args=(l.address,)) - p.daemon = True - p.start() - conn = l.accept() - self.assertEqual(conn.recv(), 'hello') - p.join() - l.close() -''' -class _TestPicklingConnections(BaseTestCase): - """Test of sending connection and socket objects between processes""" - - ALLOWED_TYPES = ('processes',) - - def _listener(self, conn, families): - for fam in families: - l = self.connection.Listener(family=fam) - conn.send(l.address) - new_conn = l.accept() - conn.send(new_conn) - - if self.TYPE == 'processes': - l = socket.socket() - l.bind(('localhost', 0)) - conn.send(l.getsockname()) - l.listen(1) - new_conn, addr = l.accept() - conn.send(new_conn) - - conn.recv() - - def _remote(self, conn): - for (address, msg) in iter(conn.recv, None): - client = self.connection.Client(address) - client.send(msg.upper()) - client.close() - - if self.TYPE == 'processes': - address, msg = conn.recv() - client = socket.socket() - client.connect(address) - client.sendall(msg.upper()) - client.close() - - conn.close() - - def test_pickling(self): - try: - billiard.allow_connection_pickling() - except ImportError: - return - - families = self.connection.families - - lconn, lconn0 = self.Pipe() - lp = self.Process(target=self._listener, args=(lconn0, families)) - lp.start() - lconn0.close() - - rconn, rconn0 = self.Pipe() - rp = self.Process(target=self._remote, args=(rconn0,)) - rp.start() - rconn0.close() - - for fam in families: - msg = ('This connection uses family %s' % fam).encode('ascii') - address = lconn.recv() - rconn.send((address, msg)) - 
new_conn = lconn.recv() - self.assertEqual(new_conn.recv(), msg.upper()) - - rconn.send(None) - - if self.TYPE == 'processes': - msg = latin('This connection uses a normal socket') - address = lconn.recv() - rconn.send((address, msg)) - if hasattr(socket, 'fromfd'): - new_conn = lconn.recv() - self.assertEqual(new_conn.recv(100), msg.upper()) - else: - # XXX On Windows with Py2.6 need to backport fromfd() - discard = lconn.recv_bytes() - - lconn.send(None) - - rconn.close() - lconn.close() - - lp.join() - rp.join() - -''' - - -class _TestHeap(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def test_heap(self): - iterations = 5000 - maxblocks = 50 - blocks = [] - - # create and destroy lots of blocks of different sizes - for i in xrange(iterations): - size = int(random.lognormvariate(0, 1) * 1000) - b = billiard.heap.BufferWrapper(size) - blocks.append(b) - if len(blocks) > maxblocks: - i = random.randrange(maxblocks) - del blocks[i] - - # get the heap object - heap = billiard.heap.BufferWrapper._heap - - # verify the state of the heap - all = [] - occupied = 0 - for L in heap._len_to_seq.values(): - for arena, start, stop in L: - all.append((heap._arenas.index(arena), start, stop, - stop - start, 'free')) - for arena, start, stop in heap._allocated_blocks: - all.append((heap._arenas.index(arena), start, stop, - stop - start, 'occupied')) - occupied += stop - start - - all.sort() - - for i in range(len(all) - 1): - (arena, start, stop) = all[i][:3] - (narena, nstart, nstop) = all[i + 1][:3] - self.assertTrue((arena != narena and nstart == 0) or - (stop == nstart)) - - -class _Foo(Structure): - _fields_ = [ - ('x', c_int), - ('y', c_double) - ] - - -class _TestSharedCTypes(BaseTestCase): - - ALLOWED_TYPES = ('processes', ) - - def _double(self, x, y, foo, arr, string): - x.value *= 2 - y.value *= 2 - foo.x *= 2 - foo.y *= 2 - string.value *= 2 - for i in range(len(arr)): - arr[i] *= 2 - - @unittest.skipIf(Value is None, "requires ctypes.Value") - def test_sharedctypes(self, lock=False): - x = Value('i', 7, lock=lock) - y = Value(c_double, 1.0 / 3.0, lock=lock) - foo = Value(_Foo, 3, 2, lock=lock) - arr = self.Array('d', range(10), lock=lock) - string = self.Array('c', 20, lock=lock) - string.value = 'hello' - - p = self.Process(target=self._double, args=(x, y, foo, arr, string)) - p.start() - p.join() - - self.assertEqual(x.value, 14) - self.assertAlmostEqual(y.value, 2.0 / 3.0) - self.assertEqual(foo.x, 6) - self.assertAlmostEqual(foo.y, 4.0) - for i in range(10): - self.assertAlmostEqual(arr[i], i * 2) - self.assertEqual(string.value, latin('hellohello')) - - @unittest.skipIf(Value is None, "requires ctypes.Value") - def test_synchronize(self): - self.test_sharedctypes(lock=True) - - @unittest.skipIf(ctypes_copy is None, "requires ctypes.copy") - def test_copy(self): - foo = _Foo(2, 5.0) - bar = ctypes_copy(foo) - foo.x = 0 - foo.y = 0 - self.assertEqual(bar.x, 2) - self.assertAlmostEqual(bar.y, 5.0) - - -class _TestFinalize(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def _test_finalize(self, conn): - class Foo(object): - pass - - a = Foo() - util.Finalize(a, conn.send, args=('a',)) - del a # triggers callback for a - - b = Foo() - close_b = util.Finalize(b, conn.send, args=('b',)) - close_b() # triggers callback for b - close_b() # does nothing because callback has already been called - del b # does nothing because callback has already been called - - c = Foo() - util.Finalize(c, conn.send, args=('c',)) - - d10 = Foo() - util.Finalize(d10, conn.send, args=('d10',), 
exitpriority=1) - - d01 = Foo() - util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) - d02 = Foo() - util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) - d03 = Foo() - util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) - - util.Finalize(None, conn.send, args=('e',), exitpriority=-10) - - util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) - - # call mutliprocessing's cleanup function then exit process without - # garbage collecting locals - util._exit_function() - conn.close() - os._exit(0) - - def test_finalize(self): - conn, child_conn = self.Pipe() - - p = self.Process(target=self._test_finalize, args=(child_conn,)) - p.start() - p.join() - - result = [obj for obj in iter(conn.recv, 'STOP')] - self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) - - -class _TestImportStar(BaseTestCase): - """Test that from ... import * works for each module""" - ALLOWED_TYPES = ('processes',) - - def test_import(self): - modules = [ - 'billiard', 'billiard.connection', - 'billiard.heap', 'billiard.managers', - 'billiard.pool', 'billiard.process', - 'billiard.reduction', - 'billiard.synchronize', 'billiard.util' - ] - - if c_int is not None: - # This module requires _ctypes - modules.append('billiard.sharedctypes') - - for name in modules: - __import__(name) - mod = sys.modules[name] - - for attr in getattr(mod, '__all__', ()): - self.assertTrue( - hasattr(mod, attr), - '%r does not have attribute %r' % (mod, attr) - ) - - -class _TestLogging(BaseTestCase): - """Quick test that logging works -- does not test logging output""" - ALLOWED_TYPES = ('processes',) - - def test_enable_logging(self): - logger = billiard.get_logger() - logger.setLevel(util.SUBWARNING) - self.assertTrue(logger is not None) - logger.debug('this will not be printed') - logger.info('nor will this') - logger.setLevel(LOG_LEVEL) - - def _test_level(self, conn): - logger = billiard.get_logger() - conn.send(logger.getEffectiveLevel()) - - def test_level(self): - LEVEL1 = 32 - LEVEL2 = 37 - - logger = billiard.get_logger() - root_logger = logging.getLogger() - root_level = root_logger.level - - reader, writer = billiard.Pipe(duplex=False) - - logger.setLevel(LEVEL1) - self.Process(target=self._test_level, args=(writer,)).start() - self.assertEqual(LEVEL1, reader.recv()) - - logger.setLevel(logging.NOTSET) - root_logger.setLevel(LEVEL2) - self.Process(target=self._test_level, args=(writer,)).start() - self.assertEqual(LEVEL2, reader.recv()) - - root_logger.setLevel(root_level) - logger.setLevel(level=LOG_LEVEL) - - -# class _TestLoggingProcessName(BaseTestCase): -# -# def handle(self, record): -# assert record.processName == billiard.current_process().name -# self.__handled = True -# -# def test_logging(self): -# handler = logging.Handler() -# handler.handle = self.handle -# self.__handled = False -# # Bypass getLogger() and side-effects -# logger = logging.getLoggerClass()( -# 'billiard.test.TestLoggingProcessName') -# logger.addHandler(handler) -# logger.propagate = False -# -# logger.warn('foo') -# assert self.__handled - -# -# Test to verify handle verification, see issue 3321 -# - - -class TestInvalidHandle(unittest.TestCase): - - @unittest.skipIf(WIN32, "skipped on Windows") - def test_invalid_handles(self): - conn = _billiard.Connection(44977608) - self.assertRaises(IOError, conn.poll) - self.assertRaises(IOError, _billiard.Connection, -1) - - -def get_attributes(Source, names): - d = {} - for name in names: - obj = getattr(Source, name) - if type(obj) == 
-
-
-def get_attributes(Source, names):
-    d = {}
-    for name in names:
-        obj = getattr(Source, name)
-        if type(obj) == type(get_attributes):
-            obj = staticmethod(obj)
-        d[name] = obj
-    return d
-
-
-def create_test_cases(Mixin, type):
-    result = {}
-    glob = globals()
-    Type = type.capitalize()
-
-    for name in glob.keys():
-        if name.startswith('_Test'):
-            base = glob[name]
-            if type in base.ALLOWED_TYPES:
-                newname = 'With' + Type + name[1:]
-
-                class Temp(base, unittest.TestCase, Mixin):
-                    pass
-
-                result[newname] = Temp
-                Temp.__name__ = newname
-                Temp.__module__ = Mixin.__module__
-    return result
-
-
-class ProcessesMixin(object):
-    TYPE = 'processes'
-    Process = billiard.Process
-    locals().update(get_attributes(billiard, (
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
-        'Condition', 'Event', 'Value', 'Array', 'RawValue',
-        'RawArray', 'current_process', 'active_children', 'Pipe',
-        'connection', 'JoinableQueue'
-    )))
-
-testcases_processes = create_test_cases(ProcessesMixin, type='processes')
-globals().update(testcases_processes)
-
-
-class ManagerMixin(object):
-    TYPE = 'manager'
-    Process = billiard.Process
-    manager = object.__new__(billiard.managers.SyncManager)
-    locals().update(get_attributes(manager, (
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
-        'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
-        'Namespace', 'JoinableQueue'
-    )))
-
-testcases_manager = create_test_cases(ManagerMixin, type='manager')
-globals().update(testcases_manager)
-
-
-class ThreadsMixin(object):
-    TYPE = 'threads'
-    Process = billiard.dummy.Process
-    locals().update(get_attributes(billiard.dummy, (
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
-        'Condition', 'Event', 'Value', 'Array', 'current_process',
-        'active_children', 'Pipe', 'connection', 'dict', 'list',
-        'Namespace', 'JoinableQueue'
-    )))
-
-testcases_threads = create_test_cases(ThreadsMixin, type='threads')
-globals().update(testcases_threads)
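The mixin machinery deleted here is the standard trick for running one suite of `_Test*` cases against several backends: each mixin contributes the backend's primitives, and `create_test_cases()` stamps out a concrete `TestCase` per backend. A condensed sketch of the pattern (`_TestQueue` and `ThreadBackendMixin` are hypothetical stand-ins):

```python
import unittest
try:
    from Queue import Queue   # Python 2, matching the era of the vendored code
except ImportError:
    from queue import Queue   # Python 3

class _TestQueue(object):     # leading underscore: never collected directly
    def test_put_get(self):
        q = self.Queue()      # the primitive comes from the backend mixin
        q.put(42)
        self.assertEqual(q.get(), 42)

class ThreadBackendMixin(object):
    TYPE = 'threads'
    Queue = Queue

def make_cases(mixin, label):
    # Same idea as the deleted create_test_cases(): combine each _Test* base
    # with unittest.TestCase and a backend mixin under a new name.
    cases = {}
    for name, base in list(globals().items()):
        if name.startswith('_Test'):
            newname = 'With' + label + name[1:]
            cases[newname] = type(newname, (base, unittest.TestCase, mixin), {})
    return cases

globals().update(make_cases(ThreadBackendMixin, 'Threads'))

if __name__ == '__main__':
    unittest.main()           # runs WithThreadsTestQueue
```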
-
-
-class OtherTest(unittest.TestCase):
-    # TODO: add more tests for deliver/answer challenge.
-    def test_deliver_challenge_auth_failure(self):
-
-        class _FakeConnection(object):
-
-            def recv_bytes(self, size):
-                return bytes('something bogus')
-
-            def send_bytes(self, data):
-                pass
-        self.assertRaises(billiard.AuthenticationError,
-                          billiard.connection.deliver_challenge,
-                          _FakeConnection(), bytes('abc'))
-
-    def test_answer_challenge_auth_failure(self):
-
-        class _FakeConnection(object):
-
-            def __init__(self):
-                self.count = 0
-
-            def recv_bytes(self, size):
-                self.count += 1
-                if self.count == 1:
-                    return billiard.connection.CHALLENGE
-                elif self.count == 2:
-                    return bytes('something bogus')
-                return bytes('')
-
-            def send_bytes(self, data):
-                pass
-        self.assertRaises(billiard.AuthenticationError,
-                          billiard.connection.answer_challenge,
-                          _FakeConnection(), bytes('abc'))
-
-
-def initializer(ns):
-    ns.test += 1
-
-
-class TestInitializers(unittest.TestCase):
-    """Test Manager.start()/Pool.__init__() initializer feature
-
-    - see issue 5585
-
-    """
-    def setUp(self):
-        self.mgr = billiard.Manager()
-        self.ns = self.mgr.Namespace()
-        self.ns.test = 0
-
-    def tearDown(self):
-        self.mgr.shutdown()
-
-    def test_manager_initializer(self):
-        m = billiard.managers.SyncManager()
-        self.assertRaises(TypeError, m.start, 1)
-        m.start(initializer, (self.ns,))
-        self.assertEqual(self.ns.test, 1)
-        m.shutdown()
-
-    def test_pool_initializer(self):
-        self.assertRaises(TypeError, billiard.Pool, initializer=1)
-        p = billiard.Pool(1, initializer, (self.ns,))
-        p.close()
-        p.join()
-        self.assertEqual(self.ns.test, 1)
-
-
-def _ThisSubProcess(q):
-    try:
-        q.get(block=False)
-    except Queue.Empty:
-        pass
-
-
-def _TestProcess(q):
-    """Issue 5155, 5313, 5331: Test process in processes
-
-    Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
-
-    """
-    queue = billiard.Queue()
-    subProc = billiard.Process(target=_ThisSubProcess, args=(queue,))
-    subProc.start()
-    subProc.join()
-
-
-def _afunc(x):
-    return x * x
-
-
-def pool_in_process():
-    pool = billiard.Pool(processes=4)
-    pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
-
-
-class _file_like(object):
-    def __init__(self, delegate):
-        self._delegate = delegate
-        self._pid = None
-
-    @property
-    def cache(self):
-        pid = os.getpid()
-        # There are no race conditions since fork keeps only the running thread
-        if pid != self._pid:
-            self._pid = pid
-            self._cache = []
-        return self._cache
-
-    def write(self, data):
-        self.cache.append(data)
-
-    def flush(self):
-        self._delegate.write(''.join(self.cache))
-        self._cache = []
-
-
-class TestStdinBadfiledescriptor(unittest.TestCase):
-
-    def test_queue_in_process(self):
-        queue = billiard.Queue()
-        proc = billiard.Process(target=_TestProcess, args=(queue,))
-        proc.start()
-        proc.join()
-
-    def test_pool_in_process(self):
-        p = billiard.Process(target=pool_in_process)
-        p.start()
-        p.join()
-
-    def test_flushing(self):
-        sio = StringIO()
-        flike = _file_like(sio)
-        flike.write('foo')
-        proc = billiard.Process(target=lambda: flike.flush())
-        self.assertTrue(proc)
-        flike.flush()
-        assert sio.getvalue() == 'foo'
-
-testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
-                   TestStdinBadfiledescriptor]
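The two `OtherTest` cases above drive billiard's connection authentication with fake transports. The handshake they target is the simple HMAC challenge/response that billiard inherits from the stdlib `multiprocessing.connection` design; roughly (an illustrative sketch, not the vendored source):

```python
import hashlib
import hmac
import os

CHALLENGE, WELCOME, FAILURE = b'#CHALLENGE#', b'#WELCOME#', b'#FAILURE#'

class AuthenticationError(Exception):
    pass

def deliver_challenge(connection, authkey):
    # Server side: send a nonce, expect HMAC(authkey, nonce) back.
    message = os.urandom(20)
    connection.send_bytes(CHALLENGE + message)
    digest = hmac.new(authkey, message, hashlib.md5).digest()
    response = connection.recv_bytes(256)
    if response != digest:                   # 'something bogus' fails here
        connection.send_bytes(FAILURE)
        raise AuthenticationError('digest received was wrong')
    connection.send_bytes(WELCOME)

def answer_challenge(connection, authkey):
    # Client side: sign the received nonce, then expect a WELCOME.
    message = connection.recv_bytes(256)
    if not message.startswith(CHALLENGE):
        raise AuthenticationError('message pattern was wrong')
    digest = hmac.new(authkey, message[len(CHALLENGE):], hashlib.md5).digest()
    connection.send_bytes(digest)
    response = connection.recv_bytes(256)
    if response != WELCOME:                  # the second test fails here
        raise AuthenticationError('digest sent was rejected')
```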
-
-
-def test_main(run=None):
-    if sys.platform.startswith("linux"):
-        try:
-            billiard.RLock()
-        except OSError:
-            raise SkipTest("OSError raises on RLock creation, see issue 3111!")
-
-    if run is None:
-        from test.test_support import run_unittest as run
-
-    util.get_temp_dir()     # creates temp directory for use by all processes
-
-    billiard.get_logger().setLevel(LOG_LEVEL)
-
-    ProcessesMixin.pool = billiard.Pool(4)
-    ThreadsMixin.pool = billiard.dummy.Pool(4)
-    ManagerMixin.manager.__init__()
-    ManagerMixin.manager.start()
-    ManagerMixin.pool = ManagerMixin.manager.Pool(4)
-
-    testcases = (
-        sorted(testcases_processes.values(), key=lambda tc: tc.__name__) +
-        sorted(testcases_threads.values(), key=lambda tc: tc.__name__) +
-        sorted(testcases_manager.values(), key=lambda tc: tc.__name__) +
-        testcases_other
-    )
-
-    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
-    suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
-    # (ncoghlan): Whether or not sys.exc_clear is executed by the threading
-    # module during these tests is at least platform dependent and possibly
-    # non-deterministic on any given platform. So we don't mind if the listed
-    # warnings aren't actually raised.
-    with test_support.check_py3k_warnings(
-            (".+__(get|set)slice__ has been removed", DeprecationWarning),
-            (r"sys.exc_clear\(\) not supported", DeprecationWarning),
-            quiet=True):
-        run(suite)
-
-    ThreadsMixin.pool.terminate()
-    ProcessesMixin.pool.terminate()
-    ManagerMixin.pool.terminate()
-    ManagerMixin.manager.shutdown()
-
-    del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
-
-
-def main():
-    test_main(unittest.TextTestRunner(verbosity=2).run)
-
-if __name__ == '__main__':
-    main()
diff --git a/awx/lib/site-packages/iso8601/iso8601.py b/awx/lib/site-packages/iso8601/iso8601.py
index 35dc31f798..becdd95812 100644
--- a/awx/lib/site-packages/iso8601/iso8601.py
+++ b/awx/lib/site-packages/iso8601/iso8601.py
@@ -32,27 +32,43 @@ else:
 ISO8601_REGEX = re.compile(
     r"""
     (?P<year>[0-9]{4})
-    (-{0,1}(?P<month>[0-9]{1,2})){1}
-    (-{0,1}(?P<day>[0-9]{1,2})){1}
     (
-        (?P<separator>[ T])
-        (?P<hour>[0-9]{2})
-        (:{0,1}(?P<minute>[0-9]{2})){0,1}
         (
-            :{0,1}(?P<second>[0-9]{1,2})
-            (\.(?P<second_fraction>[0-9]+)){0,1}
-        ){0,1}
-        (?P<timezone>
-            Z
+            (-(?P<monthdash>[0-9]{1,2}))
             |
+            (?P<month>[0-9]{2})
+            (?!$)  # Don't allow YYYYMM
+        )
+        (
             (
-                (?P<tz_sign>[-+])
-                (?P<tz_hour>[0-9]{2})
-                :{0,1}
-                (?P<tz_minute>[0-9]{2}){0,1}
+                (-(?P<daydash>[0-9]{1,2}))
+                |
+                (?P<day>[0-9]{2})
             )
-        ){0,1}
-    ){0,1}
+            (
+                (
+                    (?P<separator>[ T])
+                    (?P<hour>[0-9]{2})
+                    (:{0,1}(?P<minute>[0-9]{2})){0,1}
+                    (
+                        :{0,1}(?P<second>[0-9]{1,2})
+                        (\.(?P<second_fraction>[0-9]+)){0,1}
+                    ){0,1}
+                    (?P<timezone>
+                        Z
+                        |
+                        (
+                            (?P<tz_sign>[-+])
+                            (?P<tz_hour>[0-9]{2})
+                            :{0,1}
+                            (?P<tz_minute>[0-9]{2}){0,1}
+                        )
+                    ){0,1}
+                ){0,1}
+            )
+        ){0,1}  # YYYY-MM
+    ){0,1}  # YYYY only
+    $
     """,
     re.VERBOSE
 )
@@ -113,20 +129,22 @@ class FixedOffset(tzinfo):
     def __repr__(self):
         return "<FixedOffset %r %r>" % (self.__name, self.__offset)
 
-def to_int(d, key, default_to_zero=False, default=None):
+def to_int(d, key, default_to_zero=False, default=None, required=True):
     """Pull a value from the dict and convert to int
 
    :param default_to_zero: If the value is None or empty, treat it as zero
    :param default: If the value is missing in the dict use this default
 
    """
-    value = d.get(key, default)
+    value = d.get(key) or default
    LOG.debug("Got %r for %r with default %r", value, key, default)
    if (value in ["", None]) and default_to_zero:
        return 0
    if value is None:
-        raise ParseError("Unable to read %s from %s" % (key, d))
-    return int(value)
+        if required:
+            raise ParseError("Unable to read %s from %s" % (key, d))
+    else:
+        return int(value)
 
 def parse_timezone(matches, default_timezone=UTC):
     """Parses ISO 8601 time zone specs into tzinfo offsets
@@ -172,8 +190,8 @@ def parse_date(datestring, default_timezone=UTC):
     try:
         return datetime(
             year=to_int(groups, "year"),
-            month=to_int(groups, "month"),
-            day=to_int(groups, "day"),
+            month=to_int(groups, "month", default=to_int(groups, "monthdash", required=False, default=1)),
+            day=to_int(groups, "day", default=to_int(groups, "daydash", required=False, default=1)),
             hour=to_int(groups, "hour", default_to_zero=True),
             minute=to_int(groups, "minute", default_to_zero=True),
             second=to_int(groups, "second", default_to_zero=True),
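Not part of the vendored patch, but a quick sanity sketch of what the reworked grammar buys: reduced-precision dates now parse, with missing fields filled through the `monthdash`/`daydash` fallbacks above, while the new trailing `$` (plus the `(?!$)` guard) still rejects ambiguous input:

```python
import iso8601

# Date-only and reduced-precision forms now parse; a missing month or day
# falls back to 1 via the required=False defaults in parse_date().
print(iso8601.parse_date("2014"))      # 2014-01-01 00:00:00+00:00
print(iso8601.parse_date("2014-02"))   # 2014-02-01 00:00:00+00:00
print(iso8601.parse_date("20140203"))  # basic format: 2014-02-03 00:00:00+00:00

# YYYYMM stays ambiguous and is explicitly refused by the (?!$) lookahead.
try:
    iso8601.parse_date("201402")
except iso8601.ParseError as exc:
    print(exc)                         # Unable to parse date string ...
```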
diff --git a/awx/lib/site-packages/iso8601/test_iso8601.py b/awx/lib/site-packages/iso8601/test_iso8601.py
index d34b07d02f..ed2d45a0b5 100644
--- a/awx/lib/site-packages/iso8601/test_iso8601.py
+++ b/awx/lib/site-packages/iso8601/test_iso8601.py
@@ -26,21 +26,27 @@ def test_parse_utc_different_default():
     d = iso8601.parse_date("2007-01-01T08:00:00Z", default_timezone=tz)
     assert d == datetime.datetime(2007, 1, 1, 8, 0, 0, 0, iso8601.UTC)
 
-@pytest.mark.parametrize("invalid_date", [
-    ("2013-10-",),
-    ("2013-",),
-    ("",),
-    (None,),
-    ("23",),
-    ("131015T142533Z",),
-    ("131015",),
-    ("2007-06-23X06:40:34.00Z", ),  # https://code.google.com/p/pyiso8601/issues/detail?id=14
-    ("2007-06-23 06:40:34.00Zrubbish", ),  # https://code.google.com/p/pyiso8601/issues/detail?id=14
+@pytest.mark.parametrize("invalid_date, error_string", [
+    ("2013-10-", "Unable to parse date string"),
+    ("2013-", "Unable to parse date string"),
+    ("", "Unable to parse date string"),
+    (None, "Expecting a string"),
+    ("wibble", "Unable to parse date string"),
+    ("23", "Unable to parse date string"),
+    ("131015T142533Z", "Unable to parse date string"),
+    ("131015", "Unable to parse date string"),
+    ("20141", "Unable to parse date string"),
+    ("201402", "Unable to parse date string"),
+    ("2007-06-23X06:40:34.00Z", "Unable to parse date string"),  # https://code.google.com/p/pyiso8601/issues/detail?id=14
+    ("2007-06-23 06:40:34.00Zrubbish", "Unable to parse date string"),  # https://code.google.com/p/pyiso8601/issues/detail?id=14
+    ("20114-01-03T01:45:49", "Unable to parse date string"),
 ])
-def test_parse_invalid_date(invalid_date):
+def test_parse_invalid_date(invalid_date, error_string):
+    assert isinstance(invalid_date, str) or invalid_date is None  # Why? 'cos I've screwed up the parametrize before :)
     with pytest.raises(iso8601.ParseError) as exc:
         iso8601.parse_date(invalid_date)
     assert exc.errisinstance(iso8601.ParseError)
+    assert str(exc.value).startswith(error_string)
 
 @pytest.mark.parametrize("valid_date,expected_datetime,isoformat", [
     ("2007-06-23 06:40:34.00Z", datetime.datetime(2007, 6, 23, 6, 40, 34, 0, iso8601.UTC), "2007-06-23T06:40:34+00:00"),  # Handle a separator other than T
@@ -48,7 +54,7 @@ def test_parse_invalid_date(invalid_date):
     ("2007-01-01T08:00:00", datetime.datetime(2007, 1, 1, 8, 0, 0, 0, iso8601.UTC), "2007-01-01T08:00:00+00:00"),  # Handle timezone-less dates. Assumes UTC. http://code.google.com/p/pyiso8601/issues/detail?id=4
     ("2006-10-20T15:34:56.123+02:30", datetime.datetime(2006, 10, 20, 15, 34, 56, 123000, iso8601.FixedOffset(2, 30, "+02:30")), None),
     ("2006-10-20T15:34:56Z", datetime.datetime(2006, 10, 20, 15, 34, 56, 0, iso8601.UTC), "2006-10-20T15:34:56+00:00"),
-    ("2007-5-7T11:43:55.328Z'", datetime.datetime(2007, 5, 7, 11, 43, 55, 328000, iso8601.UTC), "2007-05-07T11:43:55.328000+00:00"),  # http://code.google.com/p/pyiso8601/issues/detail?id=6
+    ("2007-5-7T11:43:55.328Z", datetime.datetime(2007, 5, 7, 11, 43, 55, 328000, iso8601.UTC), "2007-05-07T11:43:55.328000+00:00"),  # http://code.google.com/p/pyiso8601/issues/detail?id=6
     ("2006-10-20T15:34:56.123Z", datetime.datetime(2006, 10, 20, 15, 34, 56, 123000, iso8601.UTC), "2006-10-20T15:34:56.123000+00:00"),
     ("2013-10-15T18:30Z", datetime.datetime(2013, 10, 15, 18, 30, 0, 0, iso8601.UTC), "2013-10-15T18:30:00+00:00"),
     ("2013-10-15T22:30+04", datetime.datetime(2013, 10, 15, 22, 30, 0, 0, iso8601.FixedOffset(4, 0, "+04:00")), "2013-10-15T22:30:00+04:00"),  #